id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
35158 | <reponame>Li-fAngyU/Paddle
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
def get_outputs(DOut, X, Y):
    """Reference gradients for a GEMM-with-bias epilogue.

    Given Out = X @ Y + Bias and its upstream gradient DOut, returns the
    analytic gradients (DX, DY, DBias) used as the expected op outputs.
    """
    grad_x = np.matmul(DOut, Y.T)
    grad_y = np.matmul(X.T, DOut)
    grad_bias = DOut.sum(axis=0)
    return grad_x, grad_y, grad_bias
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYBiasFP16(OpTest):
    """Checks fused_gemm_epilogue_grad produces DX, DY and DBias in FP16."""

    def setUp(self):
        self.op_type = "fused_gemm_epilogue_grad"
        self.place = core.CUDAPlace(0)
        self.init_dtype_type()
        # Shift uniform samples to [-0.5, 0.5) so values are centred on zero.
        self.inputs = {
            'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
            'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
            'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
        }
        self.attrs = {"activation": 'none'}
        DX, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'],
                                    self.inputs['Y'])
        self.outputs = {'DX': DX, 'DY': DY, 'DBias': DBias}

    def init_dtype_type(self):
        # FP16 needs a looser tolerance than the FP32/FP64 subclasses.
        self.dtype = np.float16
        self.atol = 1e-3

    def test_check_output(self):
        # Skip silently when the device cannot evaluate float16 kernels.
        if self.dtype == np.float16 and not core.is_float16_supported(
                self.place):
            return
        self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYBiasFP32(
        TestFuseGemmEpilogueGradOpDXYBiasFP16):
    """FP32 variant of the DX/DY/DBias gradient test."""

    def init_dtype_type(self):
        self.dtype = np.single
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYBiasFP64(
        TestFuseGemmEpilogueGradOpDXYBiasFP16):
    """FP64 variant of the DX/DY/DBias gradient test."""

    def init_dtype_type(self):
        self.dtype = np.double
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYBiasFP16(OpTest):
    """Checks fused_gemm_epilogue_grad when only DY and DBias are requested."""

    def setUp(self):
        self.op_type = "fused_gemm_epilogue_grad"
        self.place = core.CUDAPlace(0)
        self.init_dtype_type()
        # Shift uniform samples to [-0.5, 0.5) so values are centred on zero.
        self.inputs = {
            'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
            'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
            'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
        }
        self.attrs = {"activation": 'none'}
        # DX is intentionally discarded: this test exercises the DY/DBias path.
        _, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'],
                                   self.inputs['Y'])
        self.outputs = {'DY': DY, 'DBias': DBias}

    def init_dtype_type(self):
        # FP16 needs a looser tolerance than the FP32/FP64 subclasses.
        self.dtype = np.float16
        self.atol = 1e-3

    def test_check_output(self):
        # Skip silently when the device cannot evaluate float16 kernels.
        if self.dtype == np.float16 and not core.is_float16_supported(
                self.place):
            return
        self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYBiasFP32(
        TestFuseGemmEpilogueGradOpDYBiasFP16):
    """FP32 variant of the DY/DBias gradient test."""

    def init_dtype_type(self):
        self.dtype = np.single
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYBiasFP64(
        TestFuseGemmEpilogueGradOpDYBiasFP16):
    """FP64 variant of the DY/DBias gradient test."""

    def init_dtype_type(self):
        self.dtype = np.double
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYFP16(OpTest):
    """Checks fused_gemm_epilogue_grad when only DY is requested."""

    def setUp(self):
        self.op_type = "fused_gemm_epilogue_grad"
        self.place = core.CUDAPlace(0)
        self.init_dtype_type()
        # Shift uniform samples to [-0.5, 0.5) so values are centred on zero.
        self.inputs = {
            'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
            'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
            'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
        }
        self.attrs = {"activation": 'none'}
        # Only the DY output matters for this case.
        _, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'],
                               self.inputs['Y'])
        self.outputs = {'DY': DY}

    def init_dtype_type(self):
        # FP16 needs a looser tolerance than the FP32/FP64 subclasses.
        self.dtype = np.float16
        self.atol = 1e-3

    def test_check_output(self):
        # Skip silently when the device cannot evaluate float16 kernels.
        if self.dtype == np.float16 and not core.is_float16_supported(
                self.place):
            return
        self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYFP32(TestFuseGemmEpilogueGradOpDYFP16):
    """FP32 variant of the DY-only gradient test."""

    def init_dtype_type(self):
        self.dtype = np.single
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYFP64(TestFuseGemmEpilogueGradOpDYFP16):
    """FP64 variant of the DY-only gradient test."""

    def init_dtype_type(self):
        self.dtype = np.double
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYFP16(OpTest):
    """Checks fused_gemm_epilogue_grad when DX and DY (no bias) are requested."""

    def setUp(self):
        self.op_type = "fused_gemm_epilogue_grad"
        self.place = core.CUDAPlace(0)
        self.init_dtype_type()
        # Shift uniform samples to [-0.5, 0.5) so values are centred on zero.
        self.inputs = {
            'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
            'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
            'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
        }
        self.attrs = {"activation": 'none'}
        # DBias is intentionally discarded: this test has no bias gradient.
        DX, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'],
                                self.inputs['Y'])
        self.outputs = {'DX': DX, 'DY': DY}

    def init_dtype_type(self):
        # FP16 needs a looser tolerance than the FP32/FP64 subclasses.
        self.dtype = np.float16
        self.atol = 1e-3

    def test_check_output(self):
        # Skip silently when the device cannot evaluate float16 kernels.
        if self.dtype == np.float16 and not core.is_float16_supported(
                self.place):
            return
        self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYFP32(TestFuseGemmEpilogueGradOpDXYFP16):
    """FP32 variant of the DX/DY gradient test."""

    def init_dtype_type(self):
        self.dtype = np.single
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grad op")  # fixed typo: was "no grap op"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYFP64(TestFuseGemmEpilogueGradOpDXYFP16):
    """FP64 variant of the DX/DY gradient test."""

    def init_dtype_type(self):
        self.dtype = np.double
        self.atol = 1e-6
if __name__ == "__main__":
    # Seed NumPy so the randomly generated operator inputs are reproducible.
    np.random.seed(0)
    unittest.main()
| StarcoderdataPython |
6543657 | <reponame>hwixley/SDP-DrPhil
import py_trees
import numpy as np
def dummy_nearest_obstacle(scan):
    """Return an ``(angle, distance)`` pair for the nearest obstacle.

    Placeholder implementation: the "angle" is simply the index of the
    smallest reading in ``scan.ranges`` and the distance is hard-coded to 0.
    """
    nearest_idx = np.argmin(scan.ranges)
    return (nearest_idx, 0)
class ClosestObstacle(py_trees.behaviour.Behaviour):
    """Behaviour that analyses the "/scan" blackboard variable and sets
    "closest_obstacle/angle" and "closest_obstacle/distance".

    Returns FAILURE when no scan data is available.
    """

    def __init__(self, name):
        """
        Args:
            name: name of the behaviour
        """
        super().__init__(name=name)
        self.blackboard = py_trees.Blackboard()

    def initialise(self):
        # Nothing to reset between activations.
        pass

    def update(self):
        # Guard clause: without scan data there is nothing to analyse.
        scan = self.blackboard.get("scan")
        if scan is None:
            self.feedback_message = "No scan data"
            return py_trees.common.Status.FAILURE

        angle, distance = dummy_nearest_obstacle(scan)
        self.blackboard.set("closest_obstacle/angle", angle)
        self.blackboard.set("closest_obstacle/distance", distance)
        self.feedback_message = str(angle) + ":" + str(distance)
        return py_trees.common.Status.SUCCESS
| StarcoderdataPython |
117349 | <filename>app/tilt_resources/meta.py<gh_stars>1-10
import uuid
from datetime import datetime
import hashlib
import json
from typing import Dict
from database.models import MetaTask, Task
class Meta:
    def __init__(self, _id=None, name=None, version=None, language=None, created=None,
                 modified=None, url=None,
                 root_task: Task = None, status=None, _hash=None):
        """Wrapper around the MongoEngine ``MetaTask`` document class.

        The wrapper is the metadata object which gets written into the tilt
        document. It automatically passes the information necessary for
        persistence to the document class. During tilt creation this object
        is re-instantiated to prepare it for writing into the tilt document.

        Args:
            _id: document id; a fresh UUID4 string is generated when omitted.
            name: human-readable name of the document.
            version: document version. Defaults to 1.
            language: ISO language code. Defaults to "de".
            created / modified: ISO timestamps; default to "now" when omitted.
            url: source URL of the document.
            root_task (Task): root task this metadata belongs to.
            status: lifecycle status. Defaults to "active".
            _hash: SHA-256 hex digest of the tilt dict, if already known.
        """
        self._id = _id if _id else str(uuid.uuid4())
        self.name = name
        self.created = created if created else datetime.now().isoformat()
        self.modified = modified if modified else datetime.now().isoformat()
        self.version = version if version else 1
        self.url = url
        self.language = language if language else "de"
        self.status = status if status else "active"
        self._hash = _hash if _hash else None
        # Equivalent to `root_task or None`; kept for symmetry with the other fields.
        self.root_task = root_task if root_task else None

    @classmethod
    def from_db_document(cls, db_document) -> 'Meta':
        """Build a ``Meta`` object from a persisted ``MetaTask`` document.

        No database object is created by this method; the resulting object
        can be used directly for tilt creation.
        """
        cls_obj = cls(_id=db_document._id,
                      name=db_document.name,
                      created=db_document.created,
                      modified=db_document.modified,
                      version=db_document.version,
                      language=db_document.language,
                      status=db_document.status,
                      url=db_document.url,
                      root_task=db_document.root_task,
                      _hash=db_document._hash)
        return cls_obj

    def generate_hash_entry(self, tilt_dict):
        """Create a hash value for the meta tilt entry.

        If the new hash differs from the previous one, the ``modified``
        timestamp is updated as well. If the hash values are identical,
        nothing happens.

        Args:
            tilt_dict: the tilt document content to hash (JSON-serializable).
        """
        json_string = json.dumps(tilt_dict).encode('utf-8')
        new_hash = hashlib.sha256(json_string).hexdigest()
        if new_hash != self._hash:
            self.modified = datetime.now().isoformat()
            self._hash = new_hash
            print("Hash Value has been written into Metadata Object.")

    def to_tilt_dict_meta(self) -> Dict:
        """Return the metadata as the ``{"meta": {...}}`` tilt sub-dict."""
        tilt_dict_meta = {
            "_id": self._id,
            "name": self.name,
            "created": self.created,
            "modified": self.modified,
            "version": self.version,
            "language": self.language,
            "status": self.status,
            "url": self.url,
            "_hash": self._hash
        }
        return {"meta": tilt_dict_meta}

    def save(self):
        """Persist this metadata as a ``MetaTask`` document in the database."""
        meta_task = MetaTask(
            _id=self._id,
            name=self.name,
            created=self.created,
            modified=self.modified,
            version=self.version,
            language=self.language,
            status=self.status,
            url=self.url,
            root_task=self.root_task,
            _hash=self._hash if self._hash else None
        )
        meta_task.save()
| StarcoderdataPython |
11217530 | # -*- coding: utf-8 -*-
import sys
import time
from os.path import dirname, join as join_path
from chibitest import TestCase, Benchmark, ok
class BenchmarkLibraries(Benchmark):
    """Benchmark several Markdown implementations on the same input document."""

    def setup(self):
        fp = join_path(dirname(__file__), 'data', 'markdown-syntax.md')
        with open(fp, 'r') as f:
            self.text = f.read()
        # Hoep requires unicode input on Python 2; on Python 3 str is already unicode.
        if sys.version_info[0] == 2:
            self.hoep_text = unicode(self.text)
        else:
            self.hoep_text = self.text

    def test_misaka(self):
        import misaka
        extensions = (
            'no-intra-emphasis',
            'fenced-code',  # was 'fenced=code' (typo); unknown names are ignored
            'autolink',
            'tables',
            'strikethrough',
        )
        misaka.html(self.text, extensions)

    def test_misaka_classes(self):
        # Same as test_misaka, but through the renderer/parser class API.
        import misaka
        extensions = (
            'no-intra-emphasis',
            'fenced-code',  # was 'fenced=code' (typo); unknown names are ignored
            'autolink',
            'tables',
            'strikethrough',
        )
        r = misaka.HtmlRenderer()
        p = misaka.Markdown(r, extensions)
        p(self.text)

    def test_mistune(self):
        import mistune
        mistune.markdown(self.text)

    def test_markdown(self):
        import markdown
        markdown.markdown(self.text, ['extra'])

    def test_markdown2(self):
        import markdown2
        extras = ['code-friendly', 'fenced-code-blocks', 'footnotes']
        markdown2.markdown(self.text, extras=extras)

    def test_hoep(self):
        import hoep as m
        extensions = (
            m.EXT_NO_INTRA_EMPHASIS | m.EXT_FENCED_CODE | m.EXT_AUTOLINK |
            m.EXT_TABLES | m.EXT_STRIKETHROUGH | m.EXT_FOOTNOTES)
        md = m.Hoep(extensions=extensions)
        md.render(self.hoep_text)
| StarcoderdataPython |
3479400 | <reponame>hamiltonparker/Learning
from distutils.core import setup

# Minimal packaging metadata for the single-module HNFGen distribution.
# NOTE(review): distutils is deprecated and removed in Python 3.12; consider
# migrating to setuptools.setup — confirm the minimum supported Python first.
setup(name = 'HNFGen',
      version = '1.0',
      py_modules=['HNFGen'],
      )
| StarcoderdataPython |
11270898 | #!/usr/bin/env python
import asyncio
import logging
from typing import Optional, List
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.core.data_type.user_stream_tracker import UserStreamTracker
from hummingbot.connector.exchange.eunion.eunion_api_user_stream_data_source import EunionAPIUserStreamDataSource
from hummingbot.connector.exchange.eunion.eunion_auth import EunionAuth
class EunionUserStreamTracker(UserStreamTracker):
    """Tracks private (authenticated) user-stream events for the Eunion exchange."""

    # Cached class-level logger, shared by all instances.
    _btust_logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        """Return the lazily created class logger."""
        if cls._btust_logger is None:
            cls._btust_logger = logging.getLogger(__name__)
        return cls._btust_logger

    def __init__(
        self,
        eunion_auth: Optional[EunionAuth] = None,
        trading_pairs: Optional[List[str]] = None,
    ):
        """
        :param eunion_auth: credentials used to authenticate the user stream.
        :param trading_pairs: trading pairs to track; defaults to an empty list.
        """
        super().__init__()
        self._eunion_auth: EunionAuth = eunion_auth
        # Build a fresh list per instance; the previous `= []` default was a
        # shared mutable default argument (all instances aliased one list).
        self._trading_pairs: List[str] = trading_pairs if trading_pairs is not None else []
        self._ev_loop: asyncio.events.AbstractEventLoop = asyncio.get_event_loop()
        self._data_source: Optional[UserStreamTrackerDataSource] = None
        self._user_stream_tracking_task: Optional[asyncio.Task] = None

    @property
    def data_source(self) -> UserStreamTrackerDataSource:
        """Lazily construct the API data source bound to the auth object."""
        if not self._data_source:
            self._data_source = EunionAPIUserStreamDataSource(eunion_auth=self._eunion_auth)
        return self._data_source

    @property
    def exchange_name(self) -> str:
        return "eunion"

    async def start(self):
        """Spawn the background stream-listening task and wait on it."""
        self._user_stream_tracking_task = asyncio.ensure_future(
            self.data_source.listen_for_user_stream(self._ev_loop, self._user_stream)
        )
        await asyncio.gather(self._user_stream_tracking_task)
| StarcoderdataPython |
149750 | <gh_stars>1000+
# Copyright 2016 ClusterHQ Inc. See LICENSE file for details.
from benchmark.metrics_parser import (
mean, container_convergence, cpu_usage_for_process,
wallclock_for_operation, request_latency, handle_cputime_metric,
handle_wallclock_metric
)
from flocker.testtools import TestCase
class MetricsParserTests(TestCase):
    """
    Tests for the metrics parsing script.
    """

    def test_mean_returns_floating_point_number(self):
        # mean of ints must still be a float.
        values = [1, 2, 3, 4]
        mean_result = mean(values)
        self.assertIsInstance(mean_result, float)

    def test_mean_returns_none_for_no_values(self):
        # Empty input yields None rather than raising ZeroDivisionError.
        values = []
        self.assertEqual(mean(values), None)

    def test_mean_correctly_calculates_mean(self):
        values = [1, 2, 3, 4, 5, 6]
        self.assertEqual(mean(values), 3.5)

    def test_cpu_usage_for_process_no_matching_results(self):
        """
        cpu_usage_for_process only considers results which have the
        'cputime' metric type and match the specified process name.
        None is returned if no results match.
        """
        process_name = 'test-process'
        results = [
            {
                'metric': {
                    'type': 'wallclock'
                },
                'process': process_name
            },
            {
                'metric': {
                    'type': 'cputime'
                },
                'process': 'another-process'
            },
        ]
        cputime_result = cpu_usage_for_process(results, process_name)
        self.assertEqual(cputime_result, None)

    def test_cpu_usage_for_process_calculates_result(self):
        """
        cpu_usage_for_process correctly calculates the CPU percentage for
        a process by dividing the total cputime by the total wallclock
        time across all the samples.
        """
        process_name = 'test-process'
        results = [
            {
                'metric': {
                    'type': 'cputime'
                },
                'process': process_name,
                'value': 10,
                'wallclock': 60
            },
            {
                'metric': {
                    'type': 'cputime'
                },
                'process': process_name,
                'value': 30,
                'wallclock': 100
            },
        ]
        cputime_result = cpu_usage_for_process(results, process_name)
        # Total CPU time: 40
        # Total wallclock time: 160
        self.assertEqual(cputime_result, 0.25)

    def test_wallclock_for_operation_no_matching_results(self):
        """
        wallclock_for_operation only considers results which have the
        'wallclock' metrics and match the specified operation name.
        None is returned if no results match.
        """
        operation_name = 'test-operation'
        results = [
            {
                'metric': {
                    'type': 'wallclock'
                },
                'operation': {
                    'type': 'another-operation'
                }
            },
            {
                'metric': {
                    'type': 'cputime'
                },
                'operation': {
                    'type': operation_name
                }
            },
        ]
        wallclock_result = wallclock_for_operation(results, operation_name)
        self.assertEqual(wallclock_result, None)

    def test_wallclock_for_operation_calculates_result(self):
        """
        wallclock_for_operation returns the mean of the values from
        samples which have the 'wallclock' metric type and match the
        specified operation.
        """
        operation = 'test-operation'
        results = [
            {
                'metric': {
                    'type': 'wallclock'
                },
                'operation': {
                    'type': operation
                },
                'value': 11
            },
            {
                'metric': {
                    'type': 'wallclock'
                },
                'operation': {
                    'type': operation
                },
                'value': 14
            },
        ]
        wallclock_result = wallclock_for_operation(results, operation)
        self.assertEqual(wallclock_result, 12.5)

    def test_container_convergence_no_matching_results(self):
        """
        container_convergence only considers results which have the
        'wallclock' metric and are for the 'create-container' operation.
        None is returned if no results match.
        """
        results = [
            {
                'metric': {
                    'type': 'cputime'
                },
                'operation': {
                    'type': 'create-container'
                },
                'value': 4
            },
            {
                'metric': {
                    'type': 'wallclock'
                },
                'operation': {
                    'type': 'read-request'
                },
                'value': 10,
            },
        ]
        convergence_results = container_convergence(results, 10)
        self.assertEqual(convergence_results, None)

    def test_container_convergence_calculates_result(self):
        """
        container_convergence returns the percentage of
        'create-container' operations that completed within the
        specified time limit.
        """
        results = [
            {
                'metric': {
                    'type': 'wallclock'
                },
                'operation': {
                    'type': 'create-container'
                },
                'value': 4
            },
            {
                'metric': {
                    'type': 'wallclock'
                },
                'operation': {
                    'type': 'create-container'
                },
                'value': 2
            },
            {
                'metric': {
                    'type': 'wallclock'
                },
                'operation': {
                    'type': 'create-container'
                },
                'value': 5
            },
            {
                'metric': {
                    'type': 'wallclock'
                },
                'operation': {
                    'type': 'create-container'
                },
                'value': 9,
            },
        ]
        # 3 of 4 operations finished within 5 seconds.
        convergence_results = container_convergence(results, 5)
        self.assertEqual(convergence_results, 0.75)

    def test_request_latency_no_matching_results(self):
        """
        request_latency only considers results which have the
        'scenario.metrics.call_durations' property.
        None is returned if no results match.
        """
        results = [
            {
                'scenario': {
                    'name': 'test-scenario',
                    'type': 'test-scenario-type'
                },
            },
            {
                'scenario': {
                    'name': 'test-scenario',
                    'type': 'test-scenario-type',
                    'metrics': {}
                },
            }
        ]
        latency_result = request_latency(results, 10)
        self.assertEqual(latency_result, None)

    def test_request_latency_calculates_result(self):
        """
        request_latency correctly calculates the percentage of scenario
        requests that complete within the specified time limit.
        """
        results = [
            {
                'scenario': {
                    'name': 'test-scenario',
                    'type': 'test-scenario-type',
                    'metrics': {
                        'call_durations': {
                            '1.0': 10,
                            '0.8': 10,
                            '0.9': 10,
                            '1.1': 10,
                            '0.7': 10
                        },
                        'ok_count': 50,
                        'err_count': 0,
                    }
                },
            },
            {
                'scenario': {
                    'name': 'test-scenario',
                    'type': 'test-scenario-type',
                    'metrics': {
                        'call_durations': {
                            '1.0': 10,
                            '0.8': 10,
                            '0.9': 10,
                            '1.1': 10,
                            '0.7': 10
                        },
                        'ok_count': 40,
                        'err_count': 10,
                    }
                },
            }
        ]
        latency_result = request_latency(results, 1)
        # 20/100 requests took more than 1 second.
        self.assertEqual(latency_result, 0.8)

    def test_handle_cputime_metric_creates_multiple_samples(self):
        """
        handle_cputime_metric creates multiple sample objects, each with
        one value, from a single sample containing many values. It does
        not create a sample object from the '-- WALL --' key but adds
        this value to every other sample.
        """
        wallclock_key = '-- WALL --'
        common_props = {
            'version': '1.10.1',
            'scenario': 'default'
        }
        sample = {
            'value': {
                '10.0.0.1': {
                    'process1': 3,
                    wallclock_key: 102.123
                },
                '10.0.0.2': {
                    'process1': 2,
                    'process2': 5,
                    wallclock_key: 124.462
                }
            }
        }
        expected_samples = [
            {'process': 'process1', 'value': 3, 'wallclock': 102.123},
            {'process': 'process1', 'value': 2, 'wallclock': 124.462},
            {'process': 'process2', 'value': 5, 'wallclock': 124.462},
        ]
        for s in expected_samples:
            s.update(common_props)
        samples = handle_cputime_metric(common_props, sample)
        # NOTE(review): sorting a list of dicts relies on Python 2 dict
        # ordering; under Python 3 `sorted` raises TypeError here — confirm
        # before porting (assertCountEqual / assertItemsEqual would be the
        # order-independent alternative).
        self.assertEqual(
            sorted(samples),
            sorted(expected_samples)
        )

    def test_handle_wallclock_metrics_creates_sample(self):
        """
        handle_wallclock_metric returns a list containing a single
        sample object with the value from the original sample.
        """
        common_props = {
            'version': '1.10.1',
            'scenario': 'default'
        }
        sample = {
            'value': 12
        }
        expected_samples = [
            {'value': 12}
        ]
        for s in expected_samples:
            s.update(common_props)
        samples = handle_wallclock_metric(common_props, sample)
        # NOTE(review): same Python-3 sorted-dicts caveat as above.
        self.assertEqual(
            sorted(samples),
            sorted(expected_samples)
        )
| StarcoderdataPython |
5001856 | <filename>ms-refsnumber.py
#!/usr/bin/python
"""
This bot creates pages listing count of references to article from list:
Wikipedysta:Andrzei111/nazwiska z inicjałem
Call: python pwb.py masti/ms-refsnumber.py -page:'Wikipedysta:Andrzei111/nazwiska z inicjałem'
-summary:"Bot aktualizuje stronę" -outpage:'Wikipedysta:Andrzei111/nazwiska z inicjałem/licznik'
Use global -simulate option for test purposes. No changes to live wiki
will be done.
The following parameters are supported:
-always The bot won't ask for confirmation when putting a page
-text: Use this text to be added; otherwise 'Test' is used
-replace: Don't add text but replace it
-top Place additional text on top of the page
-summary: Set the action summary message for the edit.
This sample script is a
:py:obj:`ConfigParserBot <pywikibot.bot.ConfigParserBot>`. All settings can be
made either by giving option with the command line or with a settings file
which is scripts.ini by default. If you don't want the default values you can
add any option you want to change to that settings file below the [basic]
section like:
[basic] ; inline comments starts with colon
# This is a commend line. Assignments may be done with '=' or ':'
text: A text with line break and
continuing on next line to be put
replace: yes ; yes/no, on/off, true/false and 1/0 is also valid
summary = Bot: My first test edit with pywikibot
Every script has its own section with the script name as header.
In addition the following generators and filters are supported but
cannot be set by settings file:
¶ms;
"""
#
# (C) Pywikibot team, 2006-2021
#
# Distributed under the terms of the MIT license.
#
import pywikibot
from pywikibot import pagegenerators
from pywikibot.bot import (
AutomaticTWSummaryBot,
ConfigParserBot,
ExistingPageBot,
NoRedirectPageBot,
SingleSiteBot,
)
import re
import datetime
from pywikibot import textlib
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {'¶ms;': pagegenerators.parameterHelp} # noqa: N816
class BasicBot(
    # Refer pywikobot.bot for generic bot classes
    SingleSiteBot,  # A bot only working on one site
    ConfigParserBot,  # A bot which reads options from scripts.ini setting file
    # CurrentPageBot,  # Sets 'current_page'. Process it in treat_page method.
    #                  # Not needed here because we have subclasses
    ExistingPageBot,  # CurrentPageBot which only treats existing pages
    NoRedirectPageBot,  # CurrentPageBot which only treats non-redirects
    AutomaticTWSummaryBot,  # Automatically defines summary; needs summary_key
):
    """
    An incomplete sample bot.

    :ivar summary_key: Edit summary message key. The message that should be
        used is placed on /i18n subdirectory. The file containing these
        messages should have the same name as the caller script (i.e. basic.py
        in this case). Use summary_key to set a default edit summary message.
    :type summary_key: str
    """

    summary_key = 'basic-changing'
    # NOTE(review): mutable class attribute shared by all instances; it is not
    # referenced anywhere in this file — confirm whether it can be removed.
    results = {}
    # Bucket boundaries (threshold, label) — apparently unused in this file.
    ranges = [(100, '100+'),
              (50, '50-99'),
              (40, '40-49'),
              (30, '30-39'),
              (25, '25-29'),
              (22, '22-24'),
              (20, '20-21'),
              (18, '18-19'),
              (16, '16-17'),
              (15, '15'),
              (14, '14'),
              (13, '13'),
              (12, '12'),
              (11, '11'),
              (10, '10'),
              (5, '5+'),
              ]
    update_options = {
        'replace': False,  # delete old text and write the new text
        'summary': None,  # your own bot summary
        'text': 'Test',  # add this text from option. 'Test' is default
        'top': False,  # append text on top of the page
        'outpage': 'User:mastiBot/test',  # default output page
        'maxlines': 1000,  # default number of entries per page
        'testprint': False,  # print testoutput
        'negative': False,  # if True negate behavior i.e. mark pages that DO NOT contain search string
        'test': False,  # test printouts
        'testlinks': False,  # test printouts
        'progress': False,  # show progress
        'resprogress': False,  # show progress in generating results
        'minlinks': 50,  # print only >minlinks results
        'reset': False,  # reset saved data
    }

    def run(self):
        """Scan the generator pages and write the counting table to outpage."""
        # prepare new page
        # replace @@ with number of pages
        header = 'Lista linkujących do artykułów z listy na stronie [[Wikipedysta:Andrzei111/nazwiska z inicjałem]].\n\n'
        header += "Ta strona jest okresowo uaktualniana przez [[Wikipedysta:MastiBot|MastiBota]]. Ostatnia aktualizacja przez bota: '''~~~~~'''. \n\n"
        header += 'Wszelkie uwagi proszę zgłaszać w [[Dyskusja_Wikipedysty:Masti|dyskusji operatora]].\n\n'
        header += '{| class="wikitable sortable"\n|-\n'
        header += '! Nr !! nazwa !! odwołania !! krótka nazwa !! odwołania\n|-\n'
        # footer = '\n\n[[Kategoria:Najbardziej potrzebne strony]]'
        footer = '|}\n'
        counter = 1
        for page in self.generator:
            if self.opt.progress:
                pywikibot.output('%s #%i Treating:%s' % (
                    datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), counter, page.title(as_link=True)))
            refs = self.treat(page)
            counter += 1
        # NOTE(review): only the refs of the LAST generated page survive the
        # loop — this presumes the generator yields exactly one list page;
        # confirm this is intended for multi-page input.
        return self.generateresultspage(refs, self.opt.outpage, header, footer)

    def generateresultspage(self, redirlist, pagename, header, footer):
        """
        Generates results page from redirlist
        Starting with header, ending with footer
        Output page is pagename
        """
        finalpage = header
        # res = sorted(redirlist, key=redirlist.__getitem__, reverse=True)
        res = redirlist
        if self.opt.test:
            pywikibot.output('***** INPUT *****')
            pywikibot.output(redirlist)
            pywikibot.output('***** RESULT *****')
            pywikibot.output(res)
        linkcount = 0
        for i in res:
            linkcount += 1
            # One table row per entry: index, long title + refcount,
            # short title + refcount (with Polish plural suffix).
            finalpage += "| %i || [[%s]] || %s || [[%s]] || %s\n|-\n" % (linkcount, i['long'], str(i['refl']) +
                                                                        ' link' + self.suffix(i['refl']), i['short'],
                                                                        str(i['refs']) + ' link' + self.suffix(
                                                                            i['refs']))
        finalpage += footer
        if self.opt.test:
            pywikibot.output('***** FINALPAGE *****')
            pywikibot.output(finalpage)
        outpage = pywikibot.Page(pywikibot.Site(), pagename)
        outpage.text = finalpage
        outpage.save(summary=self.opt.summary)
        # if self.opt.test:
        #     pywikibot.output(redirlist)
        return res

    def suffix(self, count):
        """Return the Polish plural suffix for 'link' given a count."""
        strcount = str(count)
        if count == 1:
            return ''
        elif strcount[-1] in ('2', '3', '4') and (count > 20 or count < 10):
            # e.g. 2, 23, 154 -> "linki"
            return 'i'
        else:
            # e.g. 5, 11, 20 -> "linków"
            return 'ów'

    def savepart(self, body, suffix, pagename, header, footer):
        """Save one sub-page (pagename/suffix); '@@' in header becomes suffix.

        NOTE(review): not called anywhere in this file — possibly kept for a
        page-splitting mode; confirm before removing.
        """
        if self.opt.test:
            pywikibot.output('***** FINALPAGE *****')
            pywikibot.output(body)
        outpage = pywikibot.Page(pywikibot.Site(), pagename + '/' + suffix)
        outpage.text = re.sub(r'@@', suffix, header) + body + footer
        outpage.save(summary=self.opt.summary)
        # if self.opt.test:
        #     pywikibot.output(redirlist)
        return

    def treat(self, page):
        """Parse '[[short]] || [[long]]' rows on page; count refs to each title."""
        # get all linkedPages
        # check for disambigs
        linksR = re.compile('\[\[(?P<short>[^\]]*)\]\] *\|\| *\[\[(?P<long>[^\]]*)\]\]')
        res = []
        counter = 0
        if self.opt.test:
            pywikibot.output('Treat(%s)' % page.title(as_link=True))
        for p in linksR.finditer(textlib.removeDisabledParts(page.text)):
            counter += 1
            longn = p.group('long')
            shortn = p.group('short')
            if self.opt.testlinks:
                pywikibot.output('[%s][#%i] S:%s L:%s' % (
                    datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), counter, shortn, longn))
            rpl = pywikibot.Page(pywikibot.Site(), longn)
            rplcount = len(list(rpl.getReferences(namespaces=0)))
            if self.opt.testlinks:
                pywikibot.output('L:%s #%i In %s checking:%s - referenced by %i' %
                                 (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), counter,
                                  page.title(as_link=True), rpl.title(as_link=True), rplcount))
            rps = pywikibot.Page(pywikibot.Site(), shortn)
            rpscount = len(list(rps.getReferences(namespaces=0)))
            if self.opt.testlinks:
                pywikibot.output('S:%s #%i In %s checking:%s - referenced by %i' %
                                 (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), counter,
                                  page.title(as_link=True), rps.title(as_link=True), rpscount))
            res.append({"long": longn, "refl": rplcount, "short": shortn, "refs": rpscount})
        # NOTE(review): looks like leftover debug output — consider removing
        # or routing through pywikibot.output.
        print(res)
        return res
def main(*args: str) -> None:
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    :param args: command line arguments
    """
    options = {}
    # Process global arguments to determine desired site
    local_args = pywikibot.handle_args(args)

    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine on which pages
    # to work on.
    gen_factory = pagegenerators.GeneratorFactory()

    # Process pagegenerators arguments
    local_args = gen_factory.handle_args(local_args)

    # Parse your own command line arguments
    for arg in local_args:
        arg, sep, value = arg.partition(':')
        option = arg[1:]
        if option in ('summary', 'text', 'outpage', 'maxlines', 'minlinks'):
            if not value:
                # NOTE(review): the prompted value is discarded — presumably
                # this should be `value = pywikibot.input(...)`; confirm.
                pywikibot.input('Please enter a value for ' + arg)
            options[option] = value
        # take the remaining options as booleans.
        # You will get a hint if they aren't pre-defined in your bot class
        else:
            options[option] = True

    # The preloading option is responsible for downloading multiple
    # pages from the wiki simultaneously.
    gen = gen_factory.getCombinedGenerator(preload=True)
    if gen:
        # pass generator and private options to the bot
        bot = BasicBot(generator=gen, **options)
        bot.run()  # guess what it does
    else:
        pywikibot.bot.suggest_help(missing_generator=True)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
11315147 | import numpy as np
import pandas as pd
from scipy import stats
from rdkit.Chem import RDKFingerprint
from rdkit.Chem import AllChem
from rdkit.Chem import MACCSkeys
from rdkit.Chem import DataStructs
from mordred import Calculator, descriptors
from drug_learning.two_dimensions.Input import base_class as bc
from drug_learning.two_dimensions.Errors import errors as er
class MorganFP(bc.Fingerprint):
    """Morgan (ECFP-like, radius 2, 2048-bit) fingerprint featurizer."""

    # Suffix appended to output artifacts produced for this featurizer.
    fp_name = "_MorganFP"

    def transform(self):
        """Compute one 2048-bit Morgan fingerprint row per input molecule.

        Molecules that RDKit cannot fingerprint are skipped entirely (they
        appear in neither ``features`` nor ``mol_names``).
        """
        super().transform()
        fts = []
        self.mol_names = []
        for mol in self.structures:
            try:
                fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)
            except Exception:  # was a bare `except:` — also swallowed SystemExit/KeyboardInterrupt
                continue
            arr = np.zeros((0,), dtype=np.int8)
            DataStructs.ConvertToNumpyArray(fp, arr)
            fts.append(arr)
            self.mol_names.append(mol.GetProp("_Name"))
        self.features = np.array(fts)
        self.columns = [str(i) for i in list(range(self.features.shape[1]))]
        return self.features
class MACCS_FP(bc.Fingerprint):
    """MACCS structural-keys fingerprint featurizer."""

    # Suffix appended to output artifacts produced for this featurizer.
    fp_name = "_MACCS_FP"

    def transform(self):
        """Compute one MACCS-keys fingerprint row per input molecule."""
        super().transform()
        bit_rows = []
        names = []
        for mol in self.structures:
            dense = np.zeros((0,), dtype=np.int8)
            DataStructs.ConvertToNumpyArray(MACCSkeys.GenMACCSKeys(mol), dense)
            bit_rows.append(dense)
            names.append(mol.GetProp("_Name"))
        self.mol_names = names
        self.features = np.array(bit_rows)
        self.columns = [str(col) for col in range(self.features.shape[1])]
        return self.features
class RDkitFP(bc.Fingerprint):
    """RDKit topological (path-based) fingerprint featurizer."""

    # Suffix appended to output artifacts produced for this featurizer.
    fp_name = "_RDkitFP"

    def transform(self):
        """Compute one RDKit topological fingerprint row per input molecule."""
        super().transform()
        bit_rows = []
        names = []
        for mol in self.structures:
            dense = np.zeros((0,), dtype=np.int8)
            DataStructs.ConvertToNumpyArray(RDKFingerprint(mol), dense)
            bit_rows.append(dense)
            names.append(mol.GetProp("_Name"))
        self.mol_names = names
        self.features = np.array(bit_rows)
        self.columns = [str(col) for col in range(self.features.shape[1])]
        return self.features
class UnfoldedRDkitFP(bc.Fingerprint):
    """Count-based unfolded RDKit fingerprint projected onto a fixed vocabulary."""

    # Suffix appended to output artifacts produced for this featurizer.
    fp_name = "_UnfoldedRDkitFP"

    def __init__(self, voc):
        """
        :param voc: path to an ``.npy`` file (numpy.save) with the fragment
            vocabulary; each fragment becomes one feature column.
        :raises er.NotVocabularyUnfolded: when no vocabulary path is given.
        :raises er.IncorrectFormat: when the file is not an ``.npy`` file.
        """
        # `os` is not imported at module level in this file; import locally so
        # the extension check below does not raise NameError.
        import os
        super().__init__()
        if not voc:
            raise er.NotVocabularyUnfolded("Vocabulary (--voc) must be pass to use unfolded rdkit fingerprints")
        _, file_extension = os.path.splitext(voc)
        if file_extension != ".npy":
            raise er.IncorrectFormat("Vocabulary must be an npy file (numpy.save)")
        self.voc = np.load(voc)

    def transform(self):
        """Compute per-molecule fragment counts aligned to the vocabulary order."""
        super().transform()
        fts = []
        self.mol_names = []
        for mol in self.structures:
            fingerprint = []
            fp = AllChem.UnfoldedRDKFingerprintCountBased(mol)
            fpDict = fp.GetNonzeroElements()
            # Missing fragments contribute a count of 0 so every row has the
            # same length as the vocabulary.
            for fragment in self.voc:
                if fragment in fpDict:
                    fingerprint.append(fpDict[fragment])
                else:
                    fingerprint.append(0)
            fts.append(np.array(fingerprint))
            self.mol_names.append(mol.GetProp("_Name"))
        self.features = np.array(fts)
        self.columns = [str(i) for i in list(range(self.features.shape[1]))]
        return self.features
class MordredFP(bc.Fingerprint):
    """2D Mordred molecular-descriptor featurizer."""

    fp_name = "_MordredFP"

    def transform(self):
        """Compute all 2D Mordred descriptors for ``self.structures``."""
        super().transform()
        self.mol_names = []
        calc = Calculator(descriptors, ignore_3D=True)
        self.df = calc.pandas(self.structures)
        self.columns = self.df.columns
        self.features = self.df.values
        self.mol_names = [mol.GetProp("_Name") for mol in self.structures]
        return self.features

    def clean(self):
        """Drop non-numeric, extreme-valued and all-outlier descriptor columns.

        NOTE(review): ``pd`` (pandas) and ``stats`` (presumably scipy.stats)
        are not among this module's visible imports — confirm they are
        imported at the top of the file.
        """
        # Coerce everything to numeric; failed conversions become NaN.
        self.df = self.df.apply(pd.to_numeric, errors="coerce")
        self.df = self.df.astype("float64")
        # Treat implausibly large values as missing, then drop any column
        # containing a missing value.
        self.df[self.df > 1e3] = None
        self.df = self.df.dropna(axis=1)
        # mask is True for columns where *no* value lies within 2 standard
        # deviations — i.e. columns made entirely of outliers are dropped.
        mask = ~np.any(np.abs(stats.zscore(self.df)) < 2, axis=0)
        self.df = self.df.drop(columns=self.df.columns[mask])
        self.columns = self.df.columns
        self.features = self.df.values
        return self.features
| StarcoderdataPython |
1984113 | <filename>physicslib/unit.py
"""Unit class and unit constants."""
from typing import Final
from . import dimension
from .formating import superscripted
def convert_float(func):
    """Decorator for Unit binary operators.

    Promotes a plain-number right-hand operand to a dimensionless ``Unit``
    with that coefficient before delegating to the wrapped operator.
    """
    def wrapper(self, other):
        coerced = other if isinstance(other, Unit) else Unit(other)
        return func(self, coerced)
    return wrapper
class Unit:
    """Unit of measurement: a numeric coefficient times a physical dimension.

    Binary operators accept either another ``Unit`` or a plain number, which
    the ``convert_float`` decorator promotes to a dimensionless unit.
    NOTE: defining ``__eq__`` without ``__hash__`` makes instances unhashable.
    """

    def __init__(self, coefficient: float = 1, dim: dimension.Dimension = dimension.SCALAR):
        self.coefficient = coefficient
        # Copy so in-place operators on this unit never mutate shared
        # dimension constants such as dimension.SCALAR.
        self.dimension = dim.copy()

    def __repr__(self):
        return f"Unit({self.coefficient}, {repr(self.dimension)})"

    def __str__(self):
        string = ""
        if self.coefficient == -1:
            string += "-"
        elif self.coefficient != 1:
            string += str(self.coefficient)
            if self.dimension != dimension.SCALAR:
                # Separate the coefficient from the unit symbols with "⋅".
                string += "\u22C5"
        base_units = ["m", "kg", "s", "A", "K", "mol", "cd"]
        for i in range(dimension.Dimension.BASE_DIMENSIONS_NUMBER):
            if self.dimension.data[i] == 0:
                continue
            if string and string[-1].isalpha():
                string += "\u22C5"
            string += base_units[i]
            if self.dimension.data[i] != 1:
                string += superscripted(self.dimension.data[i])
        return string if string else "scalar_unit"

    @convert_float
    def __mul__(self, other):
        return Unit(self.coefficient * other.coefficient, self.dimension * other.dimension)

    @convert_float
    def __rmul__(self, other):
        return self * other

    @convert_float
    def __imul__(self, other):
        self.coefficient *= other.coefficient
        self.dimension *= other.dimension
        return self

    @convert_float
    def __truediv__(self, other):
        return Unit(self.coefficient / other.coefficient, self.dimension / other.dimension)

    @convert_float
    def __rtruediv__(self, other):
        return other / self

    @convert_float
    def __itruediv__(self, other):
        self.coefficient /= other.coefficient
        self.dimension /= other.dimension
        return self

    def __pow__(self, power):
        return Unit(self.coefficient ** power, self.dimension ** power)

    def __ipow__(self, power):
        self.coefficient **= power
        self.dimension **= power
        return self

    @convert_float
    def __eq__(self, other):
        # BUG FIX: the original compared self.coefficient with *itself*
        # (``self.coefficient == self.coefficient``), so any two units of the
        # same dimension compared equal regardless of scale (METER == KILOMETER).
        return self.coefficient == other.coefficient and self.dimension == other.dimension

    def copy(self):
        """Return an independent copy of this unit."""
        return Unit(self.coefficient, self.dimension)
# Base SI units
ONE: Final = Unit()
METER: Final = Unit(1, dimension.LENGTH)
SECOND: Final = Unit(1, dimension.TIME)
KILOGRAM: Final = Unit(1, dimension.MASS)
# NOTE(review): conventional spelling is "AMPERE"; renaming would break
# importers of this module, so the name is kept as-is.
AMPER: Final = Unit(1, dimension.AMPERAGE)
KELVIN: Final = Unit(1, dimension.TEMPERATURE)
MOLE: Final = Unit(1, dimension.SUBSTANCE_AMOUNT)
CANDELA: Final = Unit(1, dimension.LUMINOUS_INTENSITY)
# Derived SI units
NEWTON: Final = Unit(1, dimension.FORCE)
PASCAL: Final = Unit(1, dimension.PRESSURE)
JOULE: Final = Unit(1, dimension.ENERGY)
WATT: Final = Unit(1, dimension.POWER)
COULOMB: Final = Unit(1, dimension.ELECTRIC_CHARGE)
VOLT: Final = Unit(1, dimension.ELECTRIC_POTENTIAL)
OHM: Final = Unit(1, dimension.ELECTRIC_RESISTANCE)
FARAD: Final = Unit(1, dimension.CAPACITANCE)
# other units (scaled multiples of the base units above)
KILOMETER: Final = 1e3 * METER
DECIMETER: Final = 1e-1 * METER
CENTIMETER: Final = 1e-2 * METER
MILLIMETER: Final = 1e-3 * METER
MINUTE: Final = 60 * SECOND
HOUR: Final = 60 * MINUTE
MILLISECOND: Final = 1e-3 * SECOND
GRAM: Final = 0.001 * KILOGRAM
TON: Final = 1000 * KILOGRAM
| StarcoderdataPython |
5041515 |
from collections import OrderedDict
from rlkit.core.timer import timer
from rlkit.core import logger
import torch
import ray
import os
class RayVAETrainer:
    """Thin epoch-driver around a VAE trainer for use with Ray.

    Delegates the actual training/evaluation to ``trainer`` and tracks the
    epoch counter; ``_train`` runs one epoch and reports (stats, done).
    """

    def __init__(self, trainer, train_dataset, test_dataset, variant, num_epochs):
        self.t = trainer
        self.train_dataset = train_dataset
        self.test_dataset = test_dataset
        self.variant = variant
        self._start_epoch = 0
        self.epoch = self._start_epoch
        self.num_epochs = num_epochs

    def _train(self):
        """Run one train epoch plus evaluation; checkpoint model weights.

        Returns:
            (train_stats, done): merged train/test statistics dict and a flag
            that is True once ``self.epoch`` has reached ``num_epochs``.
        """
        epoch = self.epoch
        variant = self.variant
        t = self.t
        save_period = variant['save_period']
        should_save_imgs = (epoch % save_period == 0)
        train_stats = t.train_epoch(epoch)
        # Evaluate on the test split and fold its stats into the train stats.
        test_stats = t.test_epoch(epoch, save_vae=False, train=False, record_stats=True, batches=1,
                                  save_reconstruction=should_save_imgs)
        train_stats.update(test_stats)
        # Second pass on the train split only to dump reconstruction images.
        t.test_epoch(epoch, save_vae=False, train=True, record_stats=False, batches=1,
                     save_reconstruction=should_save_imgs)
        # BUG FIX: the original passed an anonymous open() handle to
        # torch.save and never closed it; use a context manager instead.
        with open(os.path.join(os.getcwd(), 'model_params.pkl'), "wb") as params_file:
            torch.save(t.model.state_dict(), params_file)
        done = epoch == self.num_epochs
        return train_stats, done

    def to(self, device):
        """Move the underlying model to *device*."""
        self.t.model.to(device)

    def _begin_epoch(self):
        timer.reset()

    def _end_epoch(self):
        self.epoch += 1
| StarcoderdataPython |
12805189 | <reponame>egonrian/google-research<gh_stars>1-10
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for found TVNs."""
TVN1 = {
"num_frames": 2,
"num_blocks": 4,
"frame_stride": 4,
"blocks": [{
"temporal_kernel": 2,
"current_size": [192, 2],
"skip": 0,
"context_gate": 1,
"non_local": 0,
"spatial_kernel": 4,
"temporal_type": "maxpool",
"repeats": 2,
"temporal_stride": 2,
"filters": 64,
"spatial_type": "std",
"spatial_stride": 2,
"squeeze": 0.0,
"expand": 4
}, {
"temporal_kernel": 1,
"current_size": [64.0, 1.0],
"skip": 0,
"context_gate": 0,
"non_local": 0,
"spatial_kernel": 3,
"temporal_type": "1d",
"repeats": 1,
"temporal_stride": 1,
"filters": 128,
"spatial_type": "maxpool",
"spatial_stride": 2,
"squeeze": 0.9067600752736579,
"expand": 4
}, {
"temporal_kernel": 1,
"current_size": [22.0, 1.0],
"skip": 1,
"context_gate": 1,
"non_local": 0, # We removed the non-local layer from this module
# to further improve runtime.
"spatial_kernel": 5,
"temporal_type": "avgpool",
"repeats": 2,
"temporal_stride": 1,
"filters": 128,
"spatial_type": "depth",
"spatial_stride": 3,
"squeeze": 0.0,
"expand": 4
}, {
"temporal_kernel": 1,
"current_size": [8.0, 1.0],
"skip": 0,
"context_gate": 1,
"non_local": 0,
"spatial_kernel": 3,
"temporal_type": "avgpool",
"repeats": 2,
"temporal_stride": 1,
"filters": 256,
"spatial_type": "std",
"spatial_stride": 2,
"squeeze": 0.7265534208526172,
"expand": 8
}],
"image_size": 224,
"total_layers": 0
}
TVN2 = {
"num_frames":
2,
"image_size":
256,
"total_layers":
0,
"num_blocks":
7,
"frame_stride":
7,
"blocks": [{
"spatial_type": "depth",
"repeats": 3,
"spatial_kernel": 5,
"context_gate": 1,
"spatial_stride": 3,
"expand": 4,
"temporal_type": "1d",
"filters": 64,
"non_local": 0,
"temporal_kernel": 2,
"skip": 1,
"temporal_stride": 1,
"current_size": [256, 2],
"squeeze": 0.06667482492426025
}, {
"spatial_type": "std",
"repeats": 5,
"spatial_kernel": 4,
"context_gate": 1,
"spatial_stride": 2,
"expand": 4,
"temporal_type": "maxpool",
"filters": 128,
"non_local": 0,
"temporal_kernel": 2,
"skip": 1,
"temporal_stride": 2,
"current_size": [52.0, 2],
"squeeze": 0.693452618624749
}, {
"spatial_type": "depth",
"repeats": 6,
"spatial_kernel": 4,
"context_gate": 0,
"spatial_stride": 1,
"expand": 1,
"temporal_type": "1d",
"filters": 1024,
"non_local": 0,
"temporal_kernel": 1,
"skip": 0,
"temporal_stride": 1,
"current_size": [26.0, 1.0],
"squeeze": 0.0
}, {
"spatial_type": "avgpool",
"repeats": 1,
"spatial_kernel": 3,
"context_gate": 1,
"spatial_stride": 2,
"expand": 4,
"temporal_type": "1d",
"filters": 256,
"non_local": 0,
"temporal_kernel": 1,
"skip": 0,
"temporal_stride": 1,
"current_size": [26.0, 1.0],
"squeeze": 0.0
}, {
"spatial_type": "avgpool",
"repeats": 6,
"spatial_kernel": 4,
"context_gate": 0,
"spatial_stride": 3,
"expand": 4,
"temporal_type": "1d",
"filters": 512,
"non_local": 0,
"temporal_kernel": 1,
"skip": 0,
"temporal_stride": 1,
"current_size": [26.0, 1.0],
"squeeze": 0.9472192794783585
}, {
"spatial_type": "depth",
"repeats": 4,
"spatial_kernel": 3,
"context_gate": 0,
"spatial_stride": 3,
"expand": 4,
"temporal_type": "maxpool",
"filters": 512,
"non_local": 0,
"temporal_kernel": 1,
"skip": 0,
"temporal_stride": 1,
"current_size": [9.0, 1.0],
"squeeze": 0.0
}, {
"spatial_type": "maxpool",
"repeats": 1,
"spatial_kernel": 2,
"context_gate": 1,
"spatial_stride": 1,
"expand": 6,
"temporal_type": "maxpool",
"filters": 348,
"non_local": 0,
"temporal_kernel": 1,
"skip": 0,
"temporal_stride": 1,
"current_size": [3.0, 1.0],
"squeeze": 0.0
}]
}
TVN3 = {
"num_frames":
8,
"image_size":
160,
"total_layers":
0,
"num_blocks":
4,
"frame_stride":
2,
"blocks": [{
"temporal_type": "1d",
"repeats": 2,
"spatial_kernel": 3,
"context_gate": 1,
"spatial_stride": 2,
"expand": 3,
"non_local": 0,
"spatial_type": "depth",
"filters": 64,
"temporal_kernel": 5,
"skip": 0,
"temporal_stride": 2,
"current_size": [64, 16],
"squeeze": 0.6457257964198263
}, {
"temporal_type": "1d",
"repeats": 4,
"spatial_kernel": 3,
"context_gate": 0,
"spatial_stride": 2,
"expand": 5,
"non_local": 0,
"spatial_type": "depth",
"filters": 256,
"temporal_kernel": 1,
"skip": 1,
"temporal_stride": 1,
"current_size": [64.0, 6.0],
"squeeze": 0.9061418818635367
}, {
"temporal_type": "maxpool",
"repeats": 4,
"spatial_kernel": 5,
"context_gate": 0,
"spatial_stride": 4,
"expand": 2,
"non_local": 0,
"spatial_type": "depth",
"filters": 256,
"temporal_kernel": 3,
"skip": 1,
"temporal_stride": 2,
"current_size": [64.0, 6.0],
"squeeze": 0.0
}, {
"temporal_type": "maxpool",
"repeats": 4,
"spatial_kernel": 3,
"context_gate": 1,
"spatial_stride": 2,
"expand": 5,
"non_local": 0,
"spatial_type": "std",
"filters": 512,
"temporal_kernel": 3,
"skip": 0,
"temporal_stride": 3,
"current_size": [16.0, 3.0],
"squeeze": 0.0
}]
}
TVN4 = {
"num_frames":
8,
"image_size":
128,
"total_layers":
0,
"num_blocks":
3,
"frame_stride":
4,
"blocks": [{
"temporal_type": "1d",
"repeats": 5,
"spatial_kernel": 5,
"context_gate": 0,
"spatial_stride": 2,
"expand": 4,
"non_local": 0,
"spatial_type": "std",
"filters": 64,
"temporal_kernel": 3,
"skip": 1,
"temporal_stride": 2,
"squeeze": 0.4775900391242449,
"current_size": [64, 8]
}, {
"temporal_type": "maxpool",
"repeats": 4,
"spatial_kernel": 5,
"context_gate": 1,
"spatial_stride": 2,
"expand": 6,
"non_local": 0,
"spatial_type": "depth",
"filters": 256,
"temporal_kernel": 2,
"skip": 0,
"temporal_stride": 2,
"squeeze": 0.0,
"current_size": [64.0, 8.0]
}, {
"temporal_type": "1d",
"repeats": 4,
"spatial_kernel": 5,
"context_gate": 1,
"spatial_stride": 3,
"expand": 8,
"non_local": 0,
"spatial_type": "depth",
"filters": 256,
"temporal_kernel": 1,
"skip": 0,
"temporal_stride": 1,
"squeeze": 0.25309102981899967,
"current_size": [32.0, 4.0]
}]
}
TVN_MOBILE_1 = {
"blocks": [{
"non_local": 0,
"context_gate": 0,
"spatial_kernel": 5,
"temporal_act": "relu",
"temporal_kernel": 2,
"spatial_act": "relu",
"temporal_type": "1d",
"output_size": [56.0, 2.0],
"squeeze": 0.004598785768716973,
"inputs": [0],
"inv-bottle": 0,
"current_size": [224, 2],
"spatial_stride": 4,
"input_size": [224, 2],
"spatial_type": "depth",
"temporal_stride": 1,
"filters": 32,
"act": "relu",
"repeats": 4,
"expand": 3,
"skip": 0
}, {
"non_local": 0,
"context_gate": 0,
"spatial_kernel": 5,
"temporal_act": "hswish",
"temporal_kernel": 2,
"spatial_act": "hswish",
"temporal_type": "maxpool",
"output_size": [56.0, 2.0],
"squeeze": 0.8589741047143076,
"inputs": [1],
"inv-bottle": 1,
"current_size": [224, 2],
"spatial_stride": 4,
"input_size": [224, 2],
"spatial_type": "depth",
"temporal_stride": 1,
"filters": 128,
"act": "relu",
"repeats": 4,
"expand": 6,
"skip": 0
}],
"input_streams": [{
"frame_stride": 1,
"num_frames": 2,
"image_size": 224,
"current_size": [224, 2]
}],
}
TVN_MOBILE_2 = {
"blocks": [{
"non_local": 0,
"context_gate": 0,
"spatial_kernel": 5,
"temporal_act": "relu",
"temporal_kernel": 2,
"spatial_act": "hswish",
"output_size": [56.0, 2.0],
"spatial_type": "depth",
"inputs": [0],
"temporal_type": "1d",
"spatial_stride": 4,
"skip": 0,
"inv-bottle": 0,
"input_size": [224, 2],
"squeeze": 0.004598785768716973,
"temporal_stride": 1,
"filters": 32,
"act": "relu",
"repeats": 4,
"expand": 3,
"current_size": [224, 2]
}, {
"non_local": 0,
"context_gate": 0,
"spatial_kernel": 5,
"temporal_act": "hswish",
"temporal_kernel": 2,
"spatial_act": "hswish",
"output_size": [56.0, 2.0],
"spatial_type": "depth",
"inputs": [1],
"temporal_type": "maxpool",
"spatial_stride": 4,
"skip": 0,
"inv-bottle": 1,
"input_size": [224, 2],
"squeeze": 0.8589741047143076,
"temporal_stride": 1,
"filters": 128,
"act": "relu",
"repeats": 3,
"expand": 6,
"current_size": [224, 2]
}],
"input_streams": [{
"frame_stride": 1,
"num_frames": 2,
"image_size": 224,
"current_size": [224, 2]
}],
}
| StarcoderdataPython |
1753795 | #!/usr/bin/env python3
import pandas as pd
import requests
import time
import json
import os
headers = {"Authorization": "Bearer {:}".format('')}
if __name__ == "__main__":
    # Resolve Twitter numeric ids for the handles listed in column 1 of the
    # CSV; results are written back into columns 2 (username) and 3 (id).
    df = pd.read_csv("missing_twitter_with_handles2.csv", header=None)
    df.insert(3, "twitter_id", ['' for k in df.index], True)
    last_time = 0
    for ind in df.index:
        # Hard-coded resume point: rows before 1000 were handled previously.
        if ind < 1000:
            continue
        # Periodic checkpoint so progress survives interruption.
        if (ind + 1) % 500 == 0:
            print("Saving to csv")
            df.to_csv("missing_twitter_with_handles2.csv", index = False, header = None)
        if ind % 25 == 0:
            print(ind)
        # Derive a candidate handle by stripping separators from the name.
        s = str(df.iloc[ind, 1]).strip().replace(" ", "").replace(".", "").replace("&", "").replace("-", "").lower()
        twitter_user_request_string = 'https://api.twitter.com/2/users/by/username/{:}?user.fields=id,name,verified,description,protected,public_metrics,location'
        # Simple client-side rate limiting: keep at least ~3s between calls.
        curr_time = time.time()
        diff_delay = curr_time - last_time - 3
        if diff_delay < 0:
            time.sleep(-1*diff_delay + 0.1)
        user_r = requests.get(twitter_user_request_string.format(s), headers=headers)
        last_time = time.time()
        if user_r.status_code != 200:
            print("Unable to perform HTTP request for ID: {:}".format(s))
            print("Status code: ", user_r.status_code)
            continue
        json_data = json.loads(user_r.text)
        if 'errors' in json_data:
            print("Unable to find ID: {:}".format(s))
            continue
        # print(json_data)
        # Keep only accounts that are verified or have >= 10k followers.
        if not json_data["data"]["verified"] and json_data["data"]["public_metrics"]["followers_count"] < 10000:
            print("User {:} does not meet arbitrary criteria".format(json_data["data"]["username"]))
            continue
        df.iloc[ind, 2] = json_data["data"]["username"]
        df.iloc[ind, 3] = json_data["data"]["id"]
    # Final save after the loop completes.
    df.to_csv("missing_twitter_with_handles2.csv", index = False, header = None)
| StarcoderdataPython |
4837420 | <gh_stars>1-10
#!/usr/bin/env python
# NOTE(review): this script is Python 2 only (print *statement*, dict.iteritems).
import os
import sys
import jinja2
import opendbc
from common.dbc import dbc

# Generate a C++ CAN parser from a DBC file via the jinja2 template.
if len(sys.argv) != 3:
    print "usage: %s dbc_path struct_path" % (sys.argv[0],)
    sys.exit(0)

dbc_fn = sys.argv[1]
out_fn = sys.argv[2]
template_fn = os.path.join(os.path.dirname(__file__), "dbc_template.cc")

can_dbc = dbc(dbc_fn)
with open(template_fn, "r") as template_f:
    template = jinja2.Template(template_f.read(), trim_blocks=True, lstrip_blocks=True)

# Sort key puts COUNTER/CHECKSUM signals first (False sorts before True).
msgs = [(address, msg_name, sorted(msg_sigs, key=lambda s: s.name not in ("COUNTER", "CHECKSUM")))  # process counter and checksums first
        for address, ((msg_name, _), msg_sigs) in sorted(can_dbc.msgs.iteritems()) if msg_sigs]

# Honda/Acura DBCs use a dedicated checksum algorithm in the template.
checksum_type = "honda" if can_dbc.name.startswith("honda") or can_dbc.name.startswith("acura") else None
parser_code = template.render(dbc=can_dbc, checksum_type=checksum_type, msgs=msgs, len=len)

with open(out_fn, "w") as out_f:
    out_f.write(parser_code)
| StarcoderdataPython |
6580821 | <gh_stars>1-10
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import re
from aws_cdk import core
def format_aws_resource_name(feature_name: str, project_name: str, env: core.Environment, resource_type: str):
    """Build the region-qualified resource name '<project>-<feature>-<type>-<region>'."""
    return "{}-{}-{}-{}".format(project_name, feature_name, resource_type, env.region)
def format_aws_resource_id(feature_name: str, project_name: str, env: core.Environment, resource_type: str):
    """Build the region-qualified resource id '<project><feature><type>Id<region>'."""
    return "{}{}{}Id{}".format(project_name, feature_name, resource_type, env.region)
def format_aws_resource_sid(feature_name: str, project_name: str, resource_type: str):
    """Build an IAM statement id: statement ids allow only alphanumerics,
    so every other character is stripped from the assembled name."""
    raw_sid = "{}{}{}SId".format(project_name, feature_name, resource_type)
    return re.sub(r'[^a-zA-Z0-9]', '', raw_sid)
def format_aws_resource_authenticated_id(feature_name: str, project_name: str, env: core.Environment,
                                         resource_type: str, authenticated: bool):
    """Build a resource id suffixed by the Cognito role kind and region."""
    role_kind = 'Authenticated' if authenticated else 'Unauthenticated'
    return "{}{}{}Id{}-{}".format(project_name, feature_name, resource_type, role_kind, env.region)
def format_aws_resource_authenticated_name(feature_name: str, project_name: str, env: core.Environment,
                                           resource_type: str, authenticated: bool):
    """Build a resource name suffixed by the Cognito role kind and region."""
    role_kind = 'Authenticated' if authenticated else 'Unauthenticated'
    return "{}{}{}{}-{}".format(project_name, feature_name, resource_type, role_kind, env.region)
| StarcoderdataPython |
8103762 | <gh_stars>0
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch
# Import salt libs
import salt.utils
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TestWhich(TestCase):
    '''
    Tests salt.utils.which function to ensure that it returns True as
    expected.
    '''

    # The mock patch below will make sure that ALL calls to the which function
    # returns None
    @patch('salt.utils.which', lambda exe: None)
    def test_missing_binary_in_linux(self):
        # NOTE(review): which() itself is patched here, so this exercises the
        # lambda rather than the real implementation — confirm that is intended.
        self.assertTrue(
            salt.utils.which('this-binary-does-not-exist') is None
        )

    # The mock patch below will make sure that ALL calls to the which function
    # return whatever is sent to it
    @patch('salt.utils.which', lambda exe: exe)
    def test_existing_binary_in_linux(self):
        self.assertTrue(salt.utils.which('this-binary-exists-under-linux'))

    # The mock patch below, since we're not providing the return value, we
    # will be able to tweak it within the test case. The testcase MUST accept
    # an argument which is the MagicMock'ed object
    @patch('os.access')
    def test_existing_binary_in_windows(self, osaccess):
        # We define the side_effect attribute on the mocked object in order to
        # specify which calls return which values. First call to os.access
        # returns X, the second Y, the third Z, etc...
        osaccess.side_effect = [
            # The first os.access should return False(the abspath one)
            False,
            # The second, iterating through $PATH, should also return False,
            # still checking for Linux
            False,
            # Lastly return True, this is the windows check.
            True
        ]
        # Let's patch os.environ to provide a custom PATH variable
        with patch.dict(os.environ, {'PATH': '/bin'}):
            # Let's also patch is_windows to return True
            with patch('salt.utils.is_windows', lambda: True):
                with patch('os.path.isfile', lambda x: True):
                    self.assertEqual(
                        salt.utils.which('this-binary-exists-under-windows'),
                        # The returned path should return the .exe suffix
                        '/bin/this-binary-exists-under-windows.EXE'
                    )

    @patch('os.access')
    def test_missing_binary_in_windows(self, osaccess):
        osaccess.side_effect = [
            # The first os.access should return False(the abspath one)
            False,
            # The second, iterating through $PATH, should also return False,
            # still checking for Linux
            # which() will add 4 extra paths to the given one, os.access will
            # be called 5 times
            False, False, False, False, False
        ]
        # Let's patch os.environ to provide a custom PATH variable
        with patch.dict(os.environ, {'PATH': '/bin'}):
            # Let's also patch is_widows to return True
            with patch('salt.utils.is_windows', lambda: True):
                self.assertEqual(
                    # Since we're passing the .exe suffix, the last True above
                    # will not matter. The result will be None
                    salt.utils.which('this-binary-is-missing-in-windows.exe'),
                    None
                )

    # The mock patch below, since we're not providing the return value, we
    # will be able to tweak it within the test case. The testcase MUST accept
    # an argument which is the MagicMock'ed object
    @patch('os.access')
    def test_existing_binary_in_windows_pathext(self, osaccess):
        # We define the side_effect attribute on the mocked object in order to
        # specify which calls return which values. First call to os.access
        # returns X, the second Y, the third Z, etc...
        osaccess.side_effect = [
            # The first os.access should return False(the abspath one)
            False,
            # The second, iterating through $PATH, should also return False,
            # still checking for Linux
            False,
            # We will now also return False 3 times so we get a .CMD back from
            # the function, see PATHEXT below.
            # Lastly return True, this is the windows check.
            False, False, False,
            True
        ]
        # Let's patch os.environ to provide a custom PATH variable
        with patch.dict(os.environ, {'PATH': '/bin',
                                     'PATHEXT': '.COM;.EXE;.BAT;.CMD;.VBS;'
                                                '.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.PY'}):
            # Let's also patch is_windows to return True
            with patch('salt.utils.is_windows', lambda: True):
                with patch('os.path.isfile', lambda x: True):
                    self.assertEqual(
                        salt.utils.which('this-binary-exists-under-windows'),
                        # The returned path should return the .exe suffix
                        '/bin/this-binary-exists-under-windows.CMD'
                    )
| StarcoderdataPython |
9648874 | from JumpScale import j
import time
class RedisDB:
    """JumpScale factory for redis-backed object lists (``j.data.redisdb``)."""

    def __init__(self):
        self.__jslocation__ = "j.data.redisdb"

    def get(self, path, expiration=None):
        """Return a :class:`RedisDBList` stored under the redis hash key *path*.

        @param path: colon-separated hash key, e.g. ``"root1:child1"``
        @param expiration: optional TTL in seconds re-applied to the hash on
            every write; when None, results are cached in-process instead
        """
        return RedisDBList(path, expiration)

    def _test(self):
        # Minimal self-test: set/get/iterate/find/delete round trips.
        llist = self.get("root1:child1")
        llist.delete()
        data = {"a": "b"}
        llist.set(data, "akey")
        print("iterator:")
        counter = 0
        for item in llist:
            counter += 1
            print(item)
        print("did you see 1 item")
        assert(counter == 1)
        assert data == llist.get("akey").struct
        assert llist.len() == 1
        # Setting the same key again must not grow the list.
        llist.set(data, "akey")
        assert llist.len() == 1
        llist.set(data, "akey2")
        assert llist.len() == 2
        llist.delete()
        # now tests around id
        for i in range(10):
            data = {"a": "b", "id": str(i), "aval": i}
            llist.set(data, "akey%s" % i)
        print(llist.get(id="5"))
        # NOTE(review): these find() assertions fail with the current
        # RedisDBList.find implementation (see note there) — confirm.
        res = llist.find(id="5")
        assert len(res) == 1
        res = llist.find(id="5")
        assert res[0].struct["id"] == "5"
        res = llist.find(aval=5)
        assert len(res) == 1
class RedisDBObj:
    """A single dict object stored as one field of a redis hash."""

    def __init__(self, llist, path, id=""):
        self._list = llist   # owning RedisDBList (consulted for expiration/cache)
        self.db = j.core.db
        self.path = path     # redis hash key this object lives in
        self._struct = {}
        self._id = id        # lazily filled from struct["id"] when empty

    @property
    def id(self):
        # Lazy: fall back to the "id" field of the stored struct.
        if self._id == "":
            self._id = self.struct["id"]
        return self._id

    @property
    def struct(self):
        """The deserialized dict, always read fresh from redis.

        :raises j.exceptions.RuntimeError: when the field does not exist.
        """
        data = self.db.hget(self.path, self.id)
        if data is None:
            raise j.exceptions.RuntimeError(
                "could not find object %s:%s" % (self.path, self.id))
        obj = j.data.serializer.json.loads(data)
        if "id" in obj:
            self._id = obj["id"]
        return obj

    @struct.setter
    def struct(self, val):
        # Only plain dicts are accepted; serialized deterministically so equal
        # dicts always produce identical stored bytes.
        if j.data.types.dict.check(val) is False:
            raise j.exceptions.RuntimeError("only dict supported")
        self.db.hset(self.path, self.id,
                     j.data.serializer.json.dumps(val, sort_keys=True))
        if self._list._expiration:
            # Re-arm the TTL on every write so the whole hash expires together.
            self.db.expire(self.path, self._list._expiration)
        else:
            self._list._list = {}  # will reload

    def __repr__(self):
        return j.data.serializer.json.dumps(self.struct, sort_keys=True, indent=True)

    __str__ = __repr__
class RedisDBList:
    """An ordered collection of dict objects stored in a single redis hash."""

    def __init__(self, path, expiration=None):
        self.db = j.core.db
        self.path = path
        self._list = {}  # in-process cache: field name -> RedisDBObj
        self._expiration = expiration

    @property
    def list(self):
        """All objects in the hash, sorted by field name.

        When an expiration is set the in-process cache cannot be trusted
        (redis may have expired the hash), so keys are re-read every access.
        """
        if self._expiration or not self._list:
            keys = sorted(self.db.hkeys(self.path))
            for name in keys:
                self._list[name] = RedisDBObj(self, self.path, name)
        keys = sorted(self._list.keys())
        res = []
        for key in keys:
            res.append(self._list[key])
        return res

    def exists(self, id):
        """True when a field *id* exists in the hash."""
        return self.db.hexists(self.path, id)

    def get(self, id):
        """Return a (lazy) RedisDBObj handle for field *id*."""
        obj = RedisDBObj(self, self.path, id)
        return obj

    def set(self, data, id=""):
        """Store *data* (a dict) under *id*; defaults to ``data['id']``."""
        if j.data.types.dict.check(data) is False:
            raise j.exceptions.RuntimeError("only dict supported")
        if not id:
            id = data['id']
        obj = RedisDBObj(self, self.path, id)
        obj.struct = data
        self._list = {}  # invalidate cache; next access reloads
        return obj

    def find(self, **filters):
        """Return objects whose struct matches every key/value in *filters*.

        BUG FIX: the original tested ``if id and item.id != id`` where ``id``
        was the *builtin function* (no local was ever bound), so the condition
        was always true and every item was skipped — find() always returned
        an empty list. An ``id=...`` filter is handled like any other field
        by the generic struct comparison below. The ``**filter`` parameter
        was also renamed to avoid shadowing the builtin.
        """
        res = []
        for item in self.list:
            match = True
            for key, val in filters.items():
                if item.struct[key] != val:
                    match = False
                    break
            if match:
                res.append(item)
        return res

    def delete(self):
        """Remove the whole hash from redis and clear the cache."""
        self.db.delete(self.path)
        self._list = {}

    def remove(self, id):
        """Remove a single field from the hash and the cache."""
        self.db.hdel(self.path, id)
        self._list.pop(id, None)

    def __iter__(self):
        return self.list.__iter__()

    def len(self):
        # With expiration the cache may be stale, so ask redis directly.
        if self._expiration:
            return self.db.hlen(self.path)
        else:
            return len(self.list)

    def __bool__(self):
        return self.len() != 0

    def __repr__(self):
        out = ""
        for item in self.list:
            out += "%s %s\n" % (self.path, item.id)
        if out == "":
            out = "Empty list %s" % (self.path)
        return out

    __str__ = __repr__
| StarcoderdataPython |
35727 | # Get arxiv data
import json
import logging
import os
import pickle
from collections import Counter
from datetime import datetime
from io import BytesIO
from zipfile import ZipFile
import numpy as np
import pandas as pd
import requests
from kaggle.api.kaggle_api_extended import KaggleApi
from eurito_indicators import PROJECT_DIR
from eurito_indicators.pipeline.clustering_naming import make_doc_comm_lookup
from eurito_indicators.pipeline.processing_utils import covid_getter
GRID_PATH = f"{PROJECT_DIR}/inputs/data/grid"
CORD_META_PATH = f"{PROJECT_DIR}/inputs/data/metadata.csv.zip"
DISC_QUERY = f"{PROJECT_DIR}/inputs/data/arxiv_discipline.csv"
COV_PAPERS_PATH = f"{PROJECT_DIR}/inputs/data/arxiv_papers_covid.csv"
def get_arxiv_articles():
    """Load arXiv (and CORD) article metadata with a derived month_year column.

    ``month_year`` is the first day of the publication month, or NaN when
    the ``created`` timestamp is missing. Returns a trimmed set of columns.
    """
    art = pd.read_csv(
        f"{PROJECT_DIR}/inputs/data/arxiv_articles_v2.csv",
        dtype={"id": str},
        parse_dates=["created"],
    )
    art = art.rename(columns={"id": "article_id"})
    # Truncate each creation date to the first of its month; keep NaT as NaN.
    # (idiom fix: `not pd.isnull(x)` instead of `pd.isnull(x) == False`)
    art["month_year"] = [
        datetime(x.year, x.month, 1) if not pd.isnull(x) else np.nan
        for x in art["created"]
    ]
    selected_columns = [
        "article_id",
        "created",
        "month_year",
        "title",
        "journal_ref",
        "doi",
        "authors",
        "abstract",
        "mag_id",
        "citation_count",
        "article_source",
    ]
    return art[selected_columns]
def get_arxiv_institutes():
    """Lookup between paper ids and GRID organisation ids."""
    return pd.read_csv(
        f"{PROJECT_DIR}/inputs/data/arxiv_article_institutes_updated.csv",
        dtype={"article_id": str, "institute_id": str},
    )
def get_article_categories():
    """Article to arXiv-category assignments."""
    categories_path = f"{PROJECT_DIR}/inputs/data/arxiv_article_categories.csv"
    return pd.read_csv(categories_path, dtype={"article_id": str})
def get_arxiv_w2v():
    """Load the pickled arXiv word2vec model from the models directory."""
    with open(f"{PROJECT_DIR}/outputs/models/arxiv_w2v.p", "rb") as model_file:
        return pickle.load(model_file)
def fetch_grid():
    """Download and unpack the GRID dataset if it is not already on disk."""
    if not os.path.exists(GRID_PATH):  # idiom fix: was `... is False`
        logging.info("Collecting Grid data")
        os.makedirs(GRID_PATH, exist_ok=True)
        g = requests.get("https://ndownloader.figshare.com/files/28431024")
        # Fail loudly on a bad download instead of raising a confusing
        # BadZipFile from the line below.
        g.raise_for_status()
        g_z = ZipFile(BytesIO(g.content))
        g_z.extractall(GRID_PATH)
def fetch_cord_meta():
    """Download the CORD-19 metadata.csv from Kaggle if not already cached."""
    if not os.path.exists(CORD_META_PATH):  # idiom fix: was `... is False`
        logging.info("Fetching cord data")
        api = KaggleApi()
        api.authenticate()
        api.dataset_download_file(
            "allen-institute-for-ai/CORD-19-research-challenge",
            "metadata.csv",
            path=f"{PROJECT_DIR}/inputs/data",
        )
def get_cord_metadata():
    """Parse CORD metadata into date-quality helpers.

    Returns:
        meta_bad_date: set of "cord-<uid>" ids whose publish_time has only a
            year (no "-" separator), i.e. no usable month/day.
        meta_year: dict mapping "cord-<uid>" to its publication year.
    """
    meta = pd.read_csv(f"{PROJECT_DIR}/inputs/data/metadata.csv.zip", compression="zip")
    meta_has_date = meta.dropna(axis=0, subset=["publish_time"])
    meta_bad_date = set(
        [
            f"cord-{_id}"
            for _id, date in zip(
                meta_has_date["cord_uid"], meta_has_date["publish_time"]
            )
            if "-" not in date
        ]
    )
    # Year is the leading component of "YYYY-MM-DD", or the whole value when
    # only a year was recorded.
    meta_year = {
        f"cord-{_id}": int(date.split("-")[0]) if "-" in date else int(date)
        for _id, date in zip(meta_has_date["cord_uid"], meta_has_date["publish_time"])
    }
    return meta_bad_date, meta_year
def get_covid_papers():
    """Build (or load from cache) the combined Covid-19 papers table.

    Combines arXiv and CORD articles that match the covid keyword filter,
    de-duplicating CORD papers and filling the ``month_year`` column where
    a detailed publication date exists. The result is cached at
    ``COV_PAPERS_PATH`` on first build.
    """
    if os.path.exists(COV_PAPERS_PATH) is False:
        logging.info("Making covid papers")
        arts = get_arxiv_articles()
        logging.info("processing arxiv papers")
        # Keep non-CORD papers whose title+abstract mention covid terms.
        arxiv_covid = (
            arts.query("article_source!='cord'")
            .dropna(axis=0, subset=["abstract","title"])
            .assign(text = lambda df: [" ".join([x,y]) for x,y in zip(df['title'],df['abstract'])])
            .assign(has_cov=lambda df: [covid_getter(text) for text in df["text"]])
            .query("has_cov == True")
        )
        arxiv_covid["month_year"] = [
            datetime(x.year, x.month, 1) for x in arxiv_covid["created"]
        ]
        arxiv_covid["year"] = [x.year for x in arxiv_covid["month_year"]]
        logging.info("processing cord papers")
        cord = (
            arts.query("article_source=='cord'")
            .dropna(axis=0, subset=["abstract"])
            .assign(has_cov=lambda df: [covid_getter(text) for text in df["abstract"]])
            .query("has_cov == True")
            .assign(
                journal_ref=lambda df: [
                    x.lower() if type(x) == str else np.nan for x in df["journal_ref"]
                ]
            )
        )
        # Preprint-server rows duplicate the published versions; drop them,
        # then drop any remaining title duplicates.
        cord = cord.loc[~cord["journal_ref"].isin(["biorxiv", "medrxiv"])]
        cord = cord.drop_duplicates("title")
        meta_bad_date, meta_year = get_cord_metadata()
        cord["year"] = cord["article_id"].map(meta_year)
        # month_year only when the CORD record has a full date and a parsed
        # timestamp; year-only records get NaN.
        cord["month_year"] = [
            datetime(d.year, d.month, 1)
            if (_id not in meta_bad_date) & (not pd.isnull(d))
            else np.nan
            for _id, d in zip(cord["article_id"], cord["created"])
        ]
        papers = (
            pd.concat([arxiv_covid, cord], axis=0)
            .reset_index(drop=True)
            .drop(axis=1, labels=["has_cov"])
        )
        papers.to_csv(COV_PAPERS_PATH, index=False)
        return papers
    else:
        return pd.read_csv(
            COV_PAPERS_PATH,
            dtype={"article_id": str},
            parse_dates=["created", "month_year"],
        )
def get_grid_meta():
    """Merge the GRID tables into one institute-metadata dataframe.

    Joins institutes, addresses, types and geonames on grid_id (geonames by
    city, left join so institutes without geo data are kept) and returns a
    trimmed set of location/type columns.
    """
    name, address, org_type, geo = [
        pd.read_csv(f"{GRID_PATH}/full_tables/{n}.csv")
        for n in ["institutes", "addresses", "types", "geonames"]
    ]
    merged = (
        name.merge(address, on="grid_id")
        .merge(org_type, on="grid_id")
        .merge(geo, on=["geonames_city_id", "city"], how="left")
    )
    grid_meta = merged[
        [
            "grid_id",
            "name",
            "lat",
            "lng",
            "city",
            "country",
            "country_code",
            "type",
            "nuts_level1_code",
            "nuts_level2_code",
            "nuts_level3_code",
        ]
    ]
    return grid_meta
def query_arxiv_institute():
    """Combine the arXiv paper-institute lookup with GRID metadata."""
    grid_meta = get_grid_meta()
    institutes = get_arxiv_institutes()
    return institutes.merge(grid_meta, left_on="institute_id", right_on="grid_id")
def get_arxiv_tokenised():
    """Load the tokenised arXiv abstracts from their JSON cache."""
    with open(f"{PROJECT_DIR}/inputs/data/arxiv_tokenised.json", "r") as json_file:
        return json.load(json_file)
def get_arxiv_fos():
    """Article to MAG field-of-study id lookup."""
    fos_path = f"{PROJECT_DIR}/inputs/data/arxiv_article_fields_of_study.csv"
    return pd.read_csv(fos_path, dtype={"article_id": str, "fos_id": int})
def get_children(values):
    """Parse a comma-separated id string into a list of ints.

    MAG stores child field-of-study ids as a comma-separated string; missing
    values arrive as NaN (a float), for which ``np.nan`` is returned.
    (idiom fix: ``isinstance`` instead of ``type(values) is str``)
    """
    if isinstance(values, str):
        return [int(token) for token in values.split(",")]
    return np.nan
def make_fos_l0_lookup():
    """Creates a lookup between all MAG fos levels and the top level of the taxonomy"""
    logging.info("Reading data")
    fos_taxon = pd.read_csv(f"{PROJECT_DIR}/inputs/data/mag_fields_of_study.csv")
    # id -> human-readable field-of-study name, used to label both columns below.
    id_name_lookup = fos_taxon.set_index("id")["name"].to_dict()
    # id -> list of immediate child ids (np.nan for leaves; see get_children).
    all_children = {
        _id: get_children(values)
        for _id, values in zip(fos_taxon["id"], fos_taxon["child_ids"])
    }
    # Ids of the top (level 0) disciplines everything is mapped onto.
    fos_0 = fos_taxon.loc[fos_taxon["level"] == 0]["id"].tolist()
    fos_lu = {}
    logging.info("Finding children categories")
    # We recursively look for the children of level 0s at different levels of the taxonomy
    for f in fos_0:
        children = all_children[f].copy()
        # Walk levels 1-4; `children` grows while we iterate over the level
        # snapshots, so each pass picks up the previous pass's additions.
        for level in range(1, 5):
            table = fos_taxon.loc[fos_taxon["id"].isin(children)].query(
                f"level=={level}"
            )
            for _id in table["id"]:
                try:
                    for ch in all_children[_id]:
                        children.append(ch)
                except BaseException:
                    # Leaf nodes map to np.nan, which is not iterable.
                    # NOTE(review): BaseException is very broad — TypeError
                    # looks like the targeted catch here; confirm before narrowing.
                    pass
        # A field can sit under several level-0 disciplines, so keep a list.
        for c in children:
            if c not in fos_lu.keys():
                fos_lu[c] = [f]
            else:
                fos_lu[c].append(f)
    logging.info("Creating dataframe")
    # Explode so each (fos_id, level-0 parent) pair gets its own row.
    fos_lu_df = pd.DataFrame(
        {"fos_id": fos_lu.keys(), "fos_l0": fos_lu.values()}
    ).explode("fos_l0")
    fos_lu_df["fos_id_name"], fos_lu_df["fos_l0_name"] = [
        fos_lu_df[var].map(id_name_lookup) for var in ["fos_id", "fos_l0"]
    ]
    return fos_lu_df
def query_article_discipline():
    """Returns a lookup between articles and high level disciplines.

    The article -> discipline table is computed once and cached as CSV at
    DISC_QUERY; subsequent calls read the cached file instead.
    """
    # `not os.path.exists(...)` replaces the non-idiomatic `... is False`.
    if not os.path.exists(DISC_QUERY):
        arxiv_fos = get_arxiv_fos()
        fos_lu_df = make_fos_l0_lookup()
        arxiv_f0 = arxiv_fos.merge(fos_lu_df, on="fos_id")
        logging.info("Finding top discipline")
        # An article can map to several level-0 fields; keep the modal one.
        arxiv_discipline = (
            arxiv_f0.groupby("article_id")["fos_l0_name"]
            .apply(lambda x: Counter(x).most_common(1)[0][0])
            .reset_index(drop=False)
        )
        arxiv_discipline.to_csv(DISC_QUERY, index=False)
        return arxiv_discipline
    # Cache hit: article_id must stay a string to match the other tables.
    return pd.read_csv(DISC_QUERY, dtype={"article_id": str})
def get_arxiv_topic_model():
    """Load the fitted TopSBM topic model pickled for the arXiv sample."""
    model_path = f"{PROJECT_DIR}/outputs/models/topsbm_arxiv_sampled.p"
    with open(model_path, "rb") as infile:
        return pickle.load(infile)
# NOTE(review): exact duplicate of the get_arxiv_tokenised defined earlier in
# this module — this later definition shadows the first; consider removing one.
def get_arxiv_tokenised():
    # Loads the pre-tokenised arXiv corpus from its JSON file.
    with open(f"{PROJECT_DIR}/inputs/data/arxiv_tokenised.json", "r") as infile:
        return json.load(infile)
def get_ai_results():
    """Load the pickled outputs of the find-AI analysis step."""
    results_path = f"{PROJECT_DIR}/outputs/data/find_ai_outputs.p"
    with open(results_path, "rb") as infile:
        return pickle.load(infile)
def get_cluster_names():
    """Load the cluster id -> name lookup, with integer keys."""
    names_path = f"{PROJECT_DIR}/outputs/data/aux/arxiv_cluster_names.json"
    with open(names_path, 'r') as infile:
        raw_lookup = json.load(infile)
    # JSON object keys are strings; restore the integer cluster ids.
    return {int(cluster_id): name for cluster_id, name in raw_lookup.items()}
def get_cluster_ids():
    """Map each paper id to the human-readable name of its cluster."""
    lookup_path = f"{PROJECT_DIR}/inputs/data/arxiv_cluster_lookup.json"
    with open(lookup_path, 'r') as infile:
        paper_to_cluster = json.load(infile)
    names = get_cluster_names()
    return {
        paper_id: names[cluster_id]
        for paper_id, cluster_id in paper_to_cluster.items()
    }
if __name__ == "__main__":
    # Script entry point. fetch_grid is defined elsewhere in this module —
    # presumably downloads/unpacks the GRID dataset; confirm at its definition.
    fetch_grid()
| StarcoderdataPython |
8117825 | <filename>back/test/models/test_social_helpers.py
import pytest
from back.models import helper
from back.models.social import Like, DisLike
from back.exceptions import _SchemaLoadError
def test_add_like(user_data, like_data, db_session):
    """A like attached to a freshly created user is persisted."""
    assert not db_session.query(Like).all()
    like_data["user_id"] = helper.add_user(user_data)["id"]
    new_like_id = helper.add_like(like_data)["id"]
    assert db_session.query(Like).get(new_like_id)
def test_add_like_missing_data(user_data, like_data, db_session):
    """add_like rejects payloads that lack user_id or place_id."""
    assert not db_session.query(Like).all()
    like_data["user_id"] = helper.add_user(user_data)["id"]
    # Each payload keeps exactly one required field, so the other is missing.
    for kept_field in ("place_id", "user_id"):
        with pytest.raises(_SchemaLoadError):
            helper.add_like({kept_field: like_data[kept_field]})
        assert not db_session.query(Like).all()
def test_delete_like(user_data, like_data, db_session):
    """Deleting a like removes it from the database."""
    like_data["user_id"] = helper.add_user(user_data)["id"]
    target_id = helper.add_like(like_data)["id"]
    assert db_session.query(Like).get(target_id)
    helper.delete_like(target_id)
    assert db_session.query(Like).get(target_id) is None
def test_add_dislike(user_data, dislike_data, db_session):
    """A dislike attached to a freshly created user is persisted."""
    assert not db_session.query(DisLike).all()
    dislike_data["user_id"] = helper.add_user(user_data)["id"]
    new_dislike_id = helper.add_dislike(dislike_data)["id"]
    assert db_session.query(DisLike).get(new_dislike_id)
def test_add_dislike_missing_data(user_data, dislike_data, db_session):
    """add_dislike rejects payloads that lack user_id or place_id."""
    assert not db_session.query(DisLike).all()
    dislike_data["user_id"] = helper.add_user(user_data)["id"]
    # Each payload keeps exactly one required field, so the other is missing.
    for kept_field in ("place_id", "user_id"):
        with pytest.raises(_SchemaLoadError):
            helper.add_dislike({kept_field: dislike_data[kept_field]})
        assert not db_session.query(DisLike).all()
def test_delete_dislike(user_data, dislike_data, db_session):
    """Deleting a dislike removes it from the database."""
    dislike_data["user_id"] = helper.add_user(user_data)["id"]
    target_id = helper.add_dislike(dislike_data)["id"]
    assert db_session.query(DisLike).get(target_id)
    helper.delete_dislike(target_id)
    assert db_session.query(DisLike).get(target_id) is None
| StarcoderdataPython |
372629 | import unittest
class Employee:
    """A minimal employee record holding name, lab assignment and age."""

    def __init__(self, name, lab, age):
        """Store the employee attributes exactly as given."""
        self.name, self.lab, self.age = name, lab, age
# Module-level fixture instance exercised by the test case below.
john = Employee('john', 'computer lab', 40)
class Test(unittest.TestCase):
    """Checks the module-level Employee fixture."""

    def test(self):
        """The fixture keeps the name it was constructed with."""
        self.assertEqual(john.name, 'john')
if __name__=='__main__':
    # Discover and run the TestCase above when executed directly.
    unittest.main()
302517 | <reponame>AndersenLab/liftover-utils<filename>liftover/liftover.py
"""
Usage:
liftover.py <file> <release1> <release2> (bcf|vcf|gff|bed|refflat)
liftover.py <file> <release1> <release2> <chrom_col> <start_pos_column> [<end_pos_column>] [options]
Options:
-h --help Show this screen.
--delim=<delim> File Delimiter; Default is a tab [default: TAB].
"""
import sys
import os
import tempfile
import subprocess
from subprocess import *
# Piping helper: writes lines to stdout, tolerating a closed downstream pipe.
def pipe_out(line):
    """Write *line* (plus a newline) to stdout, tolerating a closed pipe.

    When a downstream consumer (e.g. ``head``) has closed the pipe, the
    write raises IOError; in that case both standard streams are closed
    quietly instead of crashing the program.
    """
    try:
        sys.stdout.write(line + "\n")
    except IOError:
        for stream in (sys.stdout, sys.stderr):
            try:
                stream.close()
            except IOError:
                pass
def unzip_gz(filename):
    """Decompress *filename* into a temporary file when it is gzipped.

    Returns the path of the decompressed temp file, or the original path
    unchanged when the name does not end in ``.gz``.

    NOTE(review): the path is interpolated straight into a shell command via
    os.system — unsafe for untrusted or space-containing paths; the stdlib
    gzip/shutil modules would avoid the shell entirely.
    """
    # For some files, unzip first as temp files.
    if (filename.endswith(".gz")):
        tmp_gz = tempfile.NamedTemporaryFile().name
        os.system("gunzip -c %s > %s" % (filename, tmp_gz))
        return tmp_gz
    else:
        return filename
def main():
    """CLI entry point: lift coordinates in <file> from <release1> to <release2>.

    Strategy: extract (chrom, start, end) triples from the input into a
    temporary GFF, run WormBase's remap_gff_between_releases.pl over it,
    then stream the original input back out with remapped positions
    substituted in place.

    NOTE(review): this function uses Python 2 constructs (`file()`,
    `xreadlines()`, the `print` statement) and shells out to `perl`,
    `bcftools` and `paste` — it will not run under Python 3 as-is.
    """
    from docopt import docopt
    arguments = docopt(__doc__, version='Liftover Utilities 0.1')
    # Check to see if CHROM DIFFs are available.
    #if os.path.isfile("remap_gff_between_releases.pl") == False:
    # os.system("wget ftp://ftp.sanger.ac.uk/pub2/wormbase/software/Remap-between-versions/remap.tar.bz2 && gunzip -f remap.tar.bz2 && tar -xf remap.tar")
    # os.system("mv Remap-for-other-groups/remap_gff_between_releases.pl remap_gff_between_releases.pl")
    # os.system("mv Remap-for-other-groups/CHROMOSOME_DIFFERENCES/ CHROMOSOME_DIFFERENCES/")
    # os.system("rm -f -r Remap-for-other-groups/")
    # os.remove("remap.tar")
    _ROOT = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
    # Define some necessary variables.
    release1, release2 = arguments["<release1>"], arguments["<release2>"]
    #gff_temp = tempfile.NamedTemporaryFile().name
    gff_temp = tempfile.NamedTemporaryFile().name
    gff_liftover_name = tempfile.NamedTemporaryFile().name
    gff = file(gff_temp, 'w')
    if arguments["--delim"] == "TAB":
        arguments["--delim"] = "\t"
    #
    # BCF / VCF
    #
    vcf = any([arguments["vcf"],arguments["bcf"]])
    if vcf:
        # Positions are pulled out of the BCF/VCF with bcftools query.
        # NOTE(review): uses sys.argv[1] rather than arguments["<file>"] —
        # works only when <file> is the first CLI argument; confirm.
        chrom_col, start_col, end_col = 0, 1, 1
        delim = "\t"
        bcf_pos = tempfile.NamedTemporaryFile().name
        os.system("bcftools query -f '%%CHROM\t%%POS\n' %s > %s" % (sys.argv[1], bcf_pos))
        variant_positions = file(bcf_pos,'r')
    elif arguments["gff"]:
        # GFF: chrom in col 1, start in col 4, end in col 5 (0-based below).
        chrom_col, start_col, end_col = 0, 3, 4
        delim = "\t"
        arguments["<file>"] = unzip_gz(arguments["<file>"])
        variant_positions = file(arguments["<file>"],'r')
    elif arguments["bed"]:
        chrom_col, start_col, end_col = 0, 1, 2
        delim = "\t"
        arguments["<file>"] = unzip_gz(arguments["<file>"])
        variant_positions = file(arguments["<file>"],'r')
    elif arguments["refflat"]:
        arguments["<file>"] = unzip_gz(arguments["<file>"])
        refflat = tempfile.NamedTemporaryFile().name
        # Process refflat file for liftover.
        # Each refflat record is flattened into one BED-like line per tx,
        # cds and exon interval, tagged with the originating row number `n`
        # so the intervals can be reassembled after remapping.
        with open(refflat, "w+") as ref:
            with open(arguments["<file>"]) as f:
                for n, l in enumerate(f):
                    l = l.strip().split("\t")
                    geneName = l[0]
                    name = l[1]
                    chrom = l[2]
                    strand = l[3]
                    txStart = l[4]
                    txEnd = l[5]
                    cdsStart = l[6]
                    cdsEnd = l[7]
                    exonCount = l[8]
                    exonStarts = l[9].strip(",").split(",")
                    exonEnds = l[10].strip(",").split(",")
                    # Write a line for starts and ends
                    ref.write("{chrom}\t{txStart}\t{txEnd}\ttx\t{n}\n".format(**locals()))
                    ref.write("{chrom}\t{cdsStart}\t{cdsEnd}\tcds\t{n}\n".format(**locals()))
                    exons = zip(exonStarts, exonEnds)
                    for exonStart, exonEnd in exons:
                        ref.write("{chrom}\t{exonStart}\t{exonEnd}\texon\t{n}\n".format(**locals()))
        delim = "\t"
        chrom_col, start_col, end_col = 0, 1, 2
        #arguments["<file>"] = "temp.txt"
        variant_positions = file(refflat,"r")
    else:
        # Generic delimited file: the user supplies 1-based column indices,
        # converted to 0-based here; end defaults to the start column.
        variant_positions = file(arguments["<file>"],'r')
        chrom_col, start_col = int(arguments["<chrom_col>"])-1, int(arguments["<start_pos_column>"])-1
        if arguments["<end_pos_column>"] is not None:
            end_col = int(arguments["<end_pos_column>"])-1
        else:
            end_col = int(arguments["<start_pos_column>"])-1
    # Convert every data line into a 9-column GFF record for the remapper;
    # the chrom/start/end triple is duplicated into the attribute columns so
    # the original coordinates survive for later cross-checking.
    for l in variant_positions.xreadlines():
        l = l.replace("\n","").split(arguments["--delim"])
        if l[0].startswith("#") == False and len(l) >= 2:
            if l[0].lower() == "chrm":
                l[0] = "CHROMOSOME_MtDNA"
            # Write out the coordinates in temporary gff file.
            line_out = "%s\t.\t.\t%s\t%s\t.\t+\t.\t%s\t%s\t%s\n" % tuple([l[chrom_col], l[start_col], l[end_col]]*2)
            gff.write(line_out)
    gff.close()
    # Generate Liftover Coordinates
    perl_script = _ROOT + "/remap_gff_between_releases.pl"
    # Accept releases given as e.g. "WS235" or "235".
    release1 = release1.upper().replace("WS","")
    release2 = release2.upper().replace("WS","")
    if int(release2) < int(release1):
        raise Exception("Can only lift forward")
    remap_command = "perl %s -gff=%s -release1=%s -release2=%s -output=%s" % (perl_script, gff_temp, release1, release2, gff_liftover_name)
    subprocess.check_output(remap_command, shell=True)
    gff_liftover = file(gff_liftover_name, 'r')
    # Replace original coordinates
    if vcf == True:
        # Stream the VCF back out, swapping each POS for its lifted value.
        # NOTE(review): `print line` echoes every raw line in addition to
        # pipe_out below — looks like leftover debugging; confirm.
        proc = Popen("bcftools view %s" % arguments["<file>"], stdout=PIPE, stdin=PIPE, shell=True)
        for line in proc.stdout:
            print line
            line = line.replace("\n", "")
            if line.startswith("#") == True:
                pipe_out(line)
            else:
                # Add checks
                l = gff_liftover.readline().replace("\n","").split("\t")
                # Column 9 holds the original position, column 3 the remapped one.
                pos_orig = l[9]
                pos_new = l[3]
                line = line.split("\t")
                if line[1] != pos_orig:
                    raise Exception("Coordinates Off")
                else:
                    line[1] = pos_new
                    pipe_out('\t'.join(line))
    elif arguments["refflat"]:
        orig_file = file(arguments["<file>"], 'r')
        # Organize liftover positions
        org_pos = dict()
        # NOTE(review): new_pos is never used below; confirm before removing.
        new_pos = [x.split("\t") for x in gff_liftover.read().strip().split("\n")]
        # Pair each remapped interval with its tag (tx/cds/exon) and row
        # number by pasting the liftover output against the flattened refflat.
        proc = Popen(["paste", gff_liftover_name, refflat ], stdout=PIPE, stderr=PIPE)
        for i in proc.stdout:
            i = i.strip().split("\t")
            n = int(i[15])
            line_type = i[14]
            if n not in org_pos:
                org_pos[n] = {}
            if line_type == "tx":
                org_pos[n]["tx"] = [i[3], i[4]]
            elif line_type == "cds":
                org_pos[n]["cds"] = [i[3], i[4]]
            elif line_type == "exon":
                if "exon" not in org_pos[n]:
                    org_pos[n]["exon"] = {}
                    org_pos[n]["exon"]["start"] = []
                    org_pos[n]["exon"]["end"] = []
                org_pos[n]["exon"]["start"].extend([i[3]])
                org_pos[n]["exon"]["end"].extend([i[4]])
        # Rebuild each refflat row with the remapped tx/cds/exon coordinates.
        for n,l in enumerate(orig_file.xreadlines()):
            l = l.strip().split("\t")
            l[4] = org_pos[n]["tx"][0]
            l[5] = org_pos[n]["tx"][1]
            l[6] = org_pos[n]["cds"][0]
            l[7] = org_pos[n]["cds"][1]
            l[9] = ','.join(org_pos[n]["exon"]["start"]) + ","
            l[10] = ','.join(org_pos[n]["exon"]["end"]) + ","
            pipe_out('\t'.join(l))
    else:
        # GFF/BED/generic: walk the input and the liftover output in lockstep.
        orig_file = file(arguments["<file>"], 'r')
        for line in orig_file.xreadlines():
            line = line.replace("\n", "")
            if line.startswith("#") == True or line.startswith(">") == True:
                pipe_out(line)
            else:
                # Add checks
                l = gff_liftover.readline().split(arguments["--delim"])
                # Ensure this isn't some strange line...
                if len(l) >= 2:
                    # Columns 9/10 hold original start/end; 3/4 the remapped ones.
                    pos_orig = l[9]
                    pos_new = l[3]
                    pos_end_orig = l[10]
                    pos_end_new = l[4]
                    line = line.split("\t")
                    if line[start_col] != pos_orig:
                        raise Exception("Coordinates Off")
                    else:
                        line[start_col] = pos_new
                        line[end_col] = pos_end_new
                        pipe_out('\t'.join(line))
                else:
                    pipe_out(line)
| StarcoderdataPython |
1835326 | <reponame>n0rel/self
import queue
from PyQt5 import QtCore, QtGui, QtWidgets
class tools(QtWidgets.QWidget):
    """Fixed-height toolbar widget that reports button presses via a queue."""
    def __init__(self, eventQueue: queue.Queue):
        """Build the toolbar.

        :param eventQueue: shared queue onto which UI events are published
            for another component (presumably the app controller — confirm
            at the consumer) to process.
        """
        QtWidgets.QWidget.__init__(self, None)
        self.setFixedHeight(50)
        self.eventQueue = eventQueue
        self.layout = QtWidgets.QHBoxLayout()
        self.setLayout(self.layout)
        self.homeButton = QtWidgets.QPushButton('Home')
        # The button's clicked signal is wired to the slot below.
        self.homeButton.clicked.connect(self.clicked)
        # TODO: add icon for home button
        self.layout.addWidget(self.homeButton)
    def clicked(self):
        """Slot for the Home button: publish a HOMEBUTTONPRESS event."""
        self.eventQueue.put({'EVENT': 'HOMEBUTTONPRESS', 'ARGS': []})
| StarcoderdataPython |
189353 | <reponame>codervikash/online-courses
#!/usr/bin/python
"""
Majority Element: A majority element in an array A[] of size n is an element that appears more than n/2 times (and hence there is at most one such element).
Write a function which takes an array and emits the majority element (if it exists), otherwise prints NONE as follows:
I/P : 3 3 4 2 4 4 2 4 4
O/P : 4
I/P : 3 3 4 2 4 4 2 4
O/P : NONE
Using Moore Voting Algorithm:
1. Get an element occurring most of the time in the array. This phase will make sure that if there is a majority element then it will return that only.
2. Check if the element obtained from above step is majority element.
"""
def majority_element(arr):
    """Return the majority element of *arr*, or None when there is none.

    A majority element appears strictly more than len(arr)/2 times.
    Uses the Boyer-Moore voting algorithm: one pass to pick a candidate,
    one pass to verify it. O(n) time, O(1) extra space.

    Bug fix: the previous verification returned the candidate as soon as
    its count reached len(arr)/2 (floor division), accepting elements that
    appear exactly half the time — e.g. [1, 3, 3, 1, 2, 3] wrongly gave 3.
    A true majority needs count > len(arr)/2.
    """
    candidate = None
    count = 0
    # Voting pass: a genuine majority element always survives as candidate.
    for value in arr:
        if count == 0:
            candidate = value
            count = 1
        elif value == candidate:
            count += 1
        else:
            count -= 1
    # Verification pass: the candidate is only valid if it truly occupies
    # strictly more than half the positions.
    if candidate is not None and arr.count(candidate) > len(arr) // 2:
        return candidate
    return None
# Demo: 1 and 3 each appear twice out of five elements — no majority.
arr = [1, 3, 3, 1, 2]
print 'majority element in array: ' + str(majority_element(arr))
# Demo: 3 appears three times out of six — exactly half, still no majority.
arr = [1, 3, 3, 1, 2, 3]
print 'majority element in array: ' + str(majority_element(arr))
# Development entry point for the probability calculator. Start by reading README.md.
import prob_calculator
from unittest import main
# Seed the calculator's RNG so the experiment below is reproducible.
prob_calculator.random.seed(95)
hat = prob_calculator.Hat(blue=3,red=2,green=6)
# Estimate P(drawing >= 2 blue and >= 1 green in 4 draws) over 1000 trials.
probability = prob_calculator.experiment(
    hat=hat, expected_balls={"blue":2,"green":1}, num_balls_drawn=4, num_experiments=1000)
print("Probability:", probability)
# Run unit tests automatically
main(module='test_module', exit=False)
| StarcoderdataPython |
1740902 | <reponame>XinchaoGou/MyLeetCode
from typing import List
class Solution:
    """Combination Sum II: unique combinations summing to a target."""

    def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return all unique combinations of *candidates* summing to *target*.

        Each candidate may be used at most once; duplicate values are
        skipped at each search depth so no combination appears twice.
        """
        candidates.sort()
        results: List[List[int]] = []
        stack: List[int] = []

        def backtrack(start: int, remaining: int) -> None:
            if remaining == 0:
                results.append(stack[:])
                return
            for idx in range(start, len(candidates)):
                # Skip repeated values at the same depth to avoid duplicates.
                if idx > start and candidates[idx] == candidates[idx - 1]:
                    continue
                value = candidates[idx]
                if remaining - value < 0:
                    # Candidates are sorted, so no later value can fit either.
                    break
                stack.append(value)
                backtrack(idx + 1, remaining - value)
                stack.pop()

        backtrack(0, target)
        return results
6676905 | <reponame>jfathi/document-understanding-solution<gh_stars>100-1000
import boto3
import json
import datetime
def convert_datetime_to_string(obj):
if isinstance(obj, datetime.datetime):
return obj.__str__()
def on_create(event, context):
kendra_client = boto3.client('kendra')
# get status of index
response = kendra_client.describe_index(Id=event['PhysicalResourceId'])
is_complete = False
status = response['Status']
if status == "CREATING":
print("Still creating kendra index")
elif status == "ACTIVE":
print("Kendra index is now active")
return { 'IsComplete': True }
elif status == "FAILED":
# throw an error
raise Exception("Kendra index creation failed with reason: {}".format(response['ErrorMessage']))
elif status == "DELETING" or status == "SYSTEM_UPDATING":
raise Exception("Kendra index creation shows inconsistent status code, please fix and try again. Reason:{}".format(response['ErrorMessage']))
return { 'IsComplete': is_complete }
def on_delete(event, context):
kendra_client = boto3.client('kendra')
DUSkendraIndexId = event['PhysicalResourceId']
# check if the list_indices has the index id, if yes, then check status
kendra_indices = kendra_client.list_indices()
kendra_indices = json.loads(json.dumps(kendra_indices,default = convert_datetime_to_string))
kendra_index_ids = []
for index in kendra_indices['IndexConfigurationSummaryItems']:
kendra_index_ids.append(index['Id'])
# if the index id is not present, it has been deleted
if DUSkendraIndexId not in kendra_index_ids:
print("Kendra index with id {} deleted".format(DUSkendraIndexId))
return {'IsComplete': True}
for indexId in kendra_index_ids:
if indexId == DUSkendraIndexId:
response = kendra_client.describe_index(Id=DUSkendraIndexId)
if response['Status'] == "DELETING":
print("DUSKendraIndex still deleting")
return {'IsComplete':False}
if response['Status'] == "FAILED":
# send the response as data to aws cloudformation
print("Delete of Kendra index with id {} failed with response {}".format(DUSkendraIndexId,response))
return {'IsComplete':True,'Data':response}
def on_update(event, context):
kendra_client = boto3.client('kendra')
# get status of index
response = kendra_client.describe_index(Id=event['PhysicalResourceId'])
is_complete = False
status = response['Status']
if status == "UPDATING":
print("Still updating kendra index")
elif status == "ACTIVE":
print("Kendra index is now updated & active")
return { 'IsComplete': True }
elif status == "FAILED":
raise Exception("Kendra index update failed with reason: {}".format(response['ErrorMessage']))
elif status == "DELETING" or status == "SYSTEM_UPDATING":
raise Exception("Kendra index update shows inconsistent status code, please fix and try again. Reason:{}".format(response['ErrorMessage']))
return { 'IsComplete': is_complete }
def lambda_handler(event, context):
print("Event: {}".format(event))
event_type = event['RequestType']
if event_type == 'Create': return on_create(event, context)
if event_type == 'Delete': return on_delete(event, context)
if event_type == 'Update': return on_update(event, context) | StarcoderdataPython |
3457358 | def reverse():
word=input("Word: ")
index=len(word)-1;
reversedWord=[];
while index>0:
reversedWord.append(word[index]);
index-=1;
reversedWord.append(word[0]);
polished="".join(reversedWord);
print (polished);
| StarcoderdataPython |
4807924 | import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import losses
def create_embeddings_matrix(vectorizer, embeddings_path, embedding_dim=100, mask_zero=True):
embeddings_index = {}
with open(embeddings_path) as f:
for line in f:
word, coefs = line.split(maxsplit=1)
coefs = np.fromstring(coefs, "f", sep=" ")
embeddings_index[word] = coefs
voc = vectorizer.get_vocabulary()
word_index = dict(zip(voc, range(len(voc))))
num_tokens = len(voc) + 2
hits = 0
embedding_matrix = np.zeros((num_tokens, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
hits += 1
print("Converted %d words from %d" % (hits, len(voc)))
return layers.Embedding(
num_tokens,
embedding_dim,
embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
trainable=False,
mask_zero=mask_zero
)
def create_model_lstm(embedding_layer, num_labels=3):
text_input = layers.Input(shape=(None,), name='text')
txt = embedding_layer(text_input)
txt = layers.Bidirectional(tf.keras.layers.LSTM(64, recurrent_dropout=0.5, dropout=0.5))(txt)
x = layers.Dropout(0.25)(txt)
out = layers.Dense(num_labels, activation='softmax')(x)
return tf.keras.Model(inputs=[text_input], outputs=[out])
def create_model_gru(embedding_layer, num_labels=3):
text_input = layers.Input(shape=(None,), name='text')
txt = embedding_layer(text_input)
txt = tf.keras.layers.GRU(128)(txt)
# txt = layers.Bidirectional(tf.keras.layers.GRU(64, recurrent_dropout=0.5, dropout=0.5))(txt)
series_input = layers.Input(shape=(None, num_labels), name='series')
series = layers.GRU(64, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)(series_input)
series = layers.GRU(64)(series)
series = layers.Reshape([-1])(series)
x = layers.concatenate([txt, series])
# txt = layers.Dropout(0.25)(x)
x = layers.Dense(64)(x)
x = layers.Dropout(0.2)(x)
out = layers.Dense(num_labels, activation='softmax')(x)
return tf.keras.Model(inputs=[text_input, series_input], outputs=[out])
def create_model_lstm_big(embedding_layer, num_labels=3):
text_input = layers.Input(shape=(None,), name='text')
txt = embedding_layer(text_input)
txt = layers.Bidirectional(tf.keras.layers.LSTM(64, recurrent_dropout=0.5, dropout=0.5))(txt)
txt = layers.Dense(32)(txt)
series_input = layers.Input(shape=(None,num_labels), name='series')
series = layers.LSTM(64, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)(series_input)
series = layers.LSTM(64, dropout=0.2, recurrent_dropout=0.2)(series)
series = layers.Dense(32)(series)
series = layers.Reshape([-1])(series)
x = layers.concatenate([txt, series])
x = layers.Dropout(0.25)(txt)
out = layers.Dense(num_labels, activation='softmax')(x)
return tf.keras.Model(inputs=[text_input], outputs=[out])
def build_model(embeddings_layer, model_fn, categories=3, optimizer='adam',
loss='categorical_crossentropy', metrics=[tf.keras.metrics.CategoricalAccuracy()]):
model = model_fn(embeddings_layer, categories)
model.summary()
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
return model
def early_stopping(min_delta=1e-3, patience=3, monitor='val_categorical_accuracy'):
return tf.keras.callbacks.EarlyStopping(
monitor=monitor,
min_delta=min_delta,
patience=patience,
verbose=1,
restore_best_weights=True
)
| StarcoderdataPython |
1823023 | def full_function():
# Note that this function is not called, it's there just to make the mapping explicit.
a = 1 # map to cEll1, line 2
b = 2 # map to cEll1, line 3
c = 3 # map to cEll2, line 2
d = 4 # map to cEll2, line 3
def create_code():
cell1_code = compile(''' # line 1
a = 1 # line 2
b = 2 # line 3
''', '<cEll1>', 'exec')
cell2_code = compile('''# line 1
c = 3 # line 2
d = 4 # line 3
''', '<cEll2>', 'exec')
return {'cEll1': cell1_code, 'cEll2': cell2_code}
if __name__ == '__main__':
code = create_code()
exec(code['cEll1'])
exec(code['cEll1'])
exec(code['cEll2'])
exec(code['cEll2'])
print('TEST SUCEEDED')
| StarcoderdataPython |
12866364 | <filename>steel_segmentation/utils.py<gh_stars>1-10
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_eda.ipynb (unless otherwise specified).
__all__ = ['palet', 'seed_everything', 'print_competition_data', 'get_train_pivot', 'get_train_df', 'count_pct',
'get_classification_df', 'rle2mask', 'make_mask', 'mask2rle', 'plot_mask_image', 'plot_defected_image',
'get_random_idx', 'show_defects']
# Cell
from fastai.vision.all import *
import numpy as np
import pandas as pd
import cv2
from matplotlib import pyplot as plt
# Cell
palet = [
(249, 192, 12), # ClassId 1
(0, 185, 241), # ClassId 2
(114, 0, 218), # ClassId 3
(249,50,12) # ClassId 4
]
# Cell
def seed_everything(seed=69):
"""
Seeds `random`, `os.environ["PYTHONHASHSEED"]`,
`numpy`, `torch.cuda` and `torch.backends`.
"""
warnings.filterwarnings("ignore")
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# Cell
def print_competition_data(p: Path):
for elem in p.ls():
print(elem)
# Cell
def get_train_pivot(df):
"""
Summarize the training csv with ClassId as columns and values EncodedPixels
"""
def rles2classids(s: pd.Series):
classids = []
for classid in s.index:
if classid != "n":
value = s[classid]
if not (value is np.nan):
classids.append(str(classid))
return " ".join(classids)
train_pivot = df.pivot(
index="ImageId", columns="ClassId", values="EncodedPixels")
train_pivot["n"] = train_pivot.notnull().sum(1)
train_pivot["ClassIds"] = train_pivot.apply(rles2classids, axis=1)
return train_pivot
def get_train_df(path, only_faulty=False, pivot=False, hard_negatives=False):
"""
Get training DataFrame with all the images in data/train_images.
Returns only the faulty images if `only_faulty`.
"""
img_path = path/"train_images"
csv_file_name = path/"train.csv"
train = pd.read_csv(csv_file_name)
img_names = [img.name for img in get_image_files(img_path)]
df_all = pd.DataFrame({'ImageId': img_names})
train_all = pd.merge(df_all, train, on="ImageId", how="outer", indicator=True)
# Renaming and fillna
train_all.rename(columns={'_merge': 'status'}, inplace=True)
rename_dict = {"both": "faulty", "left_only": "no_faulty", "right_only": "missing"}
train_all["status"] = train_all["status"].cat.rename_categories(rename_dict)
train_all = train_all[train_all["status"]!="missing"]
train_all.ClassId.fillna(0, inplace=True)
train_all.ClassId = train_all.ClassId.astype('int64')
train_all.EncodedPixels.fillna(-1, inplace=True)
train_all["ImageId_ClassId"] = train_all["ImageId"] + "_" + train_all["ClassId"].astype('str')
if hard_negatives:
hard_neg_patterns = pd.read_csv(
path/"hard_negatives_patterns.txt", header=None, names=["ImageId"])
cond = train_all["status"]=="faulty"
cond_hn = train_all["ImageId"].isin(hard_neg_patterns["ImageId"].tolist())
train_all = train_all.loc[cond | cond_hn]
if only_faulty:
train_all = train_all[train_all["status"]=="faulty"]
if pivot:
return get_train_pivot(train_all)
return train_all
# Cell
def count_pct(df, column="ClassId"):
"""Returns a `pandas.DataFrame` with count and frequencies stats for `column`."""
class_count = df[column].value_counts().sort_index()
class_count.index.set_names(column, inplace=True)
class_count = class_count.to_frame()
class_count.rename(columns={column: "num"}, inplace=True)
return class_count.assign(freq=lambda df: df["num"] / df["num"].sum())
# Cell
def get_classification_df(df: pd.DataFrame):
"""
Get the DataFrame for the multiclass classification model
"""
def assign_multi_ClassId(x):
"""Returns a string with multi ClassId sep with a blank space (' ')"""
def fill_cols(c):
return c.fillna(5).astype('int64').astype(str)
cols = [fill_cols(x[i]) for i in range(5)]
cols = [col.replace('5', '') for col in cols]
ClassId_multi = cols[0] + " " + cols[1] + " " + \
cols[2] + " " + cols[3] + " " + cols[4]
ClassId_multi = ClassId_multi.str.strip()
ClassId_multi = ClassId_multi.str.replace(' ', ' ')
return ClassId_multi.str.strip()
train_multi = df.pivot(
index="ImageId", columns="ClassId", values="ClassId")
train_multi = train_multi.assign(
ClassId_multi=lambda x: assign_multi_ClassId(x))
return train_multi.reset_index()[["ImageId", "ClassId_multi"]]
# Cell
def rle2mask(rle, value=1, shape=(256,1600)):
"""
mask_rle: run-length as string formated (start length)
shape: (width,height) of array to return
Returns numpy array, 1 - mask, 0 - background
Source: https://www.kaggle.com/paulorzp/rle-functions-run-lenght-encode-decode
"""
s = rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = value
return img.reshape((shape[1], shape[0])).T
# Cell
def make_mask(item, df, flatten=False):
'''
Given an item as:
- row index [int] or
- ImageId [str] or
- file [Path] or
- query [pd.Series],
returns the image_item and mask with two types of shapes:
- (256, 1600) if `flatten`,
- (256, 1600, 4) if not `flatten`,
'''
if isinstance(item, str): cond = df.loc[item]
elif isinstance(item, int): cond = df.iloc[item]
elif isinstance(item, Path): cond = df.loc[item.name]
elif isinstance(item, pd.Series): cond = df.loc[item["ImageId"]]
else:
print(item, type(item))
raise KeyError("invalid item")
fname = cond.name
# without 0 ClassId, only 1,2,3,4 ClassId
labels = cond[1:-2]
h, w = (256, 1600)
masks = np.zeros((h, w, 4), dtype=np.float32) # 4:class 1~4 (ch:0~3)
for itemx, label in enumerate(labels.values):
if label is not np.nan:
masks[:, :, itemx] = rle2mask(rle=label, value=1, shape=(h,w))
if flatten:
classes = np.array([1, 2, 3, 4])
masks = (masks * classes).sum(-1)
return fname, masks
# Cell
def mask2rle(mask):
"""
Efficient implementation of mask2rle, from @paulorzp
img: numpy array, 1 - mask, 0 - background
Returns run length as string formated
Source: https://www.kaggle.com/xhlulu/efficient-mask2rle
"""
pixels = mask.T.flatten()
pixels = np.pad(pixels, ((1, 1), ))
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
# Cell
def plot_mask_image(name: str, img: np.array, mask: np.array):
"""Plot a np.array image and mask with contours."""
fig, ax = plt.subplots(figsize=(15, 5))
mask = mask.astype(np.uint8)
for ch in range(4):
contours, _ = cv2.findContours(mask[:, :, ch], cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for i in range(len(contours)):
cv2.polylines(img, contours[i], True, palet[ch], 2)
ax.set_title(name, fontsize=13)
ax.imshow(img)
plt.xticks([])
plt.yticks([])
plt.show()
# Cell
def plot_defected_image(img_path: Path, df: pd.DataFrame, class_id=None):
"""Plot a `img_path` Path image from the training folder with contours."""
img_name = img_path.name
img = cv2.imread(str(img_path))
_, mask = make_mask(img_path, df)
class_ids = np.arange(1, 5)
cond = np.argmax(mask, axis=0).argmax(axis=0) > 0
classid = class_ids[cond]
if class_id is None:
title = f"Original: Image {img_name} with defect type: {list(classid)}"
plot_mask_image(title, img, mask)
else:
title = f"Original: Image {img_name} with defect type {class_id}"
idx = class_id-1
filter_mask = np.zeros((256, 1600, 4), dtype=np.float32)
filter_mask[:, :, idx] = mask[:, :, idx]
plot_mask_image(title, img, filter_mask)
# Cell
def get_random_idx(n: int) -> np.ndarray:
"""
Return a random sequence of size `n`.
"""
rng = np.random.default_rng()
return rng.permutation(n)
# Cell
def show_defects(path, df, class_id=None, n=20, only_defects=True, multi_defects=False):
"""
Plot multiple images.
Attributes:
`path`: [Path]
`df`: [pd.DataFrame] only train_pivot
`class_id`: [str or int] select a type of defect otherwise plot all kinds;
`n`: select the number of images to plot;
`only_defects` [bool, default True]: if False it shows even the no faulty images;
`multi_defects` [bool, default False]: if True it shows imgs with multi defects.
"""
cond_no_defects = df[0] == -1
cond_multi_defects = df["n"] > 1
df = df.loc[cond_no_defects] if not only_defects else df.loc[~cond_no_defects]
df = df.loc[cond_multi_defects] if multi_defects else df.loc[~cond_multi_defects]
if class_id is not None:
cond_classId = df[class_id].notna()
df = df.loc[cond_classId]
imgid_from_df = df.index.tolist()
pfiles_list = L([path / "train_images" / imgid for imgid in imgid_from_df])
perm_paths = pfiles_list[get_random_idx(len(pfiles_list))]
for img_path in perm_paths[:n]:
plot_defected_image(img_path, df) | StarcoderdataPython |
8094468 | <gh_stars>0
# ENEM math-score prediction: trains a linear regression on train.csv and
# predicts the math grade (NU_NOTA_MT) for the rows of test.csv.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
# Load the training and test datasets
treino = pd.read_csv('train.csv')
teste = pd.read_csv('test.csv')
# Columns present in the training set but absent from the test set
colunasInexistentesTest = [coluna for coluna in treino.columns
                           if coluna not in teste.columns]
# Keep the math grade (position 69 of the missing-column list): it is the target
del colunasInexistentesTest[69]
# Plot the correlation matrix of the training data
corr = treino.corr()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(corr, cmap='coolwarm', vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0, len(treino.columns), 1)
ax.set_xticks(ticks)
plt.xticks(rotation=90)
ax.set_yticks(ticks)
ax.set_xticklabels(treino.columns)
ax.set_yticklabels(treino.columns)
plt.show()
# Drop the training columns absent from the test set, plus columns judged to
# have no correlation with the target
treino = treino.drop(colunasInexistentesTest, axis=1)
colunasNaoImportantes = ["NU_INSCRICAO","SG_UF_RESIDENCIA", "CO_PROVA_CN", "CO_PROVA_CH",
                         "CO_PROVA_LC", "CO_PROVA_MT", "Q027", "CO_UF_RESIDENCIA", "NU_IDADE",
                         "TP_COR_RACA", "TP_NACIONALIDADE", "TP_ST_CONCLUSAO", "TP_ANO_CONCLUIU",
                         "TP_ESCOLA", "TP_ENSINO", "TP_DEPENDENCIA_ADM_ESC", "IN_BAIXA_VISAO",
                         "IN_CEGUEIRA", "IN_SURDEZ", "IN_DISLEXIA", "IN_DISCALCULIA", "IN_SABATISTA",
                         "IN_GESTANTE","IN_IDOSO", "TP_PRESENCA_CN", "TP_PRESENCA_CH", "TP_PRESENCA_LC",
                         "TP_LINGUA", "TP_STATUS_REDACAO"]
for k in colunasNaoImportantes:
    treino = treino.drop(k, axis=1)
    teste = teste.drop(k, axis=1)
# Drop the test rows that contain any missing value.
# BUG FIX: the original used `if j is True`, which is always False for the
# numpy.bool_ values produced by isnull().any(axis=1), so no row was ever
# dropped. A plain truthiness test works for both bool and numpy.bool_.
# NOTE(review): drop() receives positional indices; this assumes teste has a
# default RangeIndex straight from read_csv — confirm.
row = list(teste.isnull().any(axis=1))
idxx = [i for i, j in enumerate(row) if j]
teste = teste.drop(idxx)
# Fill the remaining missing values in the training data with -1
imputer = SimpleImputer(strategy='constant', fill_value=-1)
treino = imputer.fit_transform(treino)
treino = pd.DataFrame(data=treino)
# One-hot encode the categorical columns
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0, 12,13,14,15,16,17,18])], remainder='passthrough')
treino = ct.fit_transform(treino)
treino = pd.DataFrame(data=treino)
treino.rename(columns={54: "NU_MT"}, inplace=True)
ct2 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0, 11,12,13,14,15,16,17])], remainder='passthrough')
teste = ct2.fit_transform(teste)
teste = pd.DataFrame(data=teste)
# Move the target column (math grade) to the last position
copia = treino['NU_MT']
treino["NU_NOTA_MT"] = copia
treino = treino.drop('NU_MT', axis=1)
base = treino
_, a = base.shape
atributos = base.iloc[:, 0:a - 1].values
classe = base.iloc[:, a - 1].values
# Fit a linear regression on the training data
regressor = LinearRegression()
regressor.fit(atributos, classe)
# Predict the math grades for the test file
resultados = regressor.predict(teste)
8075550 | <reponame>eeetem/GPT3-Discord-Bot
import discord
import asyncio
import random
import time
import openai
# Create the Discord client and load credentials from local files:
# ApiKey.txt holds the OpenAI API key, BotKey.txt the Discord bot token.
client = discord.Client()
with open('ApiKey.txt') as f:
    openai.api_key = f.readline()
with open('BotKey.txt') as f:
    BotKey = f.readline()
@client.event
async def on_ready():
    """Initialise per-run bot state once the Discord connection is up."""
    print('We have logged in as {0.user}'.format(client))
    # rolling per-channel message history, keyed by channel id
    client.channel_dict = {}
    # rate-limiting state used by talk()
    client.oldtime = 5
    client.last_sender = ""
    client.last_sent = ""
    client.sender_list = dict()
@client.event
async def on_message(message):
    """Track recent channel history and decide whether the bot should reply."""
    # NOTE: the bot deliberately does NOT ignore its own messages, so its own
    # replies become part of the conversation history.
    # Direct messages are ignored entirely.
    if str(message.channel.type) == "private":
        return
    # Keep a separate rolling history per channel so conversations don't
    # bleed across channels and servers.
    history = client.channel_dict.setdefault(message.channel.id, [])
    client.last_sender = message.author
    if len(message.content) > 1:
        history.append(message)
    # Cap the remembered history: too much context consumes too many tokens.
    if len(history) > 6:
        history.pop(0)
    # Random 1% chance to chime in unprompted.
    if random.random() < 0.01:
        await talk(message, history)
    # Otherwise reply whenever the bot is mentioned.
    for member in message.mentions:
        if member == client.user:
            await talk(message, history)
async def talk(message,messagelist):
    """Build a GPT-3 prompt from the channel history and post the completion.

    `message` is the triggering discord message; `messagelist` is the rolling
    history of recent messages in the same channel.
    """
    # Basic spam filter: prevent a single person spamming the bot over and
    # over and draining the API tokens.
    if time.time() - client.oldtime < 10 and client.last_sender == message.author:
        return
    # General spam filter so two people alternating can't do the same.
    if time.time() - client.oldtime < 3:
        return
    client.oldtime = time.time()
    # GPT-3 prompt: persona line followed by the recent chat transcript.
    prompt="Continue the following online chat. " + client.user.name + " is a witty and cynical chatbot.\n\n"
    # Format each remembered message as "author: text".
    for msg in messagelist:
        prompt += msg.author.display_name+": "+msg.clean_content+"\n"
    prompt += client.user.name+":"
    prompt = prompt.encode('ascii', 'ignore').decode()#remove non-ascii characters
    # Tokenize a punctuation-stripped copy to find very large integers;
    # presumably these are Discord snowflake IDs (emoji/user IDs) that would
    # confuse the model — TODO confirm the 1000000 threshold is intentional.
    prompt2 = prompt.replace(":"," ")
    prompt2 = prompt2.replace(">"," ")
    prompt2 = prompt2.replace("<"," ")
    numbers = [int(word) for word in prompt2.split() if word.isdigit()]
    for i in numbers:
        if i > 1000000:
            prompt = prompt.replace(str(i),"")
    # Emoji names appear wrapped in <...> along with IDs - strip the brackets
    # so only the readable emoji name remains.
    prompt = prompt.replace(">","")
    prompt = prompt.replace("<","")
    print(prompt)
    response = openai.Completion.create(engine="davinci-instruct-beta", prompt=prompt, stop=["\n"], temperature=0.8, max_tokens=100,presence_penalty=0.9,frequency_penalty=0)
    # Sometimes the completion comes back blank - sending an empty message to
    # Discord raises an exception, so bail out instead.
    if len(response["choices"][0]["text"]) < 1:
        print("empty")
        return
    print(response["choices"][0]["text"])
    await message.channel.send(response["choices"][0]["text"])
# Start the bot: schedule the Discord client on the event loop and run
# forever. NOTE(review): get_event_loop()/run_forever is the legacy asyncio
# startup pattern; fine for this script, but deprecated on newer Pythons.
loop = asyncio.get_event_loop()
loop.create_task(client.start(BotKey))
loop.run_forever()
| StarcoderdataPython |
6557385 | <reponame>jantzen/eugene
# fragment_timeseries.py
from __future__ import division
import warnings
import numpy as np
import pdb
""" Methods for dividing a timeseries into multiple sub-series for anaylsis by
dynamical distance.
"""
def split_timeseries(data, num_frags, verbose=False):
    """ Split every timeseries in `data` into `num_frags` equal sub-series.

    data: a list of length n of (vars x sample) numpy arrays, presumed
    to be timeseries describing n different systems or treatments.
    num_frags: number of fragments per series.
    verbose: if True, report when uneven fragments are clipped.
    returns: an n-length list of num_frags length lists of numpy arrays
    """
    reference_length = data[0].shape[1]
    # Sanity-check every series before splitting.
    for series in data:
        # more variables than samples suggests a transposed array
        if series.shape[0] > series.shape[1]:
            errmsg = 'Some timeseries data appears to be transposed.'
            warnings.warn(errmsg)
        # mismatched lengths would yield fragments of different sizes
        if series.shape[1] != reference_length:
            errmsg = """Some timeseries are of different lengths.
            This will result in framgents of different lengths after splitting
            each timeseries."""
            warnings.warn(errmsg)
    # Split each series; when the split is uneven, clip every fragment to the
    # length of the shortest (last) one so all fragments match.
    fragmented_data = []
    for series in data:
        pieces = np.array_split(series, num_frags, axis=1)
        if pieces[0].shape != pieces[-1].shape:
            m = pieces[-1].shape[1]
            if verbose:
                print("Trimming all sub-series to length {}.".format(m))
            pieces = [piece[:, :m] for piece in pieces]
        fragmented_data.append(pieces)
    return fragmented_data
def fixed_length_frags(data, frag_length):
    """ Split each timeseries into consecutive fragments of fixed length.

    data: a list of length n of (vars x sample) numpy arrays, presumed
    to be timeseries describing n different systems or treatments.
    frag_length: number of samples per fragment; the final fragment of a
    series is shorter when the series length is not an exact multiple.
    returns: an n-length list of variable length lists of numpy arrays

    Fixes: the original read data[0] before validating, which raised
    IndexError for an empty input list; an empty list now yields [].
    """
    # data verification
    for sys in data:
        # more variables than samples suggests a transposed array
        if sys.shape[0] > sys.shape[1]:
            errmsg = 'Some timeseries data appears to be transposed.'
            warnings.warn(errmsg)
    # split timeseries
    fragmented_data = []
    for sys in data:
        # series length
        ll = sys.shape[1]
        # number of fragments, by integer ceiling division
        num_frags = -(-ll // frag_length)
        data_split = [sys[:, (ii * frag_length):((ii + 1) * frag_length)]
                      for ii in range(num_frags)]
        fragmented_data.append(data_split)
    return fragmented_data
def trim( data ):
    """Drop the trailing short fragment from each list of fragments.

    A fragment list produced by an uneven split ends with a shorter array;
    such lists are returned without their last element, others unchanged.
    """
    trimmed = []
    for frags in data:
        uniform = frags[0].shape == frags[-1].shape
        trimmed.append(frags if uniform else frags[:-1])
    return trimmed
| StarcoderdataPython |
12816858 | <reponame>ferren/pyXterm<filename>demo/Main.py
#!/bin/env python
#----------------------------------------------------------------------------
# Name: Main.py
# Purpose: Testing lots of stuff, controls, window types, etc.
#
# Author: <NAME>
#
# Created: A long time ago, in a galaxy far, far away...
# Copyright: (c) 1999-2018 by Total Control Software
# Licence: wxWindows license
# Tags: phoenix-port, py3-port
#----------------------------------------------------------------------------
# FIXME List:
# * Problems with flickering related to ERASE_BACKGROUND
# and the splitters. Might be a problem with this 2.5 beta...?
# UPDATE: can't see on 2.5.2 GTK - maybe just a faster machine :)
# * Demo Code menu?
# * Annoying switching between tabs and resulting flicker
# how to replace a page in the notebook without deleting/adding?
# Where is SetPage!? tried freeze...tried reparent of dummy panel....
# AG: It looks like this issue is fixed by Freeze()ing and Thaw()ing the
# main frame and not the notebook
# TODO List:
# * UI design more professional (is the new version more professional?)
# * save file positions (new field in demoModules) (@ LoadDemoSource)
# * Update main overview
# * Why don't we move _treeList into a separate module
# =====================
# = EXTERNAL Packages =
# =====================
# In order to let a package (like AGW) be included in the wxPython demo,
# the package owner should create a sub-directory of the wxPython demo folder
# in which all the package's demos should live. In addition, the sub-folder
# should contain a Python file called __demo__.py which, when imported, should
# contain the following methods:
#
# * GetDemoBitmap: returns the bitmap to be used in the wxPython demo tree control
# in a PyEmbeddedImage format;
# * GetRecentAdditions: returns a list of demos which will be displayed under the
# "Recent Additions/Updates" tree item. This list should be a subset (or the full
# set) of the package's demos;
# * GetDemos: returns a tuple. The first item of the tuple is the package's name
# as will be displayed in the wxPython demo tree, right after the "Custom Controls"
# item. The second element of the tuple is the list of demos for the external package.
# * GetOverview: returns a wx.html-ready representation of the package's documentation.
#
# Please see the __demo__.py file in the demo/agw/ folder for an example.
# Last updated: <NAME>, 20 Oct 2008, 18.00 GMT
import sys, os, time, traceback
import re
import shutil
from threading import Thread
from distutils.version import LooseVersion
import wx
import wx.adv
import wx.lib.agw.aui as aui
import wx.html
from wx.lib.msgpanel import MessagePanel
from wx.adv import TaskBarIcon as TaskBarIcon
from wx.adv import SplashScreen as SplashScreen
import wx.lib.mixins.inspection
import six
from six import exec_, BytesIO
from six.moves import cPickle
from six.moves import urllib
import version
# We won't import the images module yet, but we'll assign it to this
# global when we do. It stays None until the demo images are loaded lazily.
images = None

# For debugging
##wx.Trap();
##print("wx.VERSION_STRING = %s (%s)" % (wx.VERSION_STRING, wx.USE_UNICODE and 'unicode' or 'ansi'))
##print("pid:", os.getpid())
##raw_input("Press Enter...")
#---------------------------------------------------------------------------
# Whether the demo tree uses the AGW CustomTreeCtrl instead of wx.TreeCtrl.
USE_CUSTOMTREECTRL = False
# Name of the AUI perspective saved/restored as the default layout.
DEFAULT_PERSPECTIVE = "Default Perspective"

#---------------------------------------------------------------------------
# get images and demo list
from demodata import _demoPngs, _treeList
#---------------------------------------------------------------------------
# HTML fragments used to render the scraped wxWidgets documentation
# (styles/events tables and per-platform appearance screenshots).
_styleTable = '<h3>Window %s</h3>\n' \
              '<p>This class supports the following window %s:\n' \
              '<p><table bgcolor=\"#ffffff\" border cols=1>'

_eventTable = '<h3>Events</h3>\n' \
              '<p>Events emitted by this class:\n' \
              '<p><table bgcolor=\"#ffffff\" border cols=1>'

_appearanceTable = '<h3>Appearance</h3>\n' \
                   '<p>Control appearance on various platform:\n' \
                   '<p><table bgcolor=\"#ffffff\" cellspacing=20>'

_styleHeaders = ["Style Name", "Description"]
_eventHeaders = ["Event Name", "Description"]
_headerTable = '<td><b>%s</b></td>'
_styleTag = '<td><tt>%s</tt></td>'
_eventTag = '<td><i>%s</i></td>'
_hexValues = '<td><font color="%s"> %s </font></td>'
_description = '<td>%s</td>'
_imageTag = '<td align=center valign=middle><a href="%s"><img src="%s" alt="%s"></a></td>'
_platformTag = '<td align=center><b>%s</b></td>'

# Base URL of the wxWidgets trunk docs and the per-class page pattern.
_trunkURL = "http://docs.wxwidgets.org/trunk/"
_docsURL = _trunkURL + "classwx%s.html"

_platformNames = ["wxMSW", "wxGTK", "wxMac"]

# Extra wx sub-modules searched when resolving style flag values.
_importList = ["wx.aui", "wx.calendar", "wx.html", "wx.media", "wx.wizard",
               "wx.combo", "wx.animate", "wx.gizmos", "wx.glcanvas", "wx.grid",
               "wx.richtext", "wx.stc"]

_dirWX = dir(wx)

# Pre-import the optional sub-modules; missing ones are silently skipped so
# the later dir()/eval() lookups in FormatDocs work for whatever is present.
for mod in _importList:
    try:
        module = __import__(mod)
    except ImportError:
        continue
#---------------------------------------------------------------------------
def ReplaceCapitals(string):
    """
    Return *string* with every capital letter replaced by an underscore
    followed by its lowercase form (e.g. "TreeCtrl" -> "_tree_ctrl").

    **Parameters:**

    * `string`: the string to be analyzed.
    """
    return "".join("_" + ch.lower() if ch.isupper() else ch for ch in string)
def RemoveHTMLTags(data):
    """
    Return *data* with all HTML tags stripped out.

    **Parameters:**

    * `data`: the string to be analyzed.
    """
    # non-greedy match of anything between '<' and the next '>'
    return re.sub(r'<[^<]*?>', '', data)
def FormatDocs(keyword, values, num):
    """Build the HTML table for a widget's styles or events.

    `keyword` fills the table caption (styles only); `values` maps wxWidgets
    names to description strings; `num` is 2 for events, 3 for window styles
    (with a wxPython flag lookup), anything else for extra styles.
    NOTE(review): for num not in (2, 3) the '%s' placeholders of _styleTable
    are left unfilled - presumably only 2 and 3 are used; confirm callers.
    """
    names = list(values.keys())
    names.sort()
    # pick the header/table templates for events vs. styles
    headers = (num == 2 and [_eventHeaders] or [_styleHeaders])[0]
    table = (num == 2 and [_eventTable] or [_styleTable])[0]
    if num == 3:
        text = "<br>" + table%(keyword.lower(), keyword.lower()) + "\n<tr>\n"
    else:
        text = "<br>" + table
    for indx in range(2):
        text += _headerTable%headers[indx]
    text += "\n</tr>\n"
    for name in names:
        text += "<tr>\n"
        description = values[name].strip()
        # translate the C++ name to its wxPython spelling
        pythonValue = name.replace("wx", "wx.")
        if num == 3:
            # Try to resolve the actual flag value: first in the core wx
            # namespace, then in the optional sub-modules of _importList.
            # NOTE(review): `colour` and `value` are computed but never
            # emitted here (_hexValues is unused in this function).
            colour = "#ff0000"
            value = "Unavailable"
            cutValue = pythonValue[3:]
            if cutValue in _dirWX:
                try:
                    val = eval(pythonValue)
                    value = "%s"%hex(val)
                    colour = "#0000ff"
                except AttributeError:
                    value = "Unavailable"
            else:
                for packages in _importList:
                    if cutValue in dir(eval(packages)):
                        # eval() only sees names built from our own constant
                        # list, never external input
                        val = eval("%s.%s"%(packages, cutValue))
                        value = "%s"%hex(val)
                        colour = "#0000ff"
                        pythonValue = "%s.%s"%(packages, cutValue)
                        break
            text += _styleTag%pythonValue + "\n"
        else:
            text += _eventTag%pythonValue + "\n"
        text += _description%FormatDescription(description) + "\n"
        text += "</tr>\n"
    text += "\n</table>\n\n<p>"
    return text
def FormatDescription(description):
    """
    Rewrite a wxWidgets C++ description using wxPython naming.

    **Parameters:**

    * `description`: the string description to be formatted.
    """
    # order matters: the first substitution feeds the later fix-ups
    for old, new in (("wx", "wx."),
                     ("EVT_COMMAND", "wxEVT_COMMAND"),
                     ("wx.Widgets", "wxWidgets")):
        description = description.replace(old, new)
    return description
def FormatImages(appearance):
    """
    Build the HTML table showing the per-platform screenshots of a control.

    **Parameters:**

    * `appearance`: dict mapping a platform name (from _platformNames) to the
      local path of its screenshot.
    """
    rows = ""
    # first row: clickable screenshot images; second row: platform labels
    for rowIndex in range(2):
        rows += "\n<tr>\n"
        for plat in _platformNames:
            if rowIndex == 0:
                src = appearance[plat]
                rows += _imageTag%(src, src, plat + "Appearance")
            else:
                rows += _platformTag%plat
        rows += "</tr>\n"
    return "<p><br>" + _appearanceTable + rows + "\n</table>\n\n<p>"
def FindWindowStyles(text, originalText, widgetName):
    """
    Parse the window styles, extra styles and events out of a scraped
    wxWidgets documentation page.

    **Parameters:**

    * `text`: the docs for a widget, stripped of HTML tags, as lines;
    * `originalText`: the same docs with all HTML tags intact;
    * `widgetName`: the widget whose page is being parsed.
    """
    winStyles, winEvents, winExtra, winAppearance = {}, {}, {}, {}
    inStyle = inEvent = inExtra = False
    for line in text:
        # Section markers flip the collection flags; a blank line ends
        # whatever section we were in.
        if "following styles:" in line:
            inStyle = True
            continue
        elif "Event macros" in line:
            inEvent = True
            continue
        if "following extra styles:" in line:
            inExtra = True
            continue
        if "Appearance:" in line:
            winAppearance = FindImages(originalText, widgetName)
            continue
        elif not line.strip():
            inStyle = inEvent = inExtra = False
            continue
        if not (inStyle or inEvent or inExtra):
            continue
        # Each collected line has the form "NAME: description".
        colon = line.index(':')
        name, description = line[:colon], line[colon + 1:]
        # precedence mirrors the section order: styles, then events, extras
        if inStyle:
            winStyles[name] = description
        elif inEvent:
            winEvents[name] = description
        else:
            winExtra[name] = description
    return winStyles, winEvents, winExtra, winAppearance
def FindImages(text, widgetName):
    """
    When the wxWidgets docs page contains the control appearance (a
    screenshot of the control), try to download and cache the images,
    returning a dict mapping platform name -> local image path.

    **Parameters:**

    * `text`: the wxWidgets C++ docs for a particular widget/event, with
      all HTML tags.
    """
    winAppearance = {}
    start = text.find("class='appearance'")
    if start < 0:
        # no appearance section on this page
        return winAppearance
    imagesDir = GetDocImagesDir()
    # NOTE(review): find("</table>") searches the whole text, not the slice
    # after `start`, so `end` looks suspect - confirm against real pages.
    end = start + text.find("</table>")
    text = text[start:end]
    split = text.split()
    for indx, items in enumerate(split):
        if "src=" in items:
            # download the screenshot bytes; saved when its alt= token arrives
            possibleImage = items.replace("src=", "").strip()
            possibleImage = possibleImage.replace('"', "")
            f = urllib.request.urlopen(_trunkURL + possibleImage)
            stream = f.read()
        elif "alt=" in items:
            # NOTE(review): assumes every alt= is preceded by a src= token;
            # otherwise `stream` is referenced before assignment.
            plat = items.replace("alt=", "").replace("'", "").strip()
            path = os.path.join(imagesDir, plat, widgetName + ".png")
            if not os.path.isfile(path):
                # cache the image on disk the first time we see it
                image = wx.ImageFromStream(BytesIO(stream))
                image.SaveFile(path, wx.BITMAP_TYPE_PNG)
            winAppearance[plat] = path
    return winAppearance
def GetCaretPeriod(win = None):
    """
    Attempts to identify the correct caret blink rate to use in the Demo
    Code panel.

    :param wx.Window win: a window to pass to wx.SystemSettings.GetMetric.
    :return: a value in milliseconds that indicates the proper period
        (0 disables blinking).
    :rtype: float
    :raises: ValueError if unable to resolve a proper caret blink rate.
    """
    # explicit command-line opt-out
    if '--no-caret-blink' in sys.argv:
        return 0
    try:
        onmsec = wx.SystemSettings.GetMetric(wx.SYS_CARET_ON_MSEC, win)
        offmsec = wx.SystemSettings.GetMetric(wx.SYS_CARET_OFF_MSEC, win)
        # GetMetric returns -1 when the metric is unknown on this platform
        if -1 in (onmsec, offmsec):
            raise ValueError("Unable to determine caret blink rate.")
        # attempt to average.
        # (wx systemsettings allows on and off time, but scintilla just takes a single period.)
        return (onmsec + offmsec) / 2.0
    except AttributeError:
        # wx.SYS_CARET_ON/OFF_MSEC may be missing from older wx builds
        raise ValueError("Unable to determine caret blink rate.")
#---------------------------------------------------------------------------
# Set up a thread that will scan the wxWidgets docs for window styles,
# events and widgets screenshots
class InternetThread(Thread):
    """ Worker thread that downloads and parses a widget's wxWidgets docs.

    Results (or errors) are delivered back to `notifyWindow` on the GUI
    thread via wx.CallAfter.
    """

    def __init__(self, notifyWindow, selectedClass):
        # `notifyWindow` receives LoadDocumentation/StopDownload callbacks;
        # `selectedClass` is the wx class name whose docs page is fetched.
        Thread.__init__(self)
        self.notifyWindow = notifyWindow
        self.selectedClass = selectedClass
        # cooperative cancellation flag checked before delivering results
        self.keepRunning = True
        self.setDaemon(True)
        self.start()


    def run(self):
        """ Run the worker thread. """
        # This is the code executing in the new thread: fetch the class docs
        # page, strip the HTML and extract styles/events/appearance.
        try:
            url = _docsURL % ReplaceCapitals(self.selectedClass)
            fid = urllib.request.urlopen(url)
            if six.PY2:
                originalText = fid.read()
            else:
                originalText = fid.read().decode("utf-8")
            text = RemoveHTMLTags(originalText).split("\n")
            data = FindWindowStyles(text, originalText, self.selectedClass)

            if not self.keepRunning:
                # the caller lost interest while we were downloading
                return

            wx.CallAfter(self.notifyWindow.LoadDocumentation, data)
        except (IOError, urllib.error.HTTPError):
            # Unable to get to the internet
            t, v = sys.exc_info()[:2]
            message = traceback.format_exception_only(t, v)
            wx.CallAfter(self.notifyWindow.StopDownload, message)
        except:
            # Some other strange error... report it the same way
            t, v = sys.exc_info()[:2]
            message = traceback.format_exception_only(t, v)
            wx.CallAfter(self.notifyWindow.StopDownload, message)
#---------------------------------------------------------------------------
# Show how to derive a custom wxLog class
class MyLog(wx.Log):
    """Custom wx.Log target that appends every message to a text control."""

    def __init__(self, textCtrl, logTime=0):
        wx.Log.__init__(self)
        self.tc = textCtrl
        self.logTime = logTime

    def DoLogText(self, message):
        # silently drop messages when no text control is attached
        if not self.tc:
            return
        self.tc.AppendText(message + '\n')
#---------------------------------------------------------------------------
# A class to be used to display source code in the demo. Try using the
# wxSTC in the StyledTextCtrl_2 sample first, fall back to wxTextCtrl
# if there is an error, such as the stc module not being present.
#
try:
##raise ImportError # for testing the alternate implementation
from wx import stc
from StyledTextCtrl_2 import PythonSTC
    class DemoCodeEditor(PythonSTC):
        """StyledTextCtrl-based source editor for the 'Demo Code' tab."""

        def __init__(self, parent, style=wx.BORDER_NONE):
            PythonSTC.__init__(self, parent, -1, style=style)
            self.SetUpEditor()

        # Some methods to make it compatible with how the wxTextCtrl is used
        def SetValue(self, value):
            # if wx.USE_UNICODE:
            #     value = value.decode('iso8859_1')
            # temporarily lift read-only so the buffer can be replaced
            val = self.GetReadOnly()
            self.SetReadOnly(False)
            self.SetText(value)
            self.EmptyUndoBuffer()
            self.SetSavePoint()
            self.SetReadOnly(val)

        def SetEditable(self, val):
            self.SetReadOnly(not val)

        def IsModified(self):
            return self.GetModify()

        def Clear(self):
            self.ClearAll()

        def SetInsertionPoint(self, pos):
            self.SetCurrentPos(pos)
            self.SetAnchor(pos)

        def ShowPosition(self, pos):
            line = self.LineFromPosition(pos)
            #self.EnsureVisible(line)
            self.GotoLine(line)

        def GetLastPosition(self):
            return self.GetLength()

        def GetPositionFromLine(self, line):
            return self.PositionFromLine(line)

        def GetRange(self, start, end):
            return self.GetTextRange(start, end)

        def GetSelection(self):
            return self.GetAnchor(), self.GetCurrentPos()

        def SetSelection(self, start, end):
            self.SetSelectionStart(start)
            self.SetSelectionEnd(end)

        def SelectLine(self, line):
            start = self.PositionFromLine(line)
            end = self.GetLineEndPosition(line)
            self.SetSelection(start, end)

        def SetUpEditor(self):
            """
            This method carries out the work of setting up the demo editor.
            It's separate so as not to clutter up the init code.
            """
            import keyword

            self.SetLexer(stc.STC_LEX_PYTHON)
            self.SetKeyWords(0, " ".join(keyword.kwlist))

            # Enable folding
            self.SetProperty("fold", "1" )

            # Highlight tab/space mixing (shouldn't be any)
            self.SetProperty("tab.timmy.whinge.level", "1")

            # Set left and right margins
            self.SetMargins(2,2)

            # Set up the numbers in the margin for margin #1
            self.SetMarginType(1, wx.stc.STC_MARGIN_NUMBER)
            # Reasonable value for, say, 4-5 digits using a mono font (40 pix)
            self.SetMarginWidth(1, 40)

            # Indentation and tab stuff
            self.SetIndent(4)               # Proscribed indent size for wx
            self.SetIndentationGuides(True) # Show indent guides
            self.SetBackSpaceUnIndents(True)# Backspace unindents rather than delete 1 space
            self.SetTabIndents(True)        # Tab key indents
            self.SetTabWidth(4)             # Proscribed tab size for wx
            self.SetUseTabs(False)          # Use spaces rather than tabs, or
                                            # TabTimmy will complain!
            # White space
            self.SetViewWhiteSpace(False)   # Don't view white space

            # EOL: Since we are loading/saving ourselves, and the
            # strings will always have \n's in them, set the STC to
            # edit them that way.
            self.SetEOLMode(wx.stc.STC_EOL_LF)
            self.SetViewEOL(False)

            # No right-edge mode indicator
            self.SetEdgeMode(stc.STC_EDGE_NONE)

            # Setup a margin to hold fold markers
            self.SetMarginType(2, stc.STC_MARGIN_SYMBOL)
            self.SetMarginMask(2, stc.STC_MASK_FOLDERS)
            self.SetMarginSensitive(2, True)
            self.SetMarginWidth(2, 12)

            # and now set up the fold markers
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_BOXPLUSCONNECTED, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_BOXMINUSCONNECTED, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_TCORNER, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_LCORNER, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_VLINE, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_BOXPLUS, "white", "black")
            self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_BOXMINUS, "white", "black")

            # Global default style
            if wx.Platform == '__WXMSW__':
                self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
                                  'fore:#000000,back:#FFFFFF,face:Courier New')
            elif wx.Platform == '__WXMAC__':
                # TODO: if this looks fine on Linux too, remove the Mac-specific case
                # and use this whenever OS != MSW.
                self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
                                  'fore:#000000,back:#FFFFFF,face:Monaco')
            else:
                defsize = wx.SystemSettings.GetFont(wx.SYS_ANSI_FIXED_FONT).GetPointSize()
                self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
                                  'fore:#000000,back:#FFFFFF,face:Courier,size:%d'%defsize)

            # Clear styles and revert to default.
            self.StyleClearAll()

            # Following style specs only indicate differences from default.
            # The rest remains unchanged.

            # Line numbers in margin
            self.StyleSetSpec(wx.stc.STC_STYLE_LINENUMBER,'fore:#000000,back:#99A9C2')
            # Highlighted brace
            self.StyleSetSpec(wx.stc.STC_STYLE_BRACELIGHT,'fore:#00009D,back:#FFFF00')
            # Unmatched brace
            self.StyleSetSpec(wx.stc.STC_STYLE_BRACEBAD,'fore:#00009D,back:#FF0000')
            # Indentation guide
            self.StyleSetSpec(wx.stc.STC_STYLE_INDENTGUIDE, "fore:#CDCDCD")

            # Python styles
            self.StyleSetSpec(wx.stc.STC_P_DEFAULT, 'fore:#000000')
            # Comments
            self.StyleSetSpec(wx.stc.STC_P_COMMENTLINE, 'fore:#008000,back:#F0FFF0')
            self.StyleSetSpec(wx.stc.STC_P_COMMENTBLOCK, 'fore:#008000,back:#F0FFF0')
            # Numbers
            self.StyleSetSpec(wx.stc.STC_P_NUMBER, 'fore:#008080')
            # Strings and characters
            self.StyleSetSpec(wx.stc.STC_P_STRING, 'fore:#800080')
            self.StyleSetSpec(wx.stc.STC_P_CHARACTER, 'fore:#800080')
            # Keywords
            self.StyleSetSpec(wx.stc.STC_P_WORD, 'fore:#000080,bold')
            # Triple quotes
            self.StyleSetSpec(wx.stc.STC_P_TRIPLE, 'fore:#800080,back:#FFFFEA')
            self.StyleSetSpec(wx.stc.STC_P_TRIPLEDOUBLE, 'fore:#800080,back:#FFFFEA')
            # Class names
            self.StyleSetSpec(wx.stc.STC_P_CLASSNAME, 'fore:#0000FF,bold')
            # Function names
            self.StyleSetSpec(wx.stc.STC_P_DEFNAME, 'fore:#008080,bold')
            # Operators
            self.StyleSetSpec(wx.stc.STC_P_OPERATOR, 'fore:#800000,bold')
            # Identifiers. I leave this as not bold because everything seems
            # to be an identifier if it doesn't match the above criterae
            self.StyleSetSpec(wx.stc.STC_P_IDENTIFIER, 'fore:#000000')

            # Caret color
            self.SetCaretForeground("BLUE")
            # Selection background
            self.SetSelBackground(1, '#66CCFF')

            # Attempt to set caret blink rate.
            try:
                self.SetCaretPeriod(GetCaretPeriod(self))
            except ValueError:
                # keep the platform default when the rate can't be resolved
                pass

            self.SetSelBackground(True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))
            self.SetSelForeground(True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT))

        def RegisterModifiedEvent(self, eventHandler):
            self.Bind(wx.stc.EVT_STC_CHANGE, eventHandler)
except ImportError:
    class DemoCodeEditor(wx.TextCtrl):
        """Plain wx.TextCtrl fallback used when the wx.stc module is missing."""

        def __init__(self, parent):
            wx.TextCtrl.__init__(self, parent, -1, style =
                                 wx.TE_MULTILINE | wx.HSCROLL | wx.TE_RICH2 | wx.TE_NOHIDESEL)

        def RegisterModifiedEvent(self, eventHandler):
            self.Bind(wx.EVT_TEXT, eventHandler)

        def SetReadOnly(self, flag):
            self.SetEditable(not flag)
            # NOTE: STC already has this method

        def GetText(self):
            return self.GetValue()

        def GetPositionFromLine(self, line):
            return self.XYToPosition(0,line)

        def GotoLine(self, line):
            pos = self.GetPositionFromLine(line)
            self.SetInsertionPoint(pos)
            self.ShowPosition(pos)

        def SelectLine(self, line):
            start = self.GetPositionFromLine(line)
            end = start + self.GetLineLength(line)
            self.SetSelection(start, end)
#---------------------------------------------------------------------------
# Constants for module versions
# Identifiers for the two possible versions of a demo's source: the shipped
# original and the user's modified copy saved under the app data directory.
modOriginal = 0
modModified = 1
modDefault = modOriginal
#---------------------------------------------------------------------------
class DemoCodePanel(wx.Panel):
    """Panel for the 'Demo Code' tab: shows the demo source in an editor and
    lets the user switch between, save and delete modified copies."""

    def __init__(self, parent, mainFrame):
        wx.Panel.__init__(self, parent, size=(1,1))
        if 'wxMSW' in wx.PlatformInfo:
            # avoid a flicker on MSW until the panel is actually shown
            self.Hide()
        self.mainFrame = mainFrame
        self.editor = DemoCodeEditor(self)
        self.editor.RegisterModifiedEvent(self.OnCodeModified)

        self.btnSave = wx.Button(self, -1, "Save Changes")
        self.btnRestore = wx.Button(self, -1, "Delete Modified")
        self.btnSave.Enable(False)
        self.btnSave.Bind(wx.EVT_BUTTON, self.OnSave)
        self.btnRestore.Bind(wx.EVT_BUTTON, self.OnRestore)

        self.radioButtons = { modOriginal: wx.RadioButton(self, -1, "Original", style = wx.RB_GROUP),
                              modModified: wx.RadioButton(self, -1, "Modified") }

        self.controlBox = wx.BoxSizer(wx.HORIZONTAL)
        self.controlBox.Add(wx.StaticText(self, -1, "Active Version:"), 0,
                            wx.RIGHT | wx.LEFT | wx.ALIGN_CENTER_VERTICAL, 5)

        for modID, radioButton in self.radioButtons.items():
            self.controlBox.Add(radioButton, 0, wx.EXPAND | wx.RIGHT, 5)
            radioButton.modID = modID # makes it easier for the event handler
            radioButton.Bind(wx.EVT_RADIOBUTTON, self.OnRadioButton)

        self.controlBox.Add(self.btnSave, 0, wx.RIGHT, 5)
        self.controlBox.Add(self.btnRestore, 0)

        self.box = wx.BoxSizer(wx.VERTICAL)
        self.box.Add(self.controlBox, 0, wx.EXPAND)
        self.box.Add(wx.StaticLine(self), 0, wx.EXPAND)
        self.box.Add(self.editor, 1, wx.EXPAND)

        self.box.Fit(self)
        self.SetSizer(self.box)

    # Loads a demo from a DemoModules object
    def LoadDemo(self, demoModules):
        self.demoModules = demoModules
        # prefer the modified copy only when it is the configured default
        if (modDefault == modModified) and demoModules.Exists(modModified):
            demoModules.SetActive(modModified)
        else:
            demoModules.SetActive(modOriginal)
        self.radioButtons[demoModules.GetActiveID()].Enable(True)
        self.ActiveModuleChanged()

    def ActiveModuleChanged(self):
        """Refresh the editor and re-run the demo after a version switch."""
        self.LoadDemoSource(self.demoModules.GetSource())
        self.UpdateControlState()
        self.mainFrame.pnl.Freeze()
        self.ReloadDemo()
        self.mainFrame.pnl.Thaw()

    def LoadDemoSource(self, source):
        self.editor.Clear()
        self.editor.SetValue(source)
        self.JumpToLine(0)
        self.btnSave.Enable(False)

    def JumpToLine(self, line, highlight=False):
        self.editor.GotoLine(line)
        self.editor.SetFocus()
        if highlight:
            self.editor.SelectLine(line)

    def UpdateControlState(self):
        """Sync the radio buttons and restore button with the active module."""
        active = self.demoModules.GetActiveID()
        # Update the radio/restore buttons
        for moduleID in self.radioButtons:
            btn = self.radioButtons[moduleID]
            if moduleID == active:
                btn.SetValue(True)
            else:
                btn.SetValue(False)

            if self.demoModules.Exists(moduleID):
                btn.Enable(True)
                if moduleID == modModified:
                    self.btnRestore.Enable(True)
            else:
                btn.Enable(False)
                if moduleID == modModified:
                    self.btnRestore.Enable(False)

    def OnRadioButton(self, event):
        radioSelected = event.GetEventObject()
        modSelected = radioSelected.modID
        if modSelected != self.demoModules.GetActiveID():
            busy = wx.BusyInfo("Reloading demo module...")
            self.demoModules.SetActive(modSelected)
            self.ActiveModuleChanged()

    def ReloadDemo(self):
        # never re-run the demo framework itself
        if self.demoModules.name != __name__:
            self.mainFrame.RunModule()

    def OnCodeModified(self, event):
        self.btnSave.Enable(self.editor.IsModified())

    def OnSave(self, event):
        """Write the editor contents to the modified-demos directory and
        make the saved copy the active version."""
        if self.demoModules.Exists(modModified):
            if self.demoModules.GetActiveID() == modOriginal:
                overwriteMsg = "You are about to overwrite an already existing modified copy\n" + \
                               "Do you want to continue?"
                dlg = wx.MessageDialog(self, overwriteMsg, "wxPython Demo",
                                       wx.YES_NO | wx.NO_DEFAULT| wx.ICON_EXCLAMATION)
                result = dlg.ShowModal()
                if result == wx.ID_NO:
                    return
                dlg.Destroy()

        self.demoModules.SetActive(modModified)
        modifiedFilename = GetModifiedFilename(self.demoModules.name)

        # Create the demo directory if one doesn't already exist
        if not os.path.exists(GetModifiedDirectory()):
            try:
                os.makedirs(GetModifiedDirectory())
                if not os.path.exists(GetModifiedDirectory()):
                    wx.LogMessage("BUG: Created demo directory but it still doesn't exist")
                    raise AssertionError
            except:
                wx.LogMessage("Error creating demo directory: %s" % GetModifiedDirectory())
                return
            else:
                wx.LogMessage("Created directory for modified demos: %s" % GetModifiedDirectory())

        # Save
        f = open(modifiedFilename, "wt")
        source = self.editor.GetText()
        try:
            f.write(source)
        finally:
            f.close()

        busy = wx.BusyInfo("Reloading demo module...")
        self.demoModules.LoadFromFile(modModified, modifiedFilename)
        self.ActiveModuleChanged()
        self.mainFrame.SetTreeModified(True)

    def OnRestore(self, event): # Handles the "Delete Modified" button
        modifiedFilename = GetModifiedFilename(self.demoModules.name)
        self.demoModules.Delete(modModified)
        os.unlink(modifiedFilename) # Delete the modified copy
        busy = wx.BusyInfo("Reloading demo module...")
        self.ActiveModuleChanged()
        self.mainFrame.SetTreeModified(False)
#---------------------------------------------------------------------------
def opj(path):
    """Convert a '/'-separated path to the platform-specific separator"""
    joined = os.path.join(*path.split('/'))
    # HACK: on Linux, os.path.join drops a leading '/', so restore it
    if path.startswith('/'):
        joined = '/' + joined
    return joined
def GetDataDir():
    """
    Return the standard location on this platform for application data
    """
    return wx.StandardPaths.Get().GetUserDataDir()
def GetModifiedDirectory():
    """
    Return the directory where modified versions of the demo files
    are stored
    """
    # kept alongside the rest of the app data
    return os.path.join(GetDataDir(), "modified")
def GetModifiedFilename(name):
    """
    Returns the filename of the modified version of the specified demo
    """
    # Normalize the demo name to a .py filename before joining.
    filename = name if name.endswith(".py") else name + ".py"
    return os.path.join(GetModifiedDirectory(), filename)
def GetOriginalFilename(name):
    """
    Returns the filename of the original version of the specified demo
    """
    if not name.endswith(".py"):
        name += ".py"
    # A direct hit in the current directory wins.
    if os.path.isfile(name):
        return name
    # Otherwise scan every sub-directory of the demo directory for the file.
    for entry in os.listdir(os.getcwd()):
        if os.path.isdir(entry) and name in os.listdir(entry):
            return os.path.join(entry, name)
    # We must return a string...
    return ""
def DoesModifiedExist(name):
    """Returns whether the specified demo has a modified copy"""
    # os.path.exists already returns a bool; no need for if/else True/False.
    return os.path.exists(GetModifiedFilename(name))
def GetConfig():
    """Return the wx.FileConfig that stores the demo's saved options."""
    dataDir = GetDataDir()
    if not os.path.exists(dataDir):
        os.makedirs(dataDir)
    return wx.FileConfig(localFilename=os.path.join(dataDir, "options"))
def MakeDocDirs():
docDir = os.path.join(GetDataDir(), "docs")
if not os.path.exists(docDir):
os.makedirs(docDir)
for plat in _platformNames:
imageDir = os.path.join(docDir, "images", plat)
if not os.path.exists(imageDir):
os.makedirs(imageDir)
def GetDocFile():
    """Return the path of the pickle file where downloaded docs are cached."""
    return os.path.join(GetDataDir(), "docs", "TrunkDocs.pkl")
def GetDocImagesDir():
    """Return the docs image directory, creating the doc tree if needed."""
    MakeDocDirs()
    imagesDir = os.path.join(GetDataDir(), "docs", "images")
    return imagesDir
def SearchDemo(name, keyword):
    """ Returns whether a demo contains the search keyword or not. """
    fid = open(GetOriginalFilename(name), "rt")
    try:
        fullText = fid.read()
    finally:
        # Always release the file handle, even if the read raises.
        fid.close()
    if six.PY2:
        # On Python 2 the raw bytes must be decoded before searching.
        fullText = fullText.decode("iso-8859-1")
    # 'in' is the idiomatic (and equivalent) form of find(...) >= 0.
    return keyword in fullText
def HuntExternalDemos():
    """
    Searches for external demos (i.e. packages like AGW) in the wxPython
    demo sub-directories. In order to be found, these external packages
    must have a __demo__.py file in their directory.
    """
    externalDemos = {}
    originalDir = os.getcwd()
    listDir = os.listdir(originalDir)
    # Loop over the content of the demo directory
    for item in listDir:
        if not os.path.isdir(item):
            # Not a directory, continue
            continue
        dirFile = os.listdir(item)
        # See if a __demo__.py file is there
        if "__demo__.py" in dirFile:
            # Extend sys.path and import the external demos
            # NOTE(review): every package's entry module is named "__demo__";
            # Python caches modules by name, so with several such packages the
            # first one on sys.path may shadow the rest -- verify.
            sys.path.append(item)
            externalDemos[item] = __import__("__demo__")
    if not externalDemos:
        # Nothing to import...
        return {}
    # Modify the tree items and icons
    index = 0
    for category, demos in _treeList:
        # We put the external packages right before the
        # More Windows/Controls item
        if category == "More Windows/Controls":
            break
        index += 1
    # Sort and reverse the external demos keys so that they
    # come back in alphabetical order
    keys = list(externalDemos.keys())
    keys.sort()
    keys.reverse()
    # Loop over all external packages
    for extern in keys:
        package = externalDemos[extern]
        # Insert a new package in the _treeList of demos
        _treeList.insert(index, package.GetDemos())
        # Get the recent additions for this package
        _treeList[0][1].extend(package.GetRecentAdditions())
        # Extend the demo bitmaps and the catalog
        _demoPngs.insert(index+1, extern)
        images.catalog[extern] = package.GetDemoBitmap()
    # That's all folks...
    return externalDemos
def LookForExternals(externalDemos, demoName):
    """
    Checks if a demo name is in any of the external packages (like AGW) or
    if the user clicked on one of the external packages parent items in the
    tree, in which case it returns the html overview for the package.
    """
    # Scan every registered external package for a match.
    for packageName, package in externalDemos.items():
        treeName, treeDemos = package.GetDemos()
        treeOverview = package.GetOverview()
        if treeName == demoName:
            # Parent tree item clicked: hand back the package overview only.
            return None, treeOverview
        if demoName in treeDemos:
            # An actual demo inside this package: hand back its key only.
            return packageName, None
    # No match found, return None for both
    return None, None
#---------------------------------------------------------------------------
class ModuleDictWrapper(object):
    """Emulates a module with a dynamically compiled __dict__"""

    def __init__(self, dict):
        self.dict = dict

    def __getattr__(self, name):
        # Guard clause: unknown names must raise AttributeError so that
        # hasattr()/getattr() behave like they would on a real module.
        contents = self.dict
        if name not in contents:
            raise AttributeError
        return contents[name]
class DemoModules(object):
    """
    Dynamically manages the original/modified versions of a demo
    module
    """
    def __init__(self, name):
        self.modActive = -1
        self.name = name
        # Each slot: (dict , source , filename , description , error information )
        #            (  0  ,   1    ,    2     ,      3      ,         4         )
        self.modules = [[dict(), "", "", "<original>", None],
                        [dict(), "", "", "<modified>", None]]
        getcwd = os.getcwd if six.PY3 else os.getcwdu
        for i in [modOriginal, modModified]:
            self.modules[i][0]['__file__'] = \
                os.path.join(getcwd(), GetOriginalFilename(name))
        # load original module
        self.LoadFromFile(modOriginal, GetOriginalFilename(name))
        self.SetActive(modOriginal)
        # load modified module (if one exists)
        if DoesModifiedExist(name):
            self.LoadFromFile(modModified, GetModifiedFilename(name))

    def LoadFromFile(self, modID, filename):
        """Load the source for slot *modID* from *filename* and compile it."""
        self.modules[modID][2] = filename
        file = open(filename, "rt")
        self.LoadFromSource(modID, file.read())
        file.close()

    def LoadFromSource(self, modID, source):
        """Store *source* for slot *modID* and (re)compile it."""
        self.modules[modID][1] = source
        self.LoadDict(modID)

    def LoadDict(self, modID):
        """Compile and exec the stored source into the slot's module dict.

        On failure, the error-info slot is filled with a DemoError and the
        module dict is cleared so GetActive() returns None.
        """
        if self.name != __name__:
            source = self.modules[modID][1]
            description = self.modules[modID][2]
            if six.PY2:
                description = description.encode(sys.getfilesystemencoding())
            try:
                code = compile(source, description, "exec")
                exec_(code, self.modules[modID][0])
            except:
                self.modules[modID][4] = DemoError(sys.exc_info())
                self.modules[modID][0] = None
            else:
                self.modules[modID][4] = None

    def SetActive(self, modID):
        """Make *modID* (modOriginal or modModified) the active slot."""
        if modID != modOriginal and modID != modModified:
            raise LookupError
        else:
            self.modActive = modID

    def GetActive(self):
        """Return the active module wrapped as a pseudo-module, or None."""
        dict = self.modules[self.modActive][0]
        if dict is None:
            return None
        else:
            return ModuleDictWrapper(dict)

    def GetActiveID(self):
        return self.modActive

    def GetSource(self, modID = None):
        """Return the source text for *modID* (default: the active module)."""
        if modID is None:
            modID = self.modActive
        return self.modules[modID][1]

    def GetFilename(self, modID = None):
        """Return the filename for *modID* (default: the active module)."""
        if modID is None:
            modID = self.modActive
        # BUGFIX: honour the requested modID; previously this always
        # returned the active module's filename.
        return self.modules[modID][2]

    def GetErrorInfo(self, modID = None):
        """Return the DemoError for *modID* (default: the active module)."""
        if modID is None:
            modID = self.modActive
        # BUGFIX: honour the requested modID; previously this always
        # returned the active module's error info.
        return self.modules[modID][4]

    def Exists(self, modID):
        return self.modules[modID][1] != ""

    def UpdateFile(self, modID = None):
        """Updates the file from which a module was loaded
        with (possibly updated) source"""
        if modID is None:
            modID = self.modActive
        source = self.modules[modID][1]
        filename = self.modules[modID][2]
        try:
            file = open(filename, "wt")
            file.write(source)
        finally:
            file.close()

    def Delete(self, modID):
        """Clear slot *modID*; the original becomes active if it was active."""
        if self.modActive == modID:
            self.SetActive(0)
        self.modules[modID][0] = None
        self.modules[modID][1] = ""
        self.modules[modID][2] = ""
#---------------------------------------------------------------------------
class DemoError(object):
    """Wraps and stores information about the current exception"""
    def __init__(self, exc_info):
        """Capture type, details and traceback from a sys.exc_info() tuple."""
        excType, excValue = exc_info[:2]
        # traceback list entries: (filename, line number, function name, text)
        self.traceback = traceback.extract_tb(exc_info[2])
        # --Based on traceback.py::format_exception_only()--
        if isinstance(excType, type):
            self.exception_type = excType.__name__
        else:
            self.exception_type = excType
        # If it's a syntax error, extra information needs
        # to be added to the traceback
        if excType is SyntaxError:
            try:
                msg, (filename, lineno, self.offset, line) = excValue
            except:
                pass
            else:
                if not filename:
                    filename = "<string>"
                line = line.strip()
                self.traceback.append((filename, lineno, "", line))
                excValue = msg
        try:
            self.exception_details = str(excValue)
        except:
            # BUGFIX: '%' string formatting, not '&' (bitwise-and raised a
            # TypeError here and masked the original error).
            self.exception_details = "<unprintable %s object>" % type(excValue).__name__
        # Drop the exc_info reference so we don't keep frames alive.
        del exc_info

    def __str__(self):
        """Return a human-readable summary of the stored exception."""
        ret = "Type %s \n \
Traceback: %s \n \
Details : %s" % ( str(self.exception_type), str(self.traceback), self.exception_details )
        return ret
#---------------------------------------------------------------------------
class DemoErrorPanel(wx.Panel):
    """Panel put into the demo tab when the demo fails to run due to errors"""

    def __init__(self, parent, codePanel, demoError, log):
        """Build the error display: exception info grid plus traceback list.

        parent    -- the main notebook (used to switch tabs on double-click)
        codePanel -- the code editor panel, for jumping to the failing line
        demoError -- a DemoError instance describing the failure
        log       -- the application log target
        """
        wx.Panel.__init__(self, parent, -1)#, style=wx.NO_FULL_REPAINT_ON_RESIZE)
        self.codePanel = codePanel
        self.nb = parent
        self.log = log
        self.box = wx.BoxSizer(wx.VERTICAL)
        # Main Label
        self.box.Add(wx.StaticText(self, -1, "An error has occurred while trying to run the demo")
                     , 0, wx.ALIGN_CENTER | wx.TOP, 10)
        # Exception Information
        boxInfo = wx.StaticBox(self, -1, "Exception Info" )
        boxInfoSizer = wx.StaticBoxSizer(boxInfo, wx.VERTICAL ) # Used to center the grid within the box
        boxInfoGrid = wx.FlexGridSizer( cols=2 )
        textFlags = wx.ALIGN_RIGHT | wx.LEFT | wx.RIGHT | wx.TOP
        boxInfoGrid.Add(wx.StaticText(self, -1, "Type: "), 0, textFlags, 5 )
        boxInfoGrid.Add(wx.StaticText(self, -1, str(demoError.exception_type)) , 0, textFlags, 5 )
        boxInfoGrid.Add(wx.StaticText(self, -1, "Details: ") , 0, textFlags, 5 )
        boxInfoGrid.Add(wx.StaticText(self, -1, demoError.exception_details) , 0, textFlags, 5 )
        boxInfoSizer.Add(boxInfoGrid, 0, wx.ALIGN_CENTRE | wx.ALL, 5 )
        self.box.Add(boxInfoSizer, 0, wx.ALIGN_CENTER | wx.ALL, 5)
        # Set up the traceback list
        # This one automatically resizes last column to take up remaining space
        from ListCtrl import TestListCtrl
        self.list = TestListCtrl(self, -1, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
        self.list.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
        self.list.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected)
        self.list.InsertColumn(0, "Filename")
        self.list.InsertColumn(1, "Line", wx.LIST_FORMAT_RIGHT)
        self.list.InsertColumn(2, "Function")
        self.list.InsertColumn(3, "Code")
        self.InsertTraceback(self.list, demoError.traceback)
        self.list.SetColumnWidth(0, wx.LIST_AUTOSIZE)
        self.list.SetColumnWidth(2, wx.LIST_AUTOSIZE)
        self.box.Add(wx.StaticText(self, -1, "Traceback:")
                     , 0, wx.ALIGN_CENTER | wx.TOP, 5)
        self.box.Add(self.list, 1, wx.EXPAND | wx.ALL, 5)
        self.box.Add(wx.StaticText(self, -1, "Entries from the demo module are shown in blue\n"
                     + "Double-click on them to go to the offending line")
                     , 0, wx.ALIGN_CENTER | wx.BOTTOM, 5)
        self.box.Fit(self)
        self.SetSizer(self.box)

    def InsertTraceback(self, list, traceback):
        """Fill the list control with the traceback entries."""
        #Add the traceback data
        for x in range(len(traceback)):
            data = traceback[x]
            list.InsertItem(x, os.path.basename(data[0])) # Filename
            list.SetItem(x, 1, str(data[1])) # Line
            list.SetItem(x, 2, str(data[2])) # Function
            list.SetItem(x, 3, str(data[3])) # Code
            # Check whether this entry is from the demo module
            if data[0] == "<original>" or data[0] == "<modified>": # FIXME: make more generalised
                self.list.SetItemData(x, int(data[1])) # Store line number for easy access
                # Give it a blue colour
                item = self.list.GetItem(x)
                item.SetTextColour(wx.BLUE)
                self.list.SetItem(item)
            else:
                self.list.SetItemData(x, -1) # Editor can't jump into this one's code

    def OnItemSelected(self, event):
        # This occurs before OnDoubleClick and can be used to set the
        # currentItem. OnDoubleClick doesn't get a wxListEvent....
        self.currentItem = event.Index
        event.Skip()

    def OnDoubleClick(self, event):
        # If double-clicking on a demo's entry, jump to the line number
        line = self.list.GetItemData(self.currentItem)
        if line != -1:
            self.nb.SetSelection(1) # Switch to the code viewer tab
            wx.CallAfter(self.codePanel.JumpToLine, line-1, True)
        event.Skip()
#---------------------------------------------------------------------------
class MainPanel(wx.Panel):
    """
    Just a simple derived panel where we override Freeze and Thaw to work
    around an issue on wxGTK.
    """
    def Freeze(self):
        # Guard clause: only actually freeze on wxMSW; elsewhere it's a no-op.
        if 'wxMSW' not in wx.PlatformInfo:
            return
        return super(MainPanel, self).Freeze()

    def Thaw(self):
        # Mirror Freeze: thaw only on wxMSW.
        if 'wxMSW' not in wx.PlatformInfo:
            return
        return super(MainPanel, self).Thaw()
#---------------------------------------------------------------------------
class DemoTaskBarIcon(TaskBarIcon):
    """Taskbar/dock icon for the demo, with a small popup menu."""

    # Menu item IDs for the popup menu created in CreatePopupMenu.
    TBMENU_RESTORE = wx.NewIdRef()
    TBMENU_CLOSE = wx.NewIdRef()
    TBMENU_CHANGE = wx.NewIdRef()
    TBMENU_REMOVE = wx.NewIdRef()

    def __init__(self, frame):
        """Create the icon and wire up its menu/activation handlers."""
        TaskBarIcon.__init__(self, wx.adv.TBI_DOCK) # wx.adv.TBI_CUSTOM_STATUSITEM
        self.frame = frame
        # Set the image
        icon = self.MakeIcon(images.WXPdemo.GetImage())
        self.SetIcon(icon, "wxPython Demo")
        # Index into the icon-name cycle used by OnTaskBarChange.
        self.imgidx = 1
        # bind some events
        self.Bind(wx.adv.EVT_TASKBAR_LEFT_DCLICK, self.OnTaskBarActivate)
        self.Bind(wx.EVT_MENU, self.OnTaskBarActivate, id=self.TBMENU_RESTORE)
        self.Bind(wx.EVT_MENU, self.OnTaskBarClose, id=self.TBMENU_CLOSE)
        self.Bind(wx.EVT_MENU, self.OnTaskBarChange, id=self.TBMENU_CHANGE)
        self.Bind(wx.EVT_MENU, self.OnTaskBarRemove, id=self.TBMENU_REMOVE)

    def CreatePopupMenu(self):
        """
        This method is called by the base class when it needs to popup
        the menu for the default EVT_RIGHT_DOWN event. Just create
        the menu how you want it and return it from this function,
        the base class takes care of the rest.
        """
        menu = wx.Menu()
        menu.Append(self.TBMENU_RESTORE, "Restore wxPython Demo")
        menu.Append(self.TBMENU_CLOSE, "Close wxPython Demo")
        menu.AppendSeparator()
        menu.Append(self.TBMENU_CHANGE, "Change the TB Icon")
        menu.Append(self.TBMENU_REMOVE, "Remove the TB Icon")
        return menu

    def MakeIcon(self, img):
        """
        The various platforms have different requirements for the
        icon size...
        """
        if "wxMSW" in wx.PlatformInfo:
            img = img.Scale(16, 16)
        elif "wxGTK" in wx.PlatformInfo:
            img = img.Scale(22, 22)
        # wxMac can be any size upto 128x128, so leave the source img alone....
        icon = wx.Icon(img.ConvertToBitmap())
        return icon

    def OnTaskBarActivate(self, evt):
        """Restore and raise the main frame when the icon is activated."""
        if self.frame.IsIconized():
            self.frame.Iconize(False)
        if not self.frame.IsShown():
            self.frame.Show(True)
        self.frame.Raise()

    def OnTaskBarClose(self, evt):
        # CallAfter so the menu event finishes before the frame closes.
        wx.CallAfter(self.frame.Close)

    def OnTaskBarChange(self, evt):
        """Cycle the taskbar icon through a few sample images."""
        names = [ "WXPdemo", "Mondrian", "Pencil", "Carrot" ]
        name = names[self.imgidx]
        eImg = getattr(images, name)
        self.imgidx += 1
        if self.imgidx >= len(names):
            self.imgidx = 0
        icon = self.MakeIcon(eImg.Image)
        self.SetIcon(icon, "This is a new icon: " + name)

    def OnTaskBarRemove(self, evt):
        self.RemoveIcon()
#---------------------------------------------------------------------------
class wxPythonDemo(wx.Frame):
overviewText = "wxPython Overview"
def __init__(self, parent, title):
    """Build the whole demo frame: notebook, demo tree, log window and
    AUI layout, then load the initial (overview) module."""
    wx.Frame.__init__(self, parent, -1, title, size = (970, 720),
                      style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)
    self.SetMinSize((640,480))
    self.pnl = pnl = MainPanel(self)
    self.mgr = aui.AuiManager()
    self.mgr.SetManagedWindow(pnl)
    # State flags and references used by the event handlers below.
    self.loaded = False
    self.cwd = os.getcwd()
    self.curOverview = ""
    self.demoPage = None
    self.codePage = None
    self.shell = None
    self.firstTime = True
    self.finddlg = None
    icon = images.WXPdemo.GetIcon()
    self.SetIcon(icon)
    try:
        self.tbicon = DemoTaskBarIcon(self)
    except:
        # Some platforms/builds have no taskbar support; run without it.
        self.tbicon = None
    self.otherWin = None
    # Docs-download machinery state.
    self.allowDocs = False
    self.downloading = False
    self.internetThread = None
    self.downloadImage = 2
    self.sendDownloadError = True
    self.downloadTimer = wx.Timer(self, wx.ID_ANY)
    self.Bind(wx.EVT_IDLE, self.OnIdle)
    self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
    # NOTE(review): handler name "OnIconfiy" looks like a typo for
    # "OnIconify" but must match the method defined elsewhere in this file.
    self.Bind(wx.EVT_ICONIZE, self.OnIconfiy)
    self.Bind(wx.EVT_MAXIMIZE, self.OnMaximize)
    self.Bind(wx.EVT_TIMER, self.OnDownloadTimer, self.downloadTimer)
    self.Centre(wx.BOTH)
    # Status bar with a (hidden) download-progress gauge in field 1.
    self.statusBar = self.CreateStatusBar(2)#, wx.ST_SIZEGRIP
    self.statusBar.SetStatusWidths([-2, -1])
    statusText = "Welcome to wxPython %s" % wx.VERSION_STRING
    self.statusBar.SetStatusText(statusText, 0)
    self.downloadGauge = wx.Gauge(self.statusBar, wx.ID_ANY, 50)
    self.downloadGauge.SetToolTip("Downloading Docs...")
    self.downloadGauge.Hide()
    self.sizeChanged = False
    self.Reposition()
    self.statusBar.Bind(wx.EVT_SIZE, self.OnStatusBarSize)
    self.statusBar.Bind(wx.EVT_IDLE, self.OnStatusBarIdle)
    self.dying = False
    self.skipLoad = False
    self.allowAuiFloating = False
    def EmptyHandler(evt): pass
    self.ReadConfigurationFile()
    self.externalDemos = HuntExternalDemos()
    # Create a Notebook
    self.nb = wx.Notebook(pnl, -1, style=wx.CLIP_CHILDREN)
    if 'wxMac' not in wx.PlatformInfo:
        # Page icons: overview/code/demo plus the spinning "busy" frames.
        imgList = wx.ImageList(16, 16)
        for png in ["overview", "code", "demo"]:
            bmp = images.catalog[png].GetBitmap()
            imgList.Add(bmp)
        for indx in range(9):
            bmp = images.catalog["spinning_nb%d"%indx].GetBitmap()
            imgList.Add(bmp)
        self.nb.AssignImageList(imgList)
    self.BuildMenuBar()
    self.finddata = wx.FindReplaceData()
    self.finddata.SetFlags(wx.FR_DOWN)
    # Create a TreeCtrl
    leftPanel = wx.Panel(pnl, style=wx.TAB_TRAVERSAL|wx.CLIP_CHILDREN)
    self.treeMap = {}
    self.searchItems = {}
    self.tree = wxPythonDemoTree(leftPanel)
    # Search/filter box under the tree.
    self.filter = wx.SearchCtrl(leftPanel, style=wx.TE_PROCESS_ENTER)
    self.filter.ShowCancelButton(True)
    self.filter.Bind(wx.EVT_TEXT, self.RecreateTree)
    self.filter.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN,
                     lambda e: self.filter.SetValue(''))
    self.filter.Bind(wx.EVT_TEXT_ENTER, self.OnSearch)
    if 'gtk3' in wx.PlatformInfo:
        # Something is wrong with the bestsize of the SearchCtrl, so for now
        # let's set it based on the size of a TextCtrl.
        txt = wx.TextCtrl(leftPanel)
        bs = txt.GetBestSize()
        txt.DestroyLater()
        self.filter.SetMinSize((-1, bs.height+4))
    # Radio menu on the search control: search by name or by content.
    searchMenu = wx.Menu()
    item = searchMenu.AppendRadioItem(-1, "Sample Name")
    self.Bind(wx.EVT_MENU, self.OnSearchMenu, item)
    item = searchMenu.AppendRadioItem(-1, "Sample Content")
    self.Bind(wx.EVT_MENU, self.OnSearchMenu, item)
    self.filter.SetMenu(searchMenu)
    self.RecreateTree()
    self.tree.SetExpansionState(self.expansionState)
    self.tree.Bind(wx.EVT_TREE_ITEM_EXPANDED, self.OnItemExpanded)
    self.tree.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self.OnItemCollapsed)
    self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
    self.tree.Bind(wx.EVT_LEFT_DOWN, self.OnTreeLeftDown)
    # Set up a wx.html.HtmlWindow on the Overview Notebook page
    # we put it in a panel first because there seems to be a
    # refresh bug of some sort (wxGTK) when it is directly in
    # the notebook...
    if 0: # the old way
        self.ovr = wx.html.HtmlWindow(self.nb, -1, size=(400, 400))
        self.nb.AddPage(self.ovr, self.overviewText, imageId=0)
    else: # hopefully I can remove this hacky code soon, see SF bug #216861
        panel = wx.Panel(self.nb, -1, style=wx.CLIP_CHILDREN)
        self.ovr = wx.html.HtmlWindow(panel, -1, size=(400, 400))
        self.nb.AddPage(panel, self.overviewText, imageId=0)
        def OnOvrSize(evt, ovr=self.ovr):
            ovr.SetSize(evt.GetSize())
        panel.Bind(wx.EVT_SIZE, OnOvrSize)
        panel.Bind(wx.EVT_ERASE_BACKGROUND, EmptyHandler)
    if "gtk2" in wx.PlatformInfo or "gtk3" in wx.PlatformInfo:
        self.ovr.SetStandardFonts()
    self.SetOverview(self.overviewText, mainOverview)
    # Set up a log window
    self.log = wx.TextCtrl(pnl, -1,
                           style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL)
    if wx.Platform == "__WXMAC__":
        self.log.MacCheckSpelling(False)
    # Set the wxWindows log target to be this textctrl
    #wx.Log.SetActiveTarget(wx.LogTextCtrl(self.log))
    # But instead of the above we want to show how to use our own wx.Log class
    wx.Log.SetActiveTarget(MyLog(self.log))
    # for serious debugging
    #wx.Log.SetActiveTarget(wx.LogStderr())
    #wx.Log.SetTraceMask(wx.TraceMessages)
    self.Bind(wx.EVT_ACTIVATE, self.OnActivate)
    wx.GetApp().Bind(wx.EVT_ACTIVATE_APP, self.OnAppActivate)
    # add the windows to the splitter and split it.
    leftBox = wx.BoxSizer(wx.VERTICAL)
    leftBox.Add(self.tree, 1, wx.EXPAND)
    leftBox.Add(wx.StaticText(leftPanel, label = "Filter Demos:"), 0, wx.TOP|wx.LEFT, 5)
    leftBox.Add(self.filter, 0, wx.EXPAND|wx.ALL, 5)
    if 'wxMac' in wx.PlatformInfo:
        leftBox.Add((5,5)) # Make sure there is room for the focus ring
    leftPanel.SetSizer(leftBox)
    # select initial items
    self.nb.SetSelection(0)
    self.tree.SelectItem(self.root)
    # Load 'Main' module
    self.LoadDemo(self.overviewText)
    self.loaded = True
    # select some other initial module?
    if len(sys.argv) > 1:
        arg = sys.argv[1]
        if arg.endswith('.py'):
            arg = arg[:-3]
        selectedDemo = self.treeMap.get(arg, None)
        if selectedDemo:
            self.tree.SelectItem(selectedDemo)
            self.tree.EnsureVisible(selectedDemo)
    # Use the aui manager to set up everything
    self.mgr.AddPane(self.nb, aui.AuiPaneInfo().CenterPane().Name("Notebook"))
    self.mgr.AddPane(leftPanel,
                     aui.AuiPaneInfo().
                     Left().Layer(2).BestSize((240, -1)).
                     MinSize((240, -1)).
                     Floatable(self.allowAuiFloating).FloatingSize((240, 700)).
                     Caption("wxPython Demos").
                     CloseButton(False).
                     Name("DemoTree"))
    self.mgr.AddPane(self.log,
                     aui.AuiPaneInfo().
                     Bottom().BestSize((-1, 150)).
                     MinSize((-1, 140)).
                     Floatable(self.allowAuiFloating).FloatingSize((500, 160)).
                     Caption("Demo Log Messages").
                     CloseButton(False).
                     Name("LogWindow"))
    self.auiConfigurations[DEFAULT_PERSPECTIVE] = self.mgr.SavePerspective()
    self.mgr.Update()
    self.mgr.SetAGWFlags(self.mgr.GetAGWFlags() ^ aui.AUI_MGR_TRANSPARENT_DRAG)
def ReadConfigurationFile(self):
    """Load saved options (tree expansion state, AUI layouts, feature flags)
    and the pickled docs cache from the per-user data directory."""
    self.auiConfigurations = {}
    self.expansionState = [0, 1]
    config = GetConfig()
    # NOTE(review): eval() on config values executes arbitrary code if the
    # options file is tampered with -- consider ast.literal_eval here.
    val = config.Read('ExpansionState')
    if val:
        self.expansionState = eval(val)
    val = config.Read('AUIPerspectives')
    if val:
        self.auiConfigurations = eval(val)
    val = config.Read('AllowDownloads')
    if val:
        self.allowDocs = eval(val)
    val = config.Read('AllowAUIFloating')
    if val:
        self.allowAuiFloating = eval(val)
    MakeDocDirs()
    pickledFile = GetDocFile()
    if not os.path.isfile(pickledFile):
        # No cache yet -- start empty.
        self.pickledData = {}
        return
    fid = open(pickledFile, "rb")
    try:
        # A corrupt/incompatible pickle just resets the cache.
        self.pickledData = cPickle.load(fid)
    except:
        self.pickledData = {}
    fid.close()
def BuildMenuBar(self):
    """Create the frame's menu bar: File, Demo, Options and Help menus."""
    # Make a File menu
    self.mainmenu = wx.MenuBar()
    menu = wx.Menu()
    item = menu.Append(-1, '&Redirect Output',
                       'Redirect print statements to a window',
                       wx.ITEM_CHECK)
    self.Bind(wx.EVT_MENU, self.OnToggleRedirect, item)
    exitItem = wx.MenuItem(menu, wx.ID_EXIT, 'E&xit\tCtrl-Q', 'Get the heck outta here!')
    exitItem.SetBitmap(images.catalog['exit'].GetBitmap())
    menu.Append(exitItem)
    self.Bind(wx.EVT_MENU, self.OnFileExit, exitItem)
    self.mainmenu.Append(menu, '&File')
    # Make a Demo menu: one submenu per category (last _treeList entry skipped).
    menu = wx.Menu()
    for indx, item in enumerate(_treeList[:-1]):
        menuItem = wx.MenuItem(menu, -1, item[0])
        submenu = wx.Menu()
        for childItem in item[1]:
            mi = submenu.Append(-1, childItem)
            self.Bind(wx.EVT_MENU, self.OnDemoMenu, mi)
        menuItem.SetBitmap(images.catalog[_demoPngs[indx+1]].GetBitmap())
        menuItem.SetSubMenu(submenu)
        menu.Append(menuItem)
    self.mainmenu.Append(menu, '&Demo')
    # Make an Option menu
    menu = wx.Menu()
    item = wx.MenuItem(menu, -1, 'Allow download of docs', 'Docs for window styles and events from the web', wx.ITEM_CHECK)
    menu.Append(item)
    item.Check(self.allowDocs)
    self.Bind(wx.EVT_MENU, self.OnAllowDownload, item)
    item = wx.MenuItem(menu, -1, 'Delete saved docs', 'Deletes the cPickle file where docs are stored')
    item.SetBitmap(images.catalog['deletedocs'].GetBitmap())
    menu.Append(item)
    self.Bind(wx.EVT_MENU, self.OnDeleteDocs, item)
    menu.AppendSeparator()
    item = wx.MenuItem(menu, -1, 'Allow floating panes', 'Allows the demo panes to be floated using wxAUI', wx.ITEM_CHECK)
    menu.Append(item)
    item.Check(self.allowAuiFloating)
    self.Bind(wx.EVT_MENU, self.OnAllowAuiFloating, item)
    # AUI perspectives submenu: default first, then any user-saved layouts.
    auiPerspectives = list(self.auiConfigurations.keys())
    auiPerspectives.sort()
    perspectivesMenu = wx.Menu()
    item = wx.MenuItem(perspectivesMenu, -1, DEFAULT_PERSPECTIVE, "Load startup default perspective", wx.ITEM_RADIO)
    self.Bind(wx.EVT_MENU, self.OnAUIPerspectives, item)
    perspectivesMenu.Append(item)
    for indx, key in enumerate(auiPerspectives):
        if key == DEFAULT_PERSPECTIVE:
            continue
        item = wx.MenuItem(perspectivesMenu, -1, key, "Load user perspective %d"%indx, wx.ITEM_RADIO)
        perspectivesMenu.Append(item)
        self.Bind(wx.EVT_MENU, self.OnAUIPerspectives, item)
    menu.Append(wx.ID_ANY, "&AUI Perspectives", perspectivesMenu)
    self.perspectives_menu = perspectivesMenu
    item = wx.MenuItem(menu, -1, 'Save Perspective', 'Save AUI perspective')
    item.SetBitmap(images.catalog['saveperspective'].GetBitmap())
    menu.Append(item)
    self.Bind(wx.EVT_MENU, self.OnSavePerspective, item)
    item = wx.MenuItem(menu, -1, 'Delete Perspective', 'Delete AUI perspective')
    item.SetBitmap(images.catalog['deleteperspective'].GetBitmap())
    menu.Append(item)
    self.Bind(wx.EVT_MENU, self.OnDeletePerspective, item)
    menu.AppendSeparator()
    item = wx.MenuItem(menu, -1, 'Restore Tree Expansion', 'Restore the initial tree expansion state')
    item.SetBitmap(images.catalog['expansion'].GetBitmap())
    menu.Append(item)
    self.Bind(wx.EVT_MENU, self.OnTreeExpansion, item)
    self.mainmenu.Append(menu, '&Options')
    self.options_menu = menu
    # Make a Help menu
    menu = wx.Menu()
    findItem = wx.MenuItem(menu, -1, '&Find\tCtrl-F', 'Find in the Demo Code')
    findItem.SetBitmap(images.catalog['find'].GetBitmap())
    if 'wxMac' not in wx.PlatformInfo:
        findNextItem = wx.MenuItem(menu, -1, 'Find &Next\tF3', 'Find Next')
    else:
        findNextItem = wx.MenuItem(menu, -1, 'Find &Next\tCtrl-G', 'Find Next')
    findNextItem.SetBitmap(images.catalog['findnext'].GetBitmap())
    menu.Append(findItem)
    menu.Append(findNextItem)
    menu.AppendSeparator()
    # BUGFIX: user-visible help string said "namesapce".
    shellItem = wx.MenuItem(menu, -1, 'Open Py&Shell Window\tF5',
                            'An interactive interpreter window with the demo app and frame objects in the namespace')
    shellItem.SetBitmap(images.catalog['pyshell'].GetBitmap())
    menu.Append(shellItem)
    inspToolItem = wx.MenuItem(menu, -1, 'Open &Widget Inspector\tF6',
                               'A tool that lets you browse the live widgets and sizers in an application')
    inspToolItem.SetBitmap(images.catalog['inspect'].GetBitmap())
    menu.Append(inspToolItem)
    if 'wxMac' not in wx.PlatformInfo:
        menu.AppendSeparator()
    helpItem = menu.Append(wx.ID_ABOUT, '&About wxPython Demo', 'wxPython RULES!!!')
    self.Bind(wx.EVT_MENU, self.OnOpenShellWindow, shellItem)
    self.Bind(wx.EVT_MENU, self.OnOpenWidgetInspector, inspToolItem)
    self.Bind(wx.EVT_MENU, self.OnHelpAbout, helpItem)
    self.Bind(wx.EVT_MENU, self.OnHelpFind, findItem)
    self.Bind(wx.EVT_MENU, self.OnFindNext, findNextItem)
    self.Bind(wx.EVT_FIND, self.OnFind)
    self.Bind(wx.EVT_FIND_NEXT, self.OnFind)
    self.Bind(wx.EVT_FIND_CLOSE, self.OnFindClose)
    self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateFindItems, findItem)
    self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateFindItems, findNextItem)
    self.mainmenu.Append(menu, '&Help')
    self.SetMenuBar(self.mainmenu)
    self.EnableAUIMenu()
    if False:
        # This is another way to set Accelerators, in addition to
        # using the '\t<key>' syntax in the menu items.
        aTable = wx.AcceleratorTable([(wx.ACCEL_ALT, ord('X'), exitItem.GetId()),
                                      (wx.ACCEL_CTRL, ord('H'), helpItem.GetId()),
                                      (wx.ACCEL_CTRL, ord('F'), findItem.GetId()),
                                      (wx.ACCEL_NORMAL, wx.WXK_F3, findNextItem.GetId()),
                                      (wx.ACCEL_NORMAL, wx.WXK_F9, shellItem.GetId()),
                                      ])
        self.SetAcceleratorTable(aTable)
#---------------------------------------------
def RecreateTree(self, evt=None):
    """Rebuild the demo tree, applying the current filter text.

    Called both directly and as the wx.EVT_TEXT handler of the search box;
    preserves the expansion state and (where possible) the selection.
    """
    # Catch the search type (name or content)
    searchMenu = self.filter.GetMenu().GetMenuItems()
    fullSearch = searchMenu[1].IsChecked()
    if evt:
        if fullSearch:
            # Do not scan all the demo files for every char
            # the user input, use wx.EVT_TEXT_ENTER instead
            return
    expansionState = self.tree.GetExpansionState()
    # Remember the current selection as (item text, parent text) so it can
    # be re-selected after the rebuild.
    current = None
    item = self.tree.GetSelection()
    if item:
        prnt = self.tree.GetItemParent(item)
        if prnt:
            current = (self.tree.GetItemText(item),
                       self.tree.GetItemText(prnt))
    self.tree.Freeze()
    self.tree.DeleteAllItems()
    self.root = self.tree.AddRoot("wxPython Overview")
    self.tree.SetItemImage(self.root, 0)
    self.tree.SetItemData(self.root, 0)
    treeFont = self.tree.GetFont()
    catFont = self.tree.GetFont()
    # The native treectrl on MSW has a bug where it doesn't draw
    # all of the text for an item if the font is larger than the
    # default. It seems to be clipping the item's label as if it
    # was the size of the same label in the default font.
    if USE_CUSTOMTREECTRL or 'wxMSW' not in wx.PlatformInfo:
        treeFont.SetPointSize(treeFont.GetPointSize()+2)
    treeFont.SetWeight(wx.FONTWEIGHT_BOLD)
    catFont.SetWeight(wx.FONTWEIGHT_BOLD)
    self.tree.SetItemFont(self.root, treeFont)
    firstChild = None
    selectItem = None
    filter = self.filter.GetValue()
    count = 0
    for category, items in _treeList:
        count += 1
        if filter:
            if fullSearch:
                # Content search results were precomputed by OnSearch.
                items = self.searchItems[category]
            else:
                # Name search: simple case-insensitive substring match.
                items = [item for item in items if filter.lower() in item.lower()]
        if items:
            child = self.tree.AppendItem(self.root, category, image=count)
            self.tree.SetItemFont(child, catFont)
            self.tree.SetItemData(child, count)
            if not firstChild: firstChild = child
            for childItem in items:
                image = count
                # Demos with a modified copy get the special "modified" icon.
                if DoesModifiedExist(childItem):
                    image = len(_demoPngs)
                theDemo = self.tree.AppendItem(child, childItem, image=image)
                self.tree.SetItemData(theDemo, count)
                self.treeMap[childItem] = theDemo
                if current and (childItem, category) == current:
                    selectItem = theDemo
    self.tree.Expand(self.root)
    if firstChild:
        self.tree.Expand(firstChild)
    if filter:
        self.tree.ExpandAll()
    elif expansionState:
        self.tree.SetExpansionState(expansionState)
    if selectItem:
        # skipLoad suppresses OnSelChanged so the demo isn't reloaded.
        self.skipLoad = True
        self.tree.SelectItem(selectItem)
        self.skipLoad = False
    self.tree.Thaw()
    self.searchItems = {}
def OnStatusBarSize(self, evt):
    """Keep the download gauge positioned inside its status bar field."""
    self.Reposition() # for normal size events
    # Set a flag so the idle time handler will also do the repositioning.
    # It is done this way to get around a buglet where GetFieldRect is not
    # accurate during the EVT_SIZE resulting from a frame maximize.
    # (Note: Reposition() clears the flag, so it must be set after the call.)
    self.sizeChanged = True
def OnStatusBarIdle(self, evt):
    """Perform the reposition deferred by OnStatusBarSize (see its comment)."""
    if self.sizeChanged:
        self.Reposition()
# reposition the download gauge
# reposition the download gauge
def Reposition(self):
    """Reposition the download gauge inside status bar field 1.

    NOTE(review): the actual positioning code is currently commented out,
    so this only clears the deferred-reposition flag.
    """
    # rect = self.statusBar.GetFieldRect(1)
    # self.downloadGauge.SetPosition((rect.x+2, rect.y+2))
    # self.downloadGauge.SetSize((rect.width-4, rect.height-4))
    self.sizeChanged = False
def OnSearchMenu(self, event):
    """Re-run the current filter when the search mode (name/content) changes."""
    # The second radio item in the filter's menu selects full-content search.
    menuItems = self.filter.GetMenu().GetMenuItems()
    if menuItems[1].IsChecked():
        self.OnSearch()
    else:
        self.RecreateTree()
def OnSearch(self, event=None):
    """Run a full-content search over all demos and rebuild the tree.

    Results are collected per category into self.searchItems, which
    RecreateTree consumes when full search mode is active.
    """
    value = self.filter.GetValue()
    if not value:
        self.RecreateTree()
        return
    wx.BeginBusyCursor()
    for category, items in _treeList:
        self.searchItems[category] = []
        for childItem in items:
            if SearchDemo(childItem, value):
                self.searchItems[category].append(childItem)
    wx.EndBusyCursor()
    self.RecreateTree()
def SetTreeModified(self, modified):
    """Switch the selected tree item's icon between modified and normal.

    The "modified" icon is the extra image appended after the category
    icons; the normal icon index is the category id stored as item data.
    """
    item = self.tree.GetSelection()
    if modified:
        image = len(_demoPngs)
    else:
        image = self.tree.GetItemData(item)
    self.tree.SetItemImage(item, image)
def WriteText(self, text):
    """Forward *text* to the wx log, trimming a single trailing newline."""
    if text.endswith('\n'):
        text = text[:-1]
    wx.LogMessage(text)
def write(self, txt):
    # File-like hook so this frame can stand in for sys.stdout.
    self.WriteText(txt)
#---------------------------------------------
def OnItemExpanded(self, event):
    # Log tree expansion for demonstration purposes.
    item = event.GetItem()
    wx.LogMessage("OnItemExpanded: %s" % self.tree.GetItemText(item))
    event.Skip()
#---------------------------------------------
def OnItemCollapsed(self, event):
    # Log tree collapse for demonstration purposes.
    item = event.GetItem()
    wx.LogMessage("OnItemCollapsed: %s" % self.tree.GetItemText(item))
    event.Skip()
#---------------------------------------------
def OnTreeLeftDown(self, event):
    # reset the overview text if the tree item is clicked on again
    pt = event.GetPosition()
    item, flags = self.tree.HitTest(pt)
    if item == self.tree.GetSelection():
        self.SetOverview(self.tree.GetItemText(item)+" Overview", self.curOverview)
    event.Skip()
#---------------------------------------------
def OnSelChanged(self, event):
    """Load the demo for the newly selected tree item.

    Suppressed while shutting down, before startup finishes, or when
    RecreateTree re-selects an item programmatically (skipLoad).
    """
    if self.dying or not self.loaded or self.skipLoad:
        return
    self.StopDownload()
    item = event.GetItem()
    itemText = self.tree.GetItemText(item)
    self.LoadDemo(itemText)
    self.StartDownload()
#---------------------------------------------
def LoadDemo(self, demoName):
    """Load the demo (or overview page) named *demoName* into the notebook.

    Looks first for a regular demo file, then for a demo provided by an
    external package, and falls back to the main overview page.
    """
    try:
        wx.BeginBusyCursor()
        self.pnl.Freeze()
        # The demos assume the cwd is the demo directory.
        os.chdir(self.cwd)
        self.ShutdownDemoModule()
        if demoName == self.overviewText:
            # User selected the "wxPython Overview" node
            # ie: _this_ module
            # Changing the main window at runtime not yet supported...
            self.demoModules = DemoModules(__name__)
            self.SetOverview(self.overviewText, mainOverview)
            self.LoadDemoSource()
            self.UpdateNotebook(0)
        else:
            if os.path.exists(GetOriginalFilename(demoName)):
                wx.LogMessage("Loading demo %s.py..." % demoName)
                self.demoModules = DemoModules(demoName)
                self.LoadDemoSource()
            else:
                # Not a regular demo: maybe an external package's demo or
                # an external package's parent (overview) node.
                package, overview = LookForExternals(self.externalDemos, demoName)
                if package:
                    wx.LogMessage("Loading demo %s.py..." % ("%s/%s"%(package, demoName)))
                    self.demoModules = DemoModules("%s/%s"%(package, demoName))
                    self.LoadDemoSource()
                elif overview:
                    self.SetOverview(demoName, overview)
                    self.codePage = None
                    self.UpdateNotebook(0)
                else:
                    # Unknown name: show the main overview.
                    self.SetOverview("wxPython", mainOverview)
                    self.codePage = None
                    self.UpdateNotebook(0)
    finally:
        wx.EndBusyCursor()
        self.pnl.Thaw()
#---------------------------------------------
def LoadDemoSource(self):
    """Rebuild the source-code panel for the active demo modules."""
    # Drop the old panel first so it is released before the new one is built.
    self.codePage = None
    self.codePage = DemoCodePanel(self.nb, self)
    self.codePage.LoadDemo(self.demoModules)
#---------------------------------------------
def RunModule(self):
    """Runs the active module.

    On success the demo's window goes into the "Demo" tab; on failure
    (now or during an earlier compile/exec) an error panel is shown instead.
    """
    module = self.demoModules.GetActive()
    self.ShutdownDemoModule()
    overviewText = ""
    # o The RunTest() for all samples must now return a window that can
    #   be placed in a tab in the main notebook.
    # o If an error occurs (or has occurred before) an error tab is created.
    if module is not None:
        wx.LogMessage("Running demo module...")
        if hasattr(module, "overview"):
            overviewText = module.overview
        try:
            self.demoPage = module.runTest(self, self.nb, self)
        except:
            # Any failure inside the demo is shown as an error panel.
            self.demoPage = DemoErrorPanel(self.nb, self.codePage,
                                           DemoError(sys.exc_info()), self)
        bg = self.nb.GetThemeBackgroundColour()
        if bg:
            self.demoPage.SetBackgroundColour(bg)
        assert self.demoPage is not None, "runTest must return a window!"
    else:
        # There was a previous error in compiling or exec-ing
        self.demoPage = DemoErrorPanel(self.nb, self.codePage,
                                       self.demoModules.GetErrorInfo(), self)
    self.SetOverview(self.demoModules.name + " Overview", overviewText)
    if self.firstTime:
        # change to the demo page the first time a module is run
        self.UpdateNotebook(2)
        self.firstTime = False
    else:
        # otherwise just stay on the same tab in case the user has changed to another one
        self.UpdateNotebook()
#---------------------------------------------
def ShutdownDemoModule(self):
    """Give the running demo a chance to clean up, then drop it."""
    if self.demoPage:
        # inform the window that it's time to quit if it cares
        if hasattr(self.demoPage, "ShutdownDemo"):
            self.demoPage.ShutdownDemo()
        ## wx.YieldIfNeeded() # in case the page has pending events
        self.demoPage = None
#---------------------------------------------
def UpdateNotebook(self, select = -1):
    """Sync the notebook tabs with the current code/demo panels.

    :param select: tab index to activate afterwards; -1 keeps the
        currently selected tab.
    """
    nb = self.nb
    debug = False
    self.pnl.Freeze()
    def UpdatePage(page, pageText):
        # Add, replace, keep, or remove the tab titled *pageText* so it
        # displays *page* (the tab disappears when page is None).
        pageExists = False
        pagePos = -1
        for i in range(nb.GetPageCount()):
            if nb.GetPageText(i) == pageText:
                pageExists = True
                pagePos = i
                break
        if page:
            if not pageExists:
                # Add a new page
                nb.AddPage(page, pageText, imageId=nb.GetPageCount())
                if debug: wx.LogMessage("DBG: ADDED %s" % pageText)
            else:
                if nb.GetPage(pagePos) != page:
                    # Reload an existing page
                    nb.DeletePage(pagePos)
                    nb.InsertPage(pagePos, page, pageText, imageId=pagePos)
                    if debug: wx.LogMessage("DBG: RELOADED %s" % pageText)
                else:
                    # Excellent! No redraw/flicker
                    if debug: wx.LogMessage("DBG: SAVED from reloading %s" % pageText)
        elif pageExists:
            # Delete a page
            nb.DeletePage(pagePos)
            if debug: wx.LogMessage("DBG: DELETED %s" % pageText)
        else:
            if debug: wx.LogMessage("DBG: STILL GONE - %s" % pageText)
    if select == -1:
        select = nb.GetSelection()
    UpdatePage(self.codePage, "Demo Code")
    UpdatePage(self.demoPage, "Demo")
    if select >= 0 and select < nb.GetPageCount():
        nb.SetSelection(select)
    self.pnl.Thaw()
#---------------------------------------------
def SetOverview(self, name, text):
    """Show *text* in the overview tab and retitle the tab after *name*."""
    self.curOverview = text
    lead = text[:6]
    if lead != '<html>' and lead != '<HTML>':
        # Plain text: convert newlines for the HTML window.
        text = '<br>'.join(text.split('\n'))
    # if wx.USE_UNICODE:
    # text = text.decode('iso8859_1')
    self.ovr.SetPage(text)
    self.nb.SetPageText(0, os.path.split(name)[1])
#---------------------------------------------
def StartDownload(self):
    """Kick off a background documentation download for the selected demo."""
    if self.downloading or not self.allowDocs:
        return
    item = self.tree.GetSelection()
    if self.tree.ItemHasChildren(item):
        # Category nodes have no documentation of their own.
        return
    itemText = self.tree.GetItemText(item)
    if itemText in self.pickledData:
        # Already cached from an earlier visit; no network needed.
        self.LoadDocumentation(self.pickledData[itemText])
        return
    text = self.curOverview
    text += "<br><p><b>Checking for documentation on the wxWidgets website, please stand by...</b><br>"
    lead = text[:6]
    if lead != '<html>' and lead != '<HTML>':
        text = '<br>'.join(text.split('\n'))
    self.ovr.SetPage(text)
    # Progress UI: timer pulses the gauge and cycles the tab icon.
    self.downloadTimer.Start(100)
    self.downloadGauge.Show()
    self.Reposition()
    self.downloading = True
    self.internetThread = InternetThread(self, itemText)
#---------------------------------------------
def StopDownload(self, error=None):
    """Stop the background doc download and restore the overview page.

    :param error: optional list of strings describing a download failure;
        logged at most once per session.
    """
    self.downloadTimer.Stop()
    if not self.downloading:
        return
    if error:
        if self.sendDownloadError:
            self.log.AppendText("Warning: problems in downloading documentation from the wxWidgets website.\n")
            self.log.AppendText("Error message from the documentation downloader was:\n")
            self.log.AppendText("\n".join(error))
            self.sendDownloadError = False
    self.nb.SetPageImage(0, 0)
    # Ask the worker thread to stop, then forget it.
    self.internetThread.keepRunning = False
    self.internetThread = None
    self.downloading = False
    self.downloadGauge.Hide()
    self.Reposition()
    text = self.curOverview
    lead = text[:6]
    if lead != '<html>' and lead != '<HTML>':
        # Plain text: convert newlines for the HTML window.
        text = '<br>'.join(text.split('\n'))
    self.ovr.SetPage(text)
#---------------------------------------------
def LoadDocumentation(self, data):
    """Merge downloaded documentation *data* into the overview page.

    :param data: a ``(styles, events, extra, appearance)`` tuple as cached
        in ``self.pickledData`` / produced by the downloader thread.
    """
    text = self.curOverview
    # Removed the unused local `addHtml` from the original implementation.
    if '<html>' not in text and '<HTML>' not in text:
        text = '<br>'.join(text.split('\n'))
    styles, events, extra, appearance = data
    if appearance:
        text += FormatImages(appearance)
    for names, values in zip(["Styles", "Extra Styles", "Events"], [styles, extra, events]):
        if not values:
            continue
        # Event tables use 2 header columns, style tables 3.
        headers = (names == "Events" and [2] or [3])[0]
        text += "<p>" + FormatDocs(names, values, headers)
    item = self.tree.GetSelection()
    itemText = self.tree.GetItemText(item)
    # Cache so the next visit to this demo skips the download.
    self.pickledData[itemText] = data
    if six.PY2:
        # TODO: verify that this encoding is correct
        text = text.decode('iso8859_1')
    self.StopDownload()
    self.ovr.SetPage(text)
    #print("load time: ", time.time() - start)
# Menu methods
def OnFileExit(self, *event):
    """File->Exit handler: close the main frame."""
    self.Close()
def OnToggleRedirect(self, event):
    """Toggle redirection of stdout/stderr into the demo's output window."""
    app = wx.GetApp()
    if event.Checked():
        app.RedirectStdio()
        print("Print statements and other standard output will now be directed to this window.")
    else:
        app.RestoreStdio()
        print("Print statements and other standard output will now be sent to the usual location.")
def OnAllowDownload(self, event):
    """Enable or disable on-line documentation downloads from the menu."""
    self.allowDocs = event.IsChecked()
    if self.allowDocs:
        self.StartDownload()
    else:
        self.StopDownload()
def OnDeleteDocs(self, event):
    """After confirmation, wipe the downloaded-documentation cache from disk."""
    deleteMsg = "You are about to delete the downloaded documentation.\n" + \
                "Do you want to continue?"
    dlg = wx.MessageDialog(self, deleteMsg, "wxPython Demo",
                           wx.YES_NO | wx.NO_DEFAULT| wx.ICON_QUESTION)
    result = dlg.ShowModal()
    if result == wx.ID_NO:
        dlg.Destroy()
        return
    dlg.Destroy()
    busy = wx.BusyInfo("Deleting downloaded data...")
    wx.SafeYield()
    pickledFile = GetDocFile()
    docDir = os.path.split(pickledFile)[0]
    if os.path.exists(docDir):
        shutil.rmtree(docDir, ignore_errors=True)
    self.pickledData = {}
    # Deleting `busy` dismisses the BusyInfo window.
    del busy
    self.sendDownloadError = True
def OnAllowAuiFloating(self, event):
    """Toggle whether AUI panes (other than the notebook) may float."""
    self.allowAuiFloating = event.Checked()
    for pane in self.mgr.GetAllPanes():
        if pane.name != "Notebook":
            pane.Floatable(self.allowAuiFloating)
    self.EnableAUIMenu()
    self.mgr.Update()
def EnableAUIMenu(self):
    """Enable/disable the AUI-related entries of the options menu.

    Items 4 .. n-2 are the AUI perspective entries; the indices mirror
    how the menu is built elsewhere in this class.
    """
    menuItems = self.options_menu.GetMenuItems()
    for indx in range(4, len(menuItems)-1):
        item = menuItems[indx]
        item.Enable(self.allowAuiFloating)
def OnAUIPerspectives(self, event):
    """Apply the AUI perspective named by the chosen menu item."""
    perspective = self.perspectives_menu.GetLabel(event.GetId())
    self.mgr.LoadPerspective(self.auiConfigurations[perspective])
    self.mgr.Update()
def OnSavePerspective(self, event):
    """Prompt for a name and save the current AUI layout under it.

    Rejects names that already exist in the perspectives menu.
    """
    dlg = wx.TextEntryDialog(self, "Enter a name for the new perspective:", "AUI Configuration")
    dlg.SetValue(("Perspective %d")%(len(self.auiConfigurations)+1))
    try:
        if dlg.ShowModal() != wx.ID_OK:
            return
        perspectiveName = dlg.GetValue()
    finally:
        # The original never destroyed the dialog; destroy it on every path.
        dlg.Destroy()
    menuItems = self.perspectives_menu.GetMenuItems()
    for item in menuItems:
        if item.GetLabel() == perspectiveName:
            wx.MessageBox("The selected perspective name:\n\n%s\n\nAlready exists."%perspectiveName,
                          "Error", style=wx.ICON_ERROR)
            return
    # Reuse perspectiveName instead of re-querying the (now destroyed) dialog.
    item = wx.MenuItem(self.perspectives_menu, -1, perspectiveName,
                       "Load user perspective %d"%(len(self.auiConfigurations)+1),
                       wx.ITEM_RADIO)
    self.Bind(wx.EVT_MENU, self.OnAUIPerspectives, item)
    self.perspectives_menu.Append(item)
    item.Check(True)
    self.auiConfigurations.update({perspectiveName: self.mgr.SavePerspective()})
def OnDeletePerspective(self, event):
    """Let the user pick saved AUI perspectives to remove."""
    menuItems = self.perspectives_menu.GetMenuItems()
    lst = []
    loadDefault = False
    # Item 0 is the built-in default perspective; only user-saved ones
    # are offered for deletion.
    for indx, item in enumerate(menuItems):
        if indx > 0:
            lst.append(item.GetLabel())
    dlg = wx.MultiChoiceDialog(self,
                               "Please select the perspectives\nyou would like to delete:",
                               "Delete AUI Perspectives", lst)
    if dlg.ShowModal() == wx.ID_OK:
        selections = dlg.GetSelections()
        strings = [lst[x] for x in selections]
        for sel in strings:
            self.auiConfigurations.pop(sel)
            item = menuItems[lst.index(sel)+1]
            if item.IsChecked():
                # Deleting the active perspective: fall back to default.
                loadDefault = True
                self.perspectives_menu.GetMenuItems()[0].Check(True)
            self.perspectives_menu.DeleteItem(item)
            lst.remove(sel)
    if loadDefault:
        self.mgr.LoadPerspective(self.auiConfigurations[DEFAULT_PERSPECTIVE])
        self.mgr.Update()
def OnTreeExpansion(self, event):
    """Restore the persisted expansion state of the demo tree."""
    self.tree.SetExpansionState(self.expansionState)
def OnHelpAbout(self, event):
    """Show the modal About box."""
    from About import MyAboutBox
    about = MyAboutBox(self)
    about.ShowModal()
    about.Destroy()
def OnHelpFind(self, event):
    """Open the Find dialog over the demo-code tab (only one at a time)."""
    # Idiom fix: identity comparison with None instead of `!= None`.
    if self.finddlg is not None:
        return
    self.nb.SetSelection(1)
    self.finddlg = wx.FindReplaceDialog(self, self.finddata, "Find",
                                        wx.FR_NOMATCHCASE | wx.FR_NOWHOLEWORD)
    self.finddlg.Show(True)
def OnUpdateFindItems(self, evt):
    """Enable the Find menu items only while no find dialog is open."""
    # Idiom fix: identity comparison with None instead of `== None`.
    evt.Enable(self.finddlg is None)
def OnFind(self, event):
    """Search the demo source for the current find string.

    Searches case-insensitively from the current selection, wrapping
    around once; highlights the match or reports failure.
    """
    editor = self.codePage.editor
    self.nb.SetSelection(1)
    end = editor.GetLastPosition()
    textstring = editor.GetRange(0, end).lower()
    findstring = self.finddata.GetFindString().lower()
    backward = not (self.finddata.GetFlags() & wx.FR_DOWN)
    if backward:
        start = editor.GetSelection()[0]
        loc = textstring.rfind(findstring, 0, start)
    else:
        start = editor.GetSelection()[1]
        loc = textstring.find(findstring, start)
    if loc == -1 and start != 0:
        # string not found, start at beginning
        if backward:
            start = end
            loc = textstring.rfind(findstring, 0, start)
        else:
            start = 0
            loc = textstring.find(findstring, start)
    if loc == -1:
        dlg = wx.MessageDialog(self, 'Find String Not Found',
                               'Find String Not Found in Demo File',
                               wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()
    if self.finddlg:
        if loc == -1:
            # Keep the dialog open so the user can retype the query.
            self.finddlg.SetFocus()
            return
        else:
            self.finddlg.Destroy()
            self.finddlg = None
    # Scroll to and highlight the match.
    editor.ShowPosition(loc)
    editor.SetSelection(loc, loc + len(findstring))
def OnFindNext(self, event):
    """Repeat the last search, or open the Find dialog if there is none."""
    if self.finddata.GetFindString():
        self.OnFind(event)
    else:
        self.OnHelpFind(event)
def OnFindClose(self, event):
    """Destroy the Find dialog when it is closed."""
    event.GetDialog().Destroy()
    self.finddlg = None
def OnOpenShellWindow(self, evt):
    """Open (or re-raise) a PyShell window sharing this app's namespace."""
    if self.shell:
        # if it already exists then just make sure it's visible
        s = self.shell
        if s.IsIconized():
            s.Iconize(False)
        s.Raise()
    else:
        # Make a PyShell window
        from wx import py
        namespace = { 'wx' : wx,
                      'app' : wx.GetApp(),
                      'frame' : self,
                      }
        self.shell = py.shell.ShellFrame(None, locals=namespace)
        self.shell.SetSize((640,480))
        self.shell.Show()
        # Hook the close event of the main frame window so that we
        # close the shell at the same time if it still exists
        def CloseShell(evt):
            if self.shell:
                self.shell.Close()
            evt.Skip()
        self.Bind(wx.EVT_CLOSE, CloseShell)
def OnOpenWidgetInspector(self, evt):
    """Open the wx widget-inspection tool."""
    # Activate the widget inspection tool, giving it a widget to preselect
    # in the tree. Use either the one under the cursor, if any, or this
    # frame.
    from wx.lib.inspection import InspectionTool
    wnd = wx.FindWindowAtPointer()
    if not wnd:
        wnd = self
    InspectionTool().Show(wnd, True)
#---------------------------------------------
def OnCloseWindow(self, event):
    """Tear down the application: persist settings and cached docs, destroy."""
    self.mgr.UnInit()
    self.dying = True
    self.demoPage = None
    self.codePage = None
    self.mainmenu = None
    self.StopDownload()
    if self.tbicon is not None:
        self.tbicon.Destroy()
    # Persist UI state so the next session can restore it.
    config = GetConfig()
    config.Write('ExpansionState', str(self.tree.GetExpansionState()))
    config.Write('AUIPerspectives', str(self.auiConfigurations))
    config.Write('AllowDownloads', str(self.allowDocs))
    config.Write('AllowAUIFloating', str(self.allowAuiFloating))
    config.Flush()
    MakeDocDirs()
    pickledFile = GetDocFile()
    # `with` guarantees the file is closed even if pickling fails
    # (the original open/close pair could leak the handle on error).
    with open(pickledFile, "wb") as fid:
        cPickle.dump(self.pickledData, fid, cPickle.HIGHEST_PROTOCOL)
    self.Destroy()
#---------------------------------------------
def OnIdle(self, event):
    """Adopt a window a demo handed over asynchronously via self.otherWin."""
    if self.otherWin:
        self.otherWin.Raise()
        self.demoPage = self.otherWin
        self.otherWin = None
#---------------------------------------------
def OnDownloadTimer(self, event):
    """Animate the download gauge and cycle the overview tab's icon."""
    self.downloadGauge.Pulse()
    self.downloadImage += 1
    if self.downloadImage > 9:
        # Image-list indices 3..9 form the animation loop.
        self.downloadImage = 3
    self.nb.SetPageImage(0, self.downloadImage)
    ## wx.SafeYield()
#---------------------------------------------
def ShowTip(self):
    """Read the tip-of-the-day state from the config.

    The actual tip display below is commented out; only the state read
    remains active.
    """
    config = GetConfig()
    showTipText = config.Read("tips")
    if showTipText:
        # NOTE(review): eval() of a stored config string -- acceptable for
        # a local demo config file, but unsafe for untrusted input.
        showTip, index = eval(showTipText)
    else:
        showTip, index = (1, 0)
    # if showTip:
    # tp = wx.CreateFileTipProvider(opj("data/tips.txt"), index)
    # showTip = wx.ShowTip(self, tp)
    # index = tp.GetCurrentTip()
    # config.Write("tips", str( (showTip, index) ))
    # config.Flush()
#---------------------------------------------
def OnDemoMenu(self, event):
    """Jump to the tree item matching the chosen Demo menu entry."""
    try:
        selectedDemo = self.treeMap[self.mainmenu.GetLabel(event.GetId())]
    except:
        # NOTE(review): a bare except also hides unexpected errors;
        # KeyError on the treeMap lookup is the case expected here.
        selectedDemo = None
    if selectedDemo:
        self.tree.SelectItem(selectedDemo)
        self.tree.EnsureVisible(selectedDemo)
#---------------------------------------------
def OnIconfiy(self, evt):
    """Log iconize events (name misspelling kept: it is bound elsewhere)."""
    wx.LogMessage("OnIconfiy: %s" % evt.IsIconized())
    evt.Skip()
#---------------------------------------------
def OnMaximize(self, evt):
    """Log maximize events and let them propagate."""
    wx.LogMessage("OnMaximize")
    evt.Skip()
#---------------------------------------------
def OnActivate(self, evt):
    """Log frame activation events and let them propagate."""
    wx.LogMessage("OnActivate: %s" % evt.GetActive())
    evt.Skip()
#---------------------------------------------
def OnAppActivate(self, evt):
    """Log application activation events and let them propagate."""
    wx.LogMessage("OnAppActivate: %s" % evt.GetActive())
    evt.Skip()
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
class MySplashScreen(SplashScreen):
    """Splash screen that creates the main demo frame after a short delay,
    or immediately if the user dismisses the splash early."""
    def __init__(self):
        bmp = wx.Image(opj("bitmaps/splash.png")).ConvertToBitmap()
        SplashScreen.__init__(self, bmp,
                              wx.adv.SPLASH_CENTRE_ON_SCREEN | wx.adv.SPLASH_TIMEOUT,
                              5000, None, -1)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        # Build the main frame 1s in, while the splash is still visible.
        self.fc = wx.CallLater(1000, self.ShowMain)
    def OnClose(self, evt):
        # Make sure the default handler runs too so this window gets
        # destroyed
        evt.Skip()
        self.Hide()
        # if the timer is still running then go ahead and show the
        # main frame now
        if self.fc.IsRunning():
            self.fc.Stop()
            self.ShowMain()
    def ShowMain(self):
        """Create and show the main demo frame."""
        frame = wxPythonDemo(None, "wxPython: (A Demonstration)")
        frame.Show()
        if self.fc.IsRunning():
            # Splash is still up: keep it above the new frame.
            self.Raise()
        wx.CallAfter(frame.ShowTip)
#---------------------------------------------------------------------------
from wx.lib.mixins.treemixin import ExpansionState
# Pick the tree-control implementation at import time; both variants
# expose the API used by wxPythonDemoTree below.
if USE_CUSTOMTREECTRL:
    import wx.lib.agw.customtreectrl as CT
    TreeBaseClass = CT.CustomTreeCtrl
else:
    TreeBaseClass = wx.TreeCtrl
class wxPythonDemoTree(ExpansionState, TreeBaseClass):
    """Demo-selection tree with a persistable expansion state."""
    def __init__(self, parent):
        TreeBaseClass.__init__(self, parent, style=wx.TR_DEFAULT_STYLE|
                               wx.TR_HAS_VARIABLE_ROW_HEIGHT)
        self.BuildTreeImageList()
        if USE_CUSTOMTREECTRL:
            self.SetSpacing(10)
            self.SetWindowStyle(self.GetWindowStyle() & ~wx.TR_LINES_AT_ROOT)
        self.SetInitialSize((100,80))
    def AppendItem(self, parent, text, image=-1, wnd=None):
        # CustomTreeCtrl can embed a window in an item; the stock
        # wx.TreeCtrl does not accept the wnd argument.
        if USE_CUSTOMTREECTRL:
            item = TreeBaseClass.AppendItem(self, parent, text, image=image, wnd=wnd)
        else:
            item = TreeBaseClass.AppendItem(self, parent, text, image=image)
        return item
    def BuildTreeImageList(self):
        """Attach the 16x16 icon list used for tree items."""
        imgList = wx.ImageList(16, 16)
        for png in _demoPngs:
            imgList.Add(images.catalog[png].GetBitmap())
        # add the image for modified demos.
        imgList.Add(images.catalog["custom"].GetBitmap())
        self.AssignImageList(imgList)
    def GetItemIdentity(self, item):
        # Used by the ExpansionState mixin to identify items.
        return self.GetItemData(item)
#---------------------------------------------------------------------------
class MyApp(wx.App, wx.lib.mixins.inspection.InspectionMixin):
    """Demo application: warns on a version mismatch, then shows the splash
    screen which in turn creates the main frame."""
    def OnInit(self):
        # Check runtime version
        if LooseVersion(version.VERSION_STRING) != LooseVersion(wx.VERSION_STRING):
            wx.MessageBox(caption="Warning",
                          message="You're using version %s of wxPython, but this copy of the demo was written for version %s.\n"
                          "There may be some version incompatibilities..."
                          % (wx.VERSION_STRING, version.VERSION_STRING))
        self.InitInspection() # for the InspectionMixin base class
        # Now that we've warned the user about possibile problems,
        # lets import images
        import images as i
        global images
        images = i
        # For debugging
        #self.SetAssertMode(wx.PYAPP_ASSERT_DIALOG|wx.PYAPP_ASSERT_EXCEPTION)
        wx.SystemOptions.SetOption("mac.window-plain-transition", 1)
        self.SetAppName("wxPyDemo")
        # Create and show the splash screen. It will then create and
        # show the main frame when it is time to do so. Normally when
        # using a SplashScreen you would create it, show it and then
        # continue on with the application's initialization, finally
        # creating and showing the main application window(s). In
        # this case we have nothing else to do so we'll delay showing
        # the main frame until later (see ShowMain above) so the users
        # can see the SplashScreen effect.
        splash = MySplashScreen()
        splash.Show()
        return True
#---------------------------------------------------------------------------
def main():
    """Entry point: cd into the demo's directory (if known) and run the app."""
    try:
        demoPath = os.path.dirname(__file__)
        os.chdir(demoPath)
    except (NameError, OSError):
        # __file__ is undefined when frozen/interactive, and chdir may
        # fail; the demo still works from the current directory.
        # (Narrowed from the original bare `except:`.)
        pass
    app = MyApp(False)
    app.MainLoop()
#---------------------------------------------------------------------------
# HTML shown in the overview tab when the root "wxPython Overview" node
# is selected (see wxPythonDemo.LoadDemo / SetOverview).
mainOverview = """<html><body>
<h2>wxPython</h2>
<p> wxPython is a <b>GUI toolkit</b> for the Python programming
language. It allows Python programmers to create programs with a
robust, highly functional graphical user interface, simply and easily.
It is implemented as a Python extension module (native code) that
wraps the popular wxWindows cross platform GUI library, which is
written in C++.
<p> Like Python and wxWindows, wxPython is <b>Open Source</b> which
means that it is free for anyone to use and the source code is
available for anyone to look at and modify. Or anyone can contribute
fixes or enhancements to the project.
<p> wxPython is a <b>cross-platform</b> toolkit. This means that the
same program will run on multiple platforms without modification.
Currently supported platforms are 32-bit Microsoft Windows, most Unix
or unix-like systems, and Macintosh OS X. Since the language is
Python, wxPython programs are <b>simple, easy</b> to write and easy to
understand.
<p> <b>This demo</b> is not only a collection of test cases for
wxPython, but is also designed to help you learn about and how to use
wxPython. Each sample is listed in the tree control on the left.
When a sample is selected in the tree then a module is loaded and run
(usually in a tab of this notebook,) and the source code of the module
is loaded in another tab for you to browse and learn from.
"""
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
if __name__ == '__main__':
    # Run as a script: pretend the module name is 'Main' so that demo
    # modules which do "import Main" resolve back to this running module.
    __name__ = 'Main'
    main()
#----------------------------------------------------------------------------
| StarcoderdataPython |
6557186 | <reponame>michalk8/NeuralEE<gh_stars>1-10
import torch
import math
@torch.no_grad()
def error_ee(X, Wp, Wn, lam):
    """Evaluate the elastic embedding objective at coordinates X.

    The loss is the attractive term (Wp-weighted squared distances) plus
    ``lam`` times the repulsive term (Wn-weighted Gaussian kernel values).
    Straightforward dense evaluation; for large problems use the chunked
    variant instead.

    :param X: sample-coordinates matrix.
    :type X: torch.FloatTensor
    :param Wp: attractive weights.
    :type Wp: torch.FloatTensor
    :param Wn: repulsive weights.
    :type Wn: torch.FloatTensor
    :param lam: trade-off factor of elastic embedding function.
    :returns: elastic embedding loss value and the kernel matrix.
    """
    distances = sqdist(X)
    kernel_matrix = torch.exp(-distances)
    attractive = torch.dot(Wp.view(-1), distances.view(-1))
    repulsive = torch.dot(Wn.view(-1), kernel_matrix.view(-1))
    return attractive + lam * repulsive, kernel_matrix
@torch.no_grad()
def sqdist(X):
    """Pairwise squared Euclidean distances between the rows of X.

    :param X: (n, d) sample-coordinates matrix.
    :type X: torch.FloatTensor
    :return: (n, n) matrix of squared distances (zero diagonal).
    :rtype: torch.FloatTensor
    """
    x = (X ** 2).sum(dim=1, keepdim=True)
    sqd = x - 2 * X @ X.t() + x.t()
    # Floating-point rounding can leave tiny negatives off-diagonal and
    # nonzero values on the diagonal; fill_diagonal_ replaces the original
    # Python index-list + zeros-tensor assignment.
    sqd.fill_diagonal_(0)
    return sqd.clamp_min_(0)
@torch.no_grad()
def error_ee_split(X, Wp, Wn, lam, memory=2, device=None):
    """Chunked evaluation of the elastic embedding loss.

    Processes the rows of X in horizontal slices so that the temporary
    distance/kernel buffers stay within a memory budget; the per-slice
    weight rows are moved to the target device on demand.

    :param X: sample-coordinates matrix.
    :type X: torch.FloatTensor
    :param Wp: attractive weights.
    :type Wp: torch.FloatTensor
    :param Wn: repulsive weights.
    :type Wn: torch.FloatTensor
    :param lam: trade-off factor of elastic embedding function.
    :param memory: memory (GB) allocated to compute the error.
    :param device: device chosen to operate; defaults to X's device.
    :type device: torch.device
    :returns: elastic embedding loss value.
    """
    target = X.device if device is None else device
    X = X.to(target)
    n_samples = X.shape[0]
    # Rows per slice so one (rows x n) fp64 buffer fits the budget.
    rows = math.floor((memory * 1024 ** 3) / (2 * n_samples * 8))
    squares = X ** 2
    norms = squares.sum(dim=1, keepdim=True)
    total = 0
    start = 0
    while start < n_samples:
        stop = min(n_samples, start + rows)
        sqd = (squares[start:stop, :].sum(dim=1, keepdim=True)
               - 2 * X[start:stop, :] @ X.t() + norms.t())
        ker = (-sqd).exp()
        total += Wp[start:stop, :].to(target).view(-1).dot(sqd.view(-1))
        total += lam * Wn[start:stop, :].to(target).view(-1).dot(ker.view(-1))
        start += rows
    return total
| StarcoderdataPython |
3599811 | <gh_stars>10-100
__author__ = 'jatwood'
import sys
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from sklearn.linear_model import LogisticRegression
import data
import util
import kernel
import structured
def node_proportion_baseline_experiment(model_fn, data_fn, data_name, model_name, prop_valid, prop_test):
    """Feature-only baseline evaluated at increasing training-set sizes.

    model_fn maps a regularisation constant C to an sklearn-style model.
    For each training proportion, C is chosen by validation accuracy and
    micro/macro F1 plus accuracy are reported on the held-out test split.
    (Python 2 source: print statements kept as-is.)
    """
    print 'Running node experiment (%s)...' % (data_name,)
    A, X, Y = data_fn()
    n_nodes = A.shape[0]
    indices = np.arange(n_nodes)
    # Fixed validation/test splits at the tail of the node ordering.
    valid_start = int(n_nodes * (1 - (prop_valid + prop_test)))
    test_start = int(n_nodes * (1 - prop_test))
    valid_indices = indices[valid_start:test_start]
    test_indices = indices[test_start:]
    for train_prop in [x / 10.0 for x in range(1, 11)]:
        train_end = int(valid_start * train_prop)
        train_indices = indices[:train_end]
        # Model selection: sweep C over 10^-4 .. 10^3 on the validation set.
        best_C = None
        best_acc = float('-inf')
        for C in [10**(-x) for x in range(-4,4)]:
            m = model_fn(C)
            m.fit(X[train_indices,:], np.argmax(Y[train_indices,:],1))
            preds = m.predict(X[valid_indices])
            actuals = np.argmax(Y[valid_indices,:],1)
            accuracy = accuracy_score(actuals, preds)
            if accuracy > best_acc:
                best_C = C
                best_acc = accuracy
        # Refit with the selected C and evaluate on the test split.
        m = model_fn(best_C)
        m.fit(X[train_indices], np.argmax(Y[train_indices],1))
        preds = m.predict(X[test_indices])
        actuals = np.argmax(Y[test_indices,:],1)
        accuracy = accuracy_score(actuals, preds)
        f1_micro = f1_score(actuals, preds, average='micro')
        f1_macro = f1_score(actuals, preds, average='macro')
        print 'form: name,micro_f,macro_f,accuracy'
        print '###RESULTS###: %s,%s,%.6f,%.8f,%.8f,%.8f' % (data_name, model_name, train_prop, f1_micro, f1_macro, accuracy)
def node_proportion_kernel_experiment(model, data_fn, data_name, model_name, prop_valid, prop_test):
    """Graph-kernel model evaluated at increasing training-set sizes.

    Unlike the baseline, *model* is a single pre-built kernel model with
    fit_with_validation/predict methods operating on the adjacency A.
    (Python 2 source: print statements kept as-is.)
    """
    print 'Running node experiment (%s)...' % (data_name,)
    A, X, Y = data_fn()
    n_nodes = A.shape[0]
    indices = np.arange(n_nodes)
    valid_start = int(n_nodes * (1 - (prop_valid + prop_test)))
    test_start = int(n_nodes * (1 - prop_test))
    valid_indices = indices[valid_start:test_start]
    test_indices = indices[test_start:]
    for train_prop in [x / 10.0 for x in range(1, 11)]:
        train_end = int(valid_start * train_prop)
        train_indices = indices[:train_end]
        model.fit_with_validation(A,Y, train_indices, valid_indices, test_indices)
        preds = model.predict(Y, valid_indices, test_indices)
        # Multi-label comparison: full one-hot rows rather than argmax.
        actuals = Y[test_indices,:]
        accuracy = accuracy_score(actuals, preds)
        f1_micro = f1_score(actuals, preds, average='micro')
        f1_macro = f1_score(actuals, preds, average='macro')
        print 'form: name,micro_f,macro_f,accuracy'
        print '###RESULTS###: %s,%s,%.6f,%.8f,%.8f,%.8f' % (data_name, model_name, train_prop, f1_micro, f1_macro, accuracy)
def node_proportion_structured_experiment(model, data_fn, data_name, model_name, prop_valid, prop_test):
    """Structured (CRF-SSVM) model evaluated at increasing training sizes.

    Same split scheme as the other experiments; the model consumes both
    the adjacency A and the features X.
    (Python 2 source: print statements kept as-is.)
    """
    print 'Running node experiment (%s)...' % (data_name,)
    A, X, Y = data_fn()
    n_nodes = A.shape[0]
    indices = np.arange(n_nodes)
    valid_start = int(n_nodes * (1 - (prop_valid + prop_test)))
    test_start = int(n_nodes * (1 - prop_test))
    valid_indices = indices[valid_start:test_start]
    test_indices = indices[test_start:]
    for train_prop in [x / 10.0 for x in range(1, 11)]:
        train_end = int(valid_start * train_prop)
        train_indices = indices[:train_end]
        model.fit_with_validation(A,X,Y, train_indices, valid_indices)
        preds = model.predict(A, X, test_indices)
        actuals = Y[test_indices,:]
        accuracy = accuracy_score(actuals, preds)
        f1_micro = f1_score(actuals, preds, average='micro')
        f1_macro = f1_score(actuals, preds, average='macro')
        print 'form: name,micro_f,macro_f,accuracy'
        print '###RESULTS###: %s,%s,%.6f,%.8f,%.8f,%.8f' % (data_name, model_name, train_prop, f1_micro, f1_macro, accuracy)
def node_proportion_crf_experiment(data_fn, data_name, model_name, prop_valid, prop_test):
    """CRF model evaluated at increasing training sizes.

    A fresh crfmodel is constructed per proportion because it binds the
    splits at construction time.  The model_name argument is accepted for
    signature parity but the results line always reports 'crf'.
    (Python 2 source: print statements kept as-is.)
    """
    print 'Running node experiment (%s)...' % (data_name,)
    A, X, Y = data_fn()
    n_nodes = A.shape[0]
    indices = np.arange(n_nodes)
    valid_start = int(n_nodes * (1 - (prop_valid + prop_test)))
    test_start = int(n_nodes * (1 - prop_test))
    valid_indices = indices[valid_start:test_start]
    test_indices = indices[test_start:]
    for train_prop in [x / 10.0 for x in range(1, 11)]:
        train_end = int(valid_start * train_prop)
        train_indices = indices[:train_end]
        model = structured.crfmodel(A,X,Y,train_indices,valid_indices,test_indices)
        model.fit_with_validation()
        preds = model.predict(test_indices)
        # Single-label comparison: class indices via argmax.
        actuals = Y[test_indices,:].argmax(1)
        accuracy = accuracy_score(actuals, preds)
        f1_micro = f1_score(actuals, preds, average='micro')
        f1_macro = f1_score(actuals, preds, average='macro')
        print 'form: name,micro_f,macro_f,accuracy'
        print '###RESULTS###: %s,%s,%.6f,%.8f,%.8f,%.8f' % (data_name, 'crf', train_prop, f1_micro, f1_macro, accuracy)
if __name__ == '__main__':
    # CLI: <dataset> <model>, dispatched on which model family the
    # second argument belongs to.
    np.random.seed()
    args = sys.argv[1:]
    # Dataset name -> loader returning (adjacency, features, labels).
    name_to_data = {
        'cora': data.parse_cora,
        'pubmed': data.parse_pubmed,
        'blogcatalog': data.parse_blogcatalog,
    }
    # Graph transforms; declared for reference, unused in the dispatch below.
    transform_lookup = {
        'id': None,
        'rwl': util.rw_laplacian,
        'l': util.laplacian,
    }
    baseline_models = {
        'logisticl1': lambda C: LogisticRegression(penalty='l1', C=C),
        'logisticl2': lambda C: LogisticRegression(penalty='l2', C=C),
    }
    kernel_models = {
        'ked': kernel.ExponentialDiffusionKernel(),
        'kled': kernel.LaplacianExponentialDiffusionKernel(),
    }
    structured_models = {
        'crf-ssvm': structured.StructuredModel()
    }
    crf_models = {
        'crf': None
    }
    data_name = args[0]
    data_fn = name_to_data[data_name]
    model_name = args[1]
    if model_name in baseline_models:
        node_proportion_baseline_experiment(baseline_models[model_name], data_fn, data_name, model_name, 0.1, 0.1)
    elif model_name in kernel_models:
        node_proportion_kernel_experiment(kernel_models[model_name], data_fn, data_name, model_name, 0.1, 0.1)
    elif model_name in structured_models:
        node_proportion_structured_experiment(structured_models[model_name], data_fn, data_name, model_name, 0.1, 0.1)
    elif model_name in crf_models:
        node_proportion_crf_experiment(data_fn, data_name, model_name, 0.1, 0.1)
    else:
        print '%s not recognized' % (model_name,)
| StarcoderdataPython |
81897 | # BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
# Copyright (C) 2007 STFC & NERC (Science and Technology Facilities Council).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later.
# http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
Classes modelling the OWS Service Identification package v1.1.0.
:author: <NAME>
"""
from cows.model.data_identification import Description
class ServiceIdentification(Description):
    """
    :ivar serviceType:
    :type serviceType: ows.iso19115_subset.Code
    :ivar serviceTypeVersions: The supported service versions
    :type serviceTypeVersions: iterable of str
    :ivar profiles:
    :type profiles: iterable of str
    :ivar fees: defaults to 'none'
    :type fees: str
    :ivar accessConstraints: defaults to 'none'
    :type accessConstraints: str
    """
    def __init__(self, serviceType, serviceTypeVersions=None,
                 profiles=None, fees="none", accessConstraints="none", **kwargs):
        """
        All parameters set default attributes of the instance.
        """
        # Name the class explicitly in super(): the original
        # super(self.__class__, ...) recurses infinitely if this class
        # is ever subclassed.
        super(ServiceIdentification, self).__init__(**kwargs)
        self.serviceType = serviceType
        # None sentinels replace the original mutable [] defaults, which
        # were shared across every instance that omitted these arguments.
        self.serviceTypeVersions = [] if serviceTypeVersions is None else serviceTypeVersions
        self.profiles = [] if profiles is None else profiles
        self.fees = fees
        self.accessConstraints = accessConstraints
1981328 | <gh_stars>1-10
import numpy as np
import imageio
import cv2
import sys, os
#Processing Original Image
def process_img(location_img):
    """Load the image at *location_img* as float32 values scaled to [0, 1]."""
    return imageio.imread(location_img).astype(np.float32) / 255
#Load and construct Ground Truth
def read_gt(location_gt):
    """Load every human-labelled ground-truth image in *location_gt*.

    Returns a list of float64 arrays scaled to [0, 1], one per directory
    entry (entries are concatenated onto the directory path as given).
    """
    return [
        imageio.imread(location_gt + entry).astype(np.float64) / 255
        for entry in os.listdir(location_gt)
    ]
#Construct Ground Truth representation from all human labelled images
def construct_gt(location_gt):
    """Build a consensus ground-truth mask from all human-labelled images.

    A pixel is foreground (1.0) when at least N-1 of the N annotators
    marked it pure red (R above threshold, G and B at zero after binary
    thresholding), otherwise background (0.0).

    :returns: the 0/1 mask and the foreground pixel count.
    """
    gt_images = read_gt(location_gt)
    # assumes each image is (H, W, 3+) with red/green/blue as the first
    # three channels -- TODO confirm against the annotation format
    size = gt_images[0].shape[:2]
    pixels = np.zeros((size))
    for gt in gt_images:
        # Binarise every channel at 0.0001 (equivalent to the original
        # cv2.threshold THRESH_BINARY call), then count pure-red pixels.
        # Vectorised: replaces the original per-pixel double loop.
        bw_img = (gt > 0.0001).astype(np.float64)
        pixels += ((bw_img[:, :, 0] > 0)
                   & (bw_img[:, :, 1] == 0)
                   & (bw_img[:, :, 2] == 0))
    # Each pixel is in foreground if N-1 out of N humans labelled the pixel in the foreground, else in the background
    pixels = np.where(pixels >= len(gt_images) - 1, 1., 0.)
    F = len(np.where(pixels > 0)[0])
    B = len(np.where(pixels == 0)[0])
    print("Foreground area of constructed Ground Truth is %d pixels" % F)
    print("Background area of constructed Ground Truth is %d pixels\n" % B)
    return pixels, F
| StarcoderdataPython |
3589588 | <reponame>tom-doerr/download_images_train_classifier<filename>download.py
#!/usr/bin/env python3
'''
Download images of certain class from the web.
The classes of images for which to download images are in the images_to_downloaded
list. Save the images to disk.
'''
images_to_downloaded = ['cat', 'dog']
import os
import sys
import urllib.request
import argparse
import requests
from bs4 import BeautifulSoup
import urllib.parse
import logging
logger = logging.getLogger(__name__)
def download_images(images_to_downloaded, output_dir, num_images_to_download=100):
    '''
    Download images of the given classes from Google image search and
    save them under one sub-directory per class.

    Args:
        images_to_downloaded: list of strings, the classes of images to download
        output_dir: string, path to the directory where to save the images
        num_images_to_download: int, number of images to download for each class
    '''
    header = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
    for class_name in images_to_downloaded:
        class_dir = os.path.join(output_dir, class_name)
        if not os.path.exists(class_dir):
            os.mkdir(class_dir)
        # URL-quote the query (the original built the search URL from the
        # raw class name and left the quoted variant unused).
        search_url = ("https://www.google.co.in/search?q="
                      + urllib.parse.quote(class_name) + "&source=lnms&tbm=isch")
        soup = BeautifulSoup(
            urllib.request.urlopen(urllib.request.Request(search_url, headers=header)),
            'html.parser')
        # (link, file-extension) pairs for every <img> tag on the page.
        # Dropped the dead `a.json_data` lookup from the original.
        actual_images = [(img_tag["src"], img_tag["src"].split('.')[-1])
                         for img_tag in soup.find_all("img")]
        for link, _image_type in actual_images[:num_images_to_download]:
            try:
                req = urllib.request.Request(link, headers=header)
                # Fetch once and write once: the original opened each URL
                # twice, downloading every image twice.
                with open(os.path.join(class_dir, os.path.basename(link)), "wb") as f:
                    f.write(urllib.request.urlopen(req).read())
                print("Image downloaded to ", class_dir)
            except Exception as e:
                # Best-effort: a single bad link must not abort the batch.
                print("could not load : " + link)
                print(e)
def main():
    '''
    Parse command line arguments and download the requested images.

    Required flags: --images_to_downloaded and --output_dir.
    '''
    parser = argparse.ArgumentParser(
        description='Download images of certain classes from the web.')
    parser.add_argument('--images_to_downloaded', type=str, nargs='+', required=True,
                        help='list of classes of images to download')
    parser.add_argument('--output_dir', type=str, required=True,
                        help='path to the directory where to save the images')
    parser.add_argument('--num_images_to_download', type=int, default=100,
                        help='number of images to download for each class')
    # Parse exactly once (the original parsed twice and also declared a
    # redundant, never-read --images-to-download flag).
    args = parser.parse_args()
    download_images(args.images_to_downloaded, args.output_dir,
                    args.num_images_to_download)
# Run the downloader only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1734387 | from PySide2.QtWidgets import QWidget, QLabel, QGridLayout
from PySide2.QtCore import Qt
from PySide2.QtGui import QPixmap, QFont
class HomeWindow(QWidget):
    """Landing page widget: a centered welcome headline, tagline and logo."""

    def __init__(self, rboost):
        super().__init__()
        self.rboost = rboost
        self.layout = QGridLayout()
        self._display_welcome()
        self._display_logo()
        self.setLayout(self.layout)

    def _display_welcome(self):
        # Large headline on row 0, smaller tagline on row 1, both centered.
        headline = QLabel('Welcome to RBoost!')
        headline.setFont(QFont('Arial', 48))
        tagline = QLabel('The network of science')
        tagline.setFont(QFont('Arial', 30))
        self.layout.addWidget(headline, 0, 0, Qt.AlignCenter)
        self.layout.addWidget(tagline, 1, 0, Qt.AlignCenter)

    def _display_logo(self):
        # Logo image occupies the row below the welcome messages.
        logo_label = QLabel()
        logo_label.setPixmap(QPixmap(self.rboost.logo))
        self.layout.addWidget(logo_label, 2, 0, Qt.AlignCenter)
| StarcoderdataPython |
3470144 | # Copyright (C) 2018 SCARV project <<EMAIL>>
#
# Use of this source code is restricted per the MIT license, a copy of which
# can be found at https://opensource.org/licenses/MIT (or should be included
# as LICENSE.txt within the associated archive or repository).
from sca3s import backend as sca3s_be
from sca3s import middleware as sca3s_mw
from sca3s.backend.acquire import board as board
from sca3s.backend.acquire import scope as scope
from sca3s.backend.acquire import hybrid as hybrid
from sca3s.backend.acquire import driver as driver
from sca3s.backend.acquire import repo as repo
from sca3s.backend.acquire import depo as depo
import requests, urllib.parse
class APIImp( sca3s_be.share.api.APIAbs ):
def __init__( self ) :
super().__init__()
def retrieve( self ) :
url = 'api/acquire/job'
json = {}
return self._request( requests.get, url, json = json )
def complete( self, job_id, job_status, job_response ) :
if ( job_id == None ) :
return
url = urllib.parse.urljoin( 'api/acquire/job/', job_id )
json = { 'status' : job_status.hex(), 'response' : job_response }
return self._request( requests.patch, url, json = json )
def announce( self ) :
url = 'api/acquire/advertise'
json = {}
if ( self.instance != '*' ) :
json[ 'queue' ] = self.instance
json[ 'devices' ] = dict()
for ( device_id, device_spec ) in sca3s_be.share.sys.conf.get( 'device_db', section = 'job' ).items() :
json[ 'devices' ][ device_id ] = { k : device_spec[ k ] for k in [ 'board_id', 'board_desc', 'scope_id', 'scope_desc', 'roles' ] }
return self._request( requests.post, url, json = json )
| StarcoderdataPython |
3300093 | <filename>questions.py
#!/usr/bin/python
# TODO: make that external image alternative texts are shown in a different color, maybe with a link to the external image.
"""
Question 7511789 / 35823053 at 2016-09-10 02:54:33.993394 ETA: 2016-09-29 17:33:47.519406
Traceback (most recent call last):
File "./questions.py", line 130, in <module>
make_questions_html()
File "./questions.py", line 122, in make_questions_html
f.write(render_question(cursor, post_id, renderer, prev_post_id, next_post_id))
File "./questions.py", line 100, in render_question
question["NextPage"]=select_question(cursor,NextId)
File "./questions.py", line 85, in select_question
question=select_post(cursor,Id)
File "/home/rtoni/stackexchange-zim/utils.py", line 118, in select_post
post["Body"]=rewriteurl.rewrite_urls_in_html(cursor,post["Body"],stackexchange_domain)
File "/home/rtoni/stackexchange-zim/rewriteurl.py", line 168, in rewrite_urls_in_html
assert newhtml.startswith("<html><body>") and newhtml.endswith("</body></html>")
AssertionError
"""
"""
with stackoverflow.com.squashfs compressed using "mksquashfs ./stackoverflow.com.sqlite3 ./stackoverflow.com.sqlite3.squashfs -comp xz -root-owned" and mounted:
CPU usage is at around 5%, and Disk reads at around <1 MB/s.
rtoni@debian:~/stackexchange-zim$ ./questions.py
Questions at 2016-08-30 16:16:33.413784
Question 4 / 35823053 at 2016-08-30 16:29:14.114718
Question 6 / 35823053 at 2016-08-30 16:29:59.805194
Question 9 / 35823053 at 2016-08-30 16:31:36.229862
Question 11 / 35823053 at 2016-08-30 16:32:46.323628
Question 13 / 35823053 at 2016-08-30 16:33:24.535859
Question 14 / 35823053 at 2016-08-30 16:33:41.309147
Question 16 / 35823053 at 2016-08-30 16:33:46.220353
Question 17 / 35823053 at 2016-08-30 16:33:55.408994
with stackoverflow.com.squashfs compressed using "mksquashfs ./stackoverflow.com.sqlite3 ./stackoverflow.com.sqlite3.squashfs -comp xz -root-owned" and mounted:
CPU usage is at around 70%, and Disk reads at around 7 MB/s.
rtoni@debian:~/stackexchange-zim$ ./questions.py
Questions
Question 4 / 35823053 at 2016-08-30 15:40:48.185610
Question 6 / 35823053 at 2016-08-30 15:43:08.604310
Question 9 / 35823053 at 2016-08-30 15:48:25.446965
Question 11 / 35823053 at 2016-08-30 15:55:26.272059
Question 13 / 35823053 at 2016-08-30 16:03:24.333881
Question 14 / 35823053 at 2016-08-30 16:09:37.595338
Question 16 / 35823053 at 2016-08-30 16:12:25.249392
Question 17 / 35823053 at 2016-08-30 16:14:43.609224
with stackoverflow.com.squashfs compressed using "mksquashfs ./stackoverflow.com.sqlite3 ./stackoverflow.com.sqlite3.squashfs-compgzip-b4096 -comp gzip -root-owned -b 4096" and mounted:
CPU usage is at around 10%, and Disk reads at around 2 MB/s.
Questions at 2016-08-30 19:35:17.218871
Question 4 / 35823053 at 2016-08-30 20:01:36.537141
Question 6 / 35823053 at 2016-08-30 20:02:08.279820
Question 9 / 35823053 at 2016-08-30 20:03:22.721451
Question 11 / 35823053 at 2016-08-30 20:04:20.140414
Question 13 / 35823053 at 2016-08-30 20:04:54.341013
Question 14 / 35823053 at 2016-08-30 20:05:06.851642
Question 16 / 35823053 at 2016-08-30 20:05:10.724873
Question 17 / 35823053 at 2016-08-30 20:05:19.007552
"""
from utils import *
import codecs
import os
import templates
#PostTypeId="1" means question
#PostTypeId="2" means answer
#PostTypeId="4" means tag excerpt, with Tag.ExcerptPostId==Post.Id
#PostTypeId="5" means tag wiki, with Tag.WikiPostId==Post.Id
#PostTypeId="6" means election text
#PostTypeId="7" means posts made by stackexchange? (TODO)
def question_flat_filename(post_id):
    """Stub: flat filename scheme for a question page (currently unused).

    Always returns None, matching the original placeholder behaviour.
    """
    return None
def select_answer(cursor, Id, rootdir):
    """An answer row is just a post row; delegate straight to select_post."""
    return select_post(cursor, Id, rootdir)
def select_answers_for_question(cursor, QuestionId, rootdir):
    """Return all answers of a question: accepted answer first (if any),
    the remainder sorted by Score descending."""
    cursor.execute('select AcceptedAnswerId from Posts where Id=?', (QuestionId,))
    accepted_id = cursor.fetchone()["AcceptedAnswerId"]
    if accepted_id:
        accepted_id = int(accepted_id)
    cursor.execute('select Id from Posts where ParentId=?', (QuestionId,))
    answer_ids = [row["Id"] for row in cursor]
    answers = []
    for answer_id in answer_ids:
        answer = select_answer(cursor, answer_id, rootdir)
        # Falsy accepted_id short-circuits, mirroring the original expression.
        answer["accepted"] = accepted_id and answer["Id"] == accepted_id
        answers.append(answer)
    score_of = lambda a: int(a["Score"])
    if accepted_id:
        front = [a for a in answers if a["accepted"]]
        rest = sorted((a for a in answers if not a["accepted"]),
                      key=score_of, reverse=True)
        answers = front + rest
    else:
        answers.sort(key=score_of, reverse=True)
    return answers
def select_question(cursor, Id, rootdir):
    """Load a question post together with its tags and all of its answers."""
    question = select_post(cursor, Id, rootdir)
    cursor.execute('select * from Tags where Id in (select TagId from PostsTags where PostId=?)', (Id,))
    tags = cursor.fetchall()
    for tag in tags:
        # Tags are rendered with links, so each needs the page-relative root.
        tag["RootDir"] = rootdir
        tag["IdPath"] = convert_tag_id_to_idpath(tag["Id"], rootdir)
    question["Tags"] = tags
    question["answers"] = select_answers_for_question(cursor, Id, rootdir)
    return question
def render_question(cursor, Id, renderer, PrevId, NextId, rootdir):
    """Render one question page, including its prev/next navigation pages."""
    question = select_question(cursor, Id, rootdir)
    # Prev/next are fetched here rather than inside select_question, which
    # would otherwise recurse through the navigation links forever.
    question["PrevPage"] = select_question(cursor, PrevId, rootdir)
    question["NextPage"] = select_question(cursor, NextId, rootdir)
    for page in (question, question["PrevPage"], question["NextPage"]):
        page["RootDir"] = rootdir
    return renderer.render("{{>question_html}}", question)
def make_questions_html(only_ids=None):
renderer=templates.make_renderer(templates.templates)
if only_ids:
post_ids=only_ids
else:
cursor.row_factory = sqlite3.Row #saves memory compared to =dict_factory
cursor.execute('select Id from Posts where PostTypeId="1"')
post_ids = [row["Id"] for row in cursor]
cursor.row_factory = dict_factory
max_Id=max(post_ids)
len_post_ids=len(post_ids)
start=datetime.datetime.now()
for (i,post_id) in enumerate(post_ids):
print "Question",post_id,"/",max_Id,"at",str(datetime.datetime.now()),"ETA:",estimated_time_arrival(start,i,len_post_ids)
flat_path=file_path+"question/"+str(post_id)+".html"
(paths,basename,n_subdirs)=split_filename_into_subdirs(flat_path)
html_path=paths+basename
if not os.access(paths,os.W_OK):
os.makedirs(paths)
with codecs.open(html_path, "w", "utf-8") as f:
prev_post_id=post_ids[(i-1)%len_post_ids]
next_post_id=post_ids[(i+1)%len_post_ids]
html=None
try:
html=render_question(cursor, post_id, renderer, prev_post_id, next_post_id,"../"*(n_subdirs+1))
except:
print "Error rendering",post_id
bugf=open("buggy-database-entries.txt", "a")
bugf.write("domain: %s post_id: %i\n" % (stackexchange_domain,post_id))
bugf.close()
if html: f.write(html)
# Script entry point: open the StackExchange database, then render every
# question page inside a single transaction context.
(connection,cursor)=init_db()
with connection:
    print "Questions","at",str(datetime.datetime.now())
    #import profile
    #profile.run("make_questions_html()",sort="tottime")
    #make_questions_html([7511791,7511795]) # spurious </html> and </body> in database.
    make_questions_html()
| StarcoderdataPython |
365349 | <filename>data_collection.py
colleges = [
'National Institute of Technology, Kurukshetra' ,
'National Institute of Technology Raipur' ,
'National Institute of Technology, Rourkela' ,
'National Institute of Technology Calicut' ,
'Indian Institute of Engineering Science and Technology, Shibpur' ,
'National Institute of Technology Delhi' ,
'National Institute of Technology, Rourkela' ,
'National Institute of Technology, Tiruchirappalli' ,
'Maulana Azad National Institute of Technology Bhopal' ,
'National Institute of Technology Hamirpur' ,
'Birla Institute of Technology, Mesra, Ranchi' ,
'National Institute of Technology, Silchar' ,
'Visvesvaraya National Institute of Technology,Nagpur' ,
'National Institute of Technology Durgapur' ,
'National Institute of Technology, Uttarakhand' ,
'Malaviya National Institute of Technology Jaipur' ,
'Motilal Nehru National Institute of TechnologyAllahabad' ,
'National Institute of Technology, Tiruchirappalli' ,
'National Institute of Technology Patna' ,
'National Institute of Technology Karnataka, Surathkal' ,
'National Institute of Technology Calicut' ,
'Sardar Vallabhbhai National Institute of Technology, Surat' ,
'National Institute of Technology Raipur' ,
'Indian Institute of Information Technology Lucknow' ,
'Punjab Engineering College, Chandigarh' ,
'Visvesvaraya National Institute of Technology, Nagpur' ,
'National Institute of Technology, Warangal' ,
'National Institute of Technology, Andhra Pradesh' ,
'Indian Institute of Engineering Science and Technology, Shibpur' ,
'National Institute of Technology, Jamshedpur' ,
'Malaviya National Institute of Technology Jaipur' ,
'Dr. <NAME> National Institute of Technology, Jalandhar' ,
'National Institute of Technology, Kurukshetra' ,
'National Institute of Technology Puducherry' ,
'University of Hyderabad' ,
'Maulana Azad National Institute of TechnologyBhopal' ,
'National Institute of Technology Goa' ,
'Pt. Dwarka Prasad Mishra Indian Institute ofInformation Technology, Design & Manufacture Jabalpur' ,
'Indian Institute of Engineering Science andTechnology, Shibpur' ,
'National Institute of Technology Delhi' ,
'National Institute of Technology, Warangal' ,
'National Institute of Technology Hamirpur' ,
'Indian Institute of Information Technology, Design &Manufacturing, Kancheepuram' ,
'Atal Bihari Vajpayee Indian Institute of Information Technology& Management Gwalior' ,
'National Institute of Technology Meghalaya' ,
'Indian Institute of Information Technology Lucknow' ,
'National Institute of Technology Raipur' ,
'National Institute of Technology, Rourkela' ,
'National Institute of Technology Durgapur' ,
'National Institute of Technology, Kurukshetra' ,
'National Institute of Technology, Tiruchirappalli' ,
'Sardar Vallabhbhai National Institute of Technology,Surat' ,
'National Institute of Technology Calicut' ,
'National Institute of Technology Agartala' ,
'National Institute of Technology Karnataka, Surathkal' ,
'National Institute of Technology, Silchar' ,
'Birla Institute of Technology, Mesra, Ranchi' ,
'National Institute of Technology, Rourkela' ,
'Dr. <NAME>kar National Institute of Technology,Jalandhar' ,
'National Institute of Technology, Rourkela' ,
'Indian Institute of Engineering Science andTechnology, Shibpur' ,
'National Institute of Technology, Jamshedpur' ,
'Maulana Azad National Institute of TechnologyBhopal' ,
'National Institute of Technology Goa' ,
'Punjab Engineering College, Chandigarh' ,
'Motilal Nehru National Institute of TechnologyAllahabad' ,
'National Institute of Technology Puducherry' ,
'National Institute of Technology Hamirpur' ,
'National Institute of Technology, Tiruchirappalli' ,
'Motilal Nehru National Institute of TechnologyAllahabad' ,
'National Institute of Technology Durgapur' ,
'National Institute of Technology, Uttarakhand' ,
'Jawaharlal Nehru University, Delhi' ,
'Atal Bihari Vajpayee Indian Institute of Information Technology& Management Gwalior' ,
'National Institute of Technology Patna' ,
'National Institute of Technology, Andhra Pradesh' ,
'Indian Institute of Information Technology(IIIT)Kilohrad, Sonepat, Haryana' ,
'International Institute of Information Technology,Naya Raipur' ,
'National Institute of Technology Hamirpur' ,
'Malaviya National Institute of Technology Jaipur' ,
'National Institute of Technology Goa' ,
'National Institute of Technology, Jamshedpur' ,
'Indian Institute of Information Technology Guwahati' ,
'Punjab Engineering College, Chandigarh' ,
'Indian Institute of Information Technology Srirangam,Tiruchirappalli' ,
'National Institute of Technology, Rourkela' ,
'International Institute of Information Technology,Naya Raipur' ,
'Dr. <NAME> National Institute of Technology,Jalandhar' ,
'Indian institute of information technology, Raichur,Karnataka' ,
'National Institute of Technology Raipur' ,
'National Institute of Technology Hamirpur' ,
'Birla Institute of Technology, Mesra, Ranchi' ,
'Indian Institute of Engineering Science andTechnology, Shibpur' ,
'International Institute of Information Technology,Bhubaneswar' ,
'National Institute of Technology, Warangal' ,
'Motilal Nehru National Institute of TechnologyAllahabad' ,
'National Institute of Technology Karnataka, Surathkal' ,
'Indian Institute of Information Technology Surat' ,
'National Institute of Technology Calicut' ,
'Indian Institute of Information Technology, Design &Manufacturing, Kancheepuram' ,
'Indian Institute of Information Technology (IIIT)Kota,Rajasthan' ,
'National Institute of Technology, Srinagar' ,
'Visvesvaraya National Institute of Technology,Nagpur' ,
'National Institute of Technology Calicut' ,
'National Institute of Technology, Silchar' ,
'Indian Institute of Information Technology (IIIT) Pune' ,
'Pt. Dwarka Prasad Mishra Indian Institute ofInformation Technology, Design & Manufacture Jabalpur' ,
'Sardar Vallabhbhai National Institute of Technology,Surat' ,
'National Institute of Technology Sikkim' ,
'Indian Institute of Information Technology, Design &Manufacturing, Kancheepuram' ,
'National Institute of Technology, Rourkela' ,
'Visvesvaraya National Institute of Technology,Nagpur' ,
'National Institute of Technology Raipur' ,
'Punjab Engineering College, Chandigarh' ,
'Indian Institute of Engineering Science andTechnology, Shibpur' ,
'National Institute of Technology, Rourkela' ,
'Malaviya National Institute of Technology Jaipur' ,
'National Institute of Technology Agartala' ,
'National Institute of Technology Puducherry' ,
'Indian Institute of Information Technology(IIIT)Kilohrad, Sonepat, Haryana' ,
'National Institute of Technology Arunachal Pradesh' ,
'National Institute of Technology Patna' ,
'National Institute of Technology, Silchar' ,
'International Institute of Information Technology,Bhubaneswar' ,
'Maulana Azad National Institute of TechnologyBhopal' ,
'Indian Institute of Information Technology (IIIT), SriCity, Chittoor' ,
'National Institute of Technology, Kurukshetra' ,
'Jawaharlal Nehru University, Delhi' ,
'International Institute of Information Technology,Bhubaneswar' ,
'Sardar Vallabhbhai National Institute of Technology,Surat' ,
'National Institute of Technology, Manipur' ,
'Birla Institute of Technology, Mesra, Ranchi' ,
'National Institute of Technology, Mizoram' ,
'Maulana Azad National Institute of TechnologyBhopal' ,
'National Institute of Technology Puducherry' ,
'National Institute of Technology, Uttarakhand' ,
'National Institute of Technology Nagaland' ,
'National Institute of Technology, Uttarakhand' ,
'National Institute of Technology Durgapur' ,
'National Institute of Technology Meghalaya' ,
'National Institute of Technology Patna' ,
'National Institute of Technology Karnataka, Surathkal' ,
'National Institute of Technology, Silchar' ,
'National Institute of Technology Agartala' ,
'National Institute of Technology Durgapur' ,
'National Institute of Technology Calicut' ,
'Indian Institute of Information Technology(IIIT),Vadodara, Gujrat' ,
'Dr. <NAME>kar National Institute of Technology,Jalandhar' ,
'Indian Institute of Information Technology Bhopal' ,
'National Institute of Technology, Andhra Pradesh' ,
'National Institute of Technology, Warangal' ,
'Indian Institute of Information Technology (IIIT) Pune' ,
'National Institute of Technology, Srinagar' ,
'National Institute of Technology, Andhra Pradesh' ,
'Indian Institute of Information Technology Srirangam,Tiruchirappalli' ,
'Indian Institute of Information Technology, Design & Manufacturing, Kancheepuram' ,
'National Institute of Technology Agartala' ,
'Indian Institute of Information Technology(IIIT) Una,Himachal Pradesh' ,
'National Institute of Technology, Rourkela' ,
'National Institute of Technology Agartala' ,
'International Institute of Information Technology,Naya Raipur' ,
'Indian Institute of Information Technology (IIIT), SriCity, Chittoor' ,
'National Institute of Technology, Manipur' ,
'National Institute of Technology, Jamshedpur' ,
'National Institute of Technology Durgapur' ,
'Indian Institute of Information Technology(IIIT),Vadodara, Gujrat' ,
'Dr. <NAME> National Institute of Technology,Jalandhar' ,
'Indian Institute of Information Technology (IIIT)Nagpur' ,
'National Institute of Technology Arunachal Pradesh' ,
'National Institute of Technology Hamirpur' ,
'Indian Institute of Information Technology, Design &Manufacturing, Kancheepuram' ,
'National Institute of Technology, Rourkela' ,
'National Institute of Technology Meghalaya' ,
'National Institute of Technology Hamirpur' ,
'National Institute of Technology, Rourkela' ,
'National Institute of Technology Sikkim' ,
'Indian Institute of Information Technology Guwahati' ,
'Indian Institute of Information Technology (IIIT)Kota,Rajasthan' ,
'National Institute of Technology Goa' ,
'National Institute of Technology, Rourkela' ,
'National Institute of Technology Raipur' ,
'National Institute of Technology Nagaland' ,
'Indian Institute of Information Technology Design &Manufacturing Kurnool, Andhra Pradesh' ,
'National Institute of Technology, Kurukshetra' ,
'Dr. <NAME> National Institute of Technology,Jalandhar' ,
'National Institute of Technology Puducherry' ,
'Indian Institute of Information Technology (IIIT)Ranchi' ,
'National Institute of Technology Meghalaya' ,
'Indian Institute of Information Technology Bhopal' ,
'Indian Institute of Information Technology(IIIT)Kottayam' ,
'Indian Institute of Information Technology, Design & Manufacturing, Kancheepuram' ,
'National Institute of Technology, Rourkela' ,
'Visvesvaraya National Institute of Technology,Nagpur' ,
'National Institute of Technology, Mizoram' ,
'National Institute of Technology Calicut' ,
'Indian Institute of Information Technology(IIIT) Una,Himachal Pradesh' ,
'National Institute of Technology Raipur' ,
'National Institute of Technology, Rourkela' ,
'Indian Institute of Information Technology(IIIT)Dharwad' ,
'National Institute of Technology Calicut' ,
'National Institute of Technology Patna' ,
'Indian Institute of Information Technology Surat' ,
'National Institute of Technology, Srinagar' ,
'National Institute of Technology, Silchar' ,
'Pt. Dwarka Prasad Mishra Indian Institute ofInformation Technology, Design & Manufacture Jabalpur' ,
'Indian Institute of Information Technology(IIIT)Kalyani, West Bengal' ,
'National Institute of Technology, Uttarakhand' ,
'Central University of Rajasthan, Rajasthan' ,
'International Institute of Information Technology,Bhubaneswar' ,
'Punjab Engineering College, Chandigarh' ,
'Visvesvaraya National Institute of Technology,Nagpur' ,
'Malaviya National Institute of Technology Jaipur' ,
'National Institute of Technology Arunachal Pradesh' ,
'Indian Institute of Information Technology Bhopal' ,
'Sardar Vallabhbhai National Institute of Technology,Surat' ,
'Indian Institute of Information Technology (IIIT)Nagpur' ,
'National Institute of Technology, Andhra Pradesh' ,
'Birla Institute of Technology, Mesra, Ranchi' ,
'Pondicherry Engineering College, Puducherry' ,
'National Institute of Technology, Rourkela' ,
'National Institute of Technology, Manipur' ,
'National Institute of Technology Sikkim' ,
'Indian Institute of Engineering Science andTechnology, Shibpur' ,
'Birla Institute of Technology, Mesra, Ranchi' ,
'Indian Institute of Information Technology Design &Manufacturing Kurnool, Andhra Pradesh' ,
'Maulana Azad National Institute of TechnologyBhopal' ,
'National Institute of Technology, Manipur' ,
'National Institute of Technology, Rourkela' ,
'National Institute of Technology, Jamshedpur' ,
'National Institute of Technology Hamirpur' ,
'National Institute of Technology Agartala' ,
'National Institute of Technology, Andhra Pradesh' ,
'Institute of Infrastructure, Technology, Research andManagement-Ahmedabad' ,
'National Institute of Technology Sikkim' ,
'J.K. Institute of Applied Physics & Technology,Department of Electronics & Communication, University of Allahabad- Allahabad' ,
'Sardar Vallabhbhai National Institute of Technology,Surat' ,
'Indian Institute of Information Technology(IIIT)Dharwad' ,
'Indian Institute of Information Technology, Agartala' ,
'National Institute of Technology, Rourkela' ,
'Motilal Nehru National Institute of TechnologyAllahabad' ,
'National Institute of Technology, Jamshedpur' ,
'Indian Institute of Information Technology (IIIT)Ranchi' ,
'National Institute of Technology Agartala' ,
'International Institute of Information Technology,Bhubaneswar' ,
'Indian Institute of Information Technology Bhagalpur' ,
'Indian Institute of Information Technology(IIIT) Una,Himachal Pradesh' ,
'National Institute of Technology Durgapur' ,
'National Institute of Technology Agartala' ,
'National Institute of Technology Arunachal Pradesh' ,
'National Institute of Technology, Rourkela' ,
'Punjab Engineering College, Chandigarh' ,
'National Institute of Technology Agartala' ,
'National Institute of Technology Nagaland' ,
'Indian Institute of Information Technology Manipur' ,
'National Institute of Technology, Srinagar' ,
'National Institute of Technology Meghalaya' ,
'Indian Institute of Information Technology, Design &Manufacturing, Kancheepuram' ,
'Institute of Infrastructure, Technology, Research andManagement-Ahmedabad' ,
'Indian Institute of Information Technology, Design &Manufacturing, Kancheepuram' ,
'National Institute of Technology, Srinagar' ,
'National Institute of Technology, Mizoram' ,
'Indian Institute of Engineering Science andTechnology, Shibpur' ,
'Dr. <NAME> National Institute of Technology,Jalandhar' ,
'National Institute of Technology Raipur' ,
'National Institute of Technology Patna' ,
'Central University of Rajasthan, Rajasthan' ,
'National Institute of Technology Sikkim' ,
'Indian Institute of Engineering Science andTechnology, Shibpur' ,
'Indian Institute of Information Technology Bhagalpur' ,
'Dr. <NAME> National Institute of Technology,Jalandhar' ,
'National Institute of Technology, Mizoram' ,
'National Institute of Technology, Manipur' ,
'Indian Institute of Information Technology Design &Manufacturing Kurnool, Andhra Pradesh' ,
'National Institute of Technology Agartala' ,
'National Institute of Technology Arunachal Pradesh' ,
'National Institute of Technology, Andhra Pradesh' ,
'National Institute of Technology Patna' ,
'J.K. Institute of Applied Physics & Technology,Department of Electronics & Communication, University of Allahabad- Allahabad' ,
'National Institute of Technology Raipur' ,
'Indian Institute of Information Technology, Design &Manufacturing, Kancheepuram' ,
'Assam University, Silchar' ,
'National Institute of Technology Durgapur' ,
'Sant Longowal Institute of Engineering andTechnology' ,
'National Institute of Technology Nagaland' ,
'National Institute of Technology Nagaland' ,
'Pondicherry Engineering College, Puducherry' ,
'Birla Institute of Technology, Mesra, Ranchi' ,
'National Institute of Technology, Andhra Pradesh' ,
'Indian Institute of Information Technology Manipur' ,
'Institute of Infrastructure, Technology, Research andManagement-Ahmedabad' ,
'National Institute of Technology Raipur' ,
'Dr. <NAME> National Institute of Technology,Jalandhar' ,
'Indian Institute of Information Technology Bhagalpur' ,
'Birla Institute of Technology, Mesra, Ranchi' ,
'National Institute of Technology, Mizoram' ,
'National Institute of Electronics and Information Technology,Aurangabad (Maharashtra)' ,
'National Institute of Technology Nagaland' ,
'National Institute of Technology Durgapur' ,
'National Institute of Technology, Srinagar' ,
'National Institute of Technology Durgapur' ,
'National Institute of Technology, Srinagar' ,
'Punjab Engineering College, Chandigarh' ,
'National Institute of Technology Agartala' ,
'Sardar Vallabhbhai National Institute of Technology,Surat' ,
'Institute of Technology, Guru GhasidasVishwavidyalaya (A Central University), Bilaspur, (C.G.)' ,
'Birla Institute of Technology, Mesra, Ranchi' ,
'Pondicherry Engineering College, Puducherry' ,
'National Institute of Technology Agartala' ,
'Sant Longowal Institute of Engineering andTechnology' ,
'National Institute of Technology, Rourkela' ,
'School of Engineering, Tezpur University, Napaam,Tezpur' ,
'National Institute of Technology Patna' ,
'Gurukula <NAME>aya, Haridwar' ,
'National Institute of Technology Raipur' ,
'Central institute of Technology Kokrajar, Assam' ,
'National Institute of Technology, Srinagar' ,
'HNB Garhwal University Srinagar (Garhwal)' ,
'Assam University, Silchar' ,
'Central institute of Technology Kokrajar, Assam' ,
'National Institute of Technology Agartala' ,
'Sant Longowal Institute of Engineering andTechnology' ,
'Pondicherry Engineering College, Puducherry' ,
'Shri Mata Vaishno Devi University, Katra, Jammu &Kashmir' ,
'HNB Garhwal University Srinagar (Garhwal)' ,
'Mizoram University, Aizawl' ,
'Institute of Technology, Guru GhasidasVishwavidyalaya (A Central University), Bilaspur, (C.G.)' ,
'Mizoram University, Aizawl' ,
'Institute of Technology, Guru GhasidasVishwavidyalaya (A Central University), Bilaspur, (C.G.)' ,
'School of Engineering, Tezpur University, Napaam,Tezpur' ,
'Ghani Khan Choudhary Institute of Engineering andTechnology, Malda, West Bengal' ,
'HNB Garhwal University Srinagar (Garhwal)' ,
'Central institute of Technology Kokrajar, Assam' ,
'HNB Garhwal University Srinagar (Garhwal)' ,
'School of Engineering, Tezpur University, Napaam,Tezpur' ,
'Central institute of Technology Kokrajar, Assam' ,
'Ghani Khan Choudhary Institute of Engineering andTechnology, Malda, West Bengal' ,
'School of Engineering, Tezpur University, Napaam,Tezpur' ,
'<NAME>, Haridwar' ,
'National Institute of Foundry & Forge Technology,Hatia, Ranchi' ,
'Mizoram University, Aizawl' ,
'Sant Longowal Institute of Engineering andTechnology' ,
'Mizoram University, Aizawl' ,
'Institute of Technology, Guru GhasidasVishwavidyalaya (A Central University), Bilaspur, (C.G.)' ,
'<NAME>, Haridwar' ,
'School of Engineering, Tezpur University, Napaam,Tezpur' ,
'Sant Longowal Institute of Engineering andTechnology' ,
'Shri Mata Vaishno Devi University, Katra, Jammu &Kashmir' ,
'Institute of Technology, Guru GhasidasVishwavidyalaya (A Central University), Bilaspur, (C.G.)' ,
'Institute of Technology, Guru GhasidasVishwavidyalaya (A Central University), Bilaspur, (C.G.)' ,
'HNB Garhwal University Srinagar (Garhwal)' ,
'<NAME>, Haridwar' ,
'Shri Mata Vaishno Devi University, Katra, Jammu &Kashmir' ,
'Sant Longowal Institute of Engineering andTechnology' ,
'Mizoram University, Aizawl' ,
'National Institute of Foundry & Forge Technology,Hatia, Ranchi' ,
'Assam University, Silchar' ,
'Shri Mata Vaishno Devi University, Katra, Jammu &Kashmir' ,
'Shri Mata Vaishno Devi University, Katra, Jammu &Kashmir' ,
'Central institute of Technology Kokrajar, Assam' ,
'Indian Institute of Carpet Technology, Bhadohi' ,
'Institute of Technology, Guru <NAME>aya (A Central University), Bilaspur, (C.G.)' ,
'Sant Longowal Institute of Engineering andTechnology' ,
'Ghani Khan Choudhary Institute of Engineering andTechnology, Malda, West Bengal' ,
'School of Engineering, Tezpur University, Napaam, Tezpur' ,
'National Institute of Food Technology Entrepreneurship and Management, Sonepat, Haryana' ,
]
Streams = [
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Instrumentation Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (5 Years, Bachelor and Master ofTechnology (Dual Degree))' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Instrumentation and Control Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Computer Science (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Aerospace Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science (5 Years, Integrated Master of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Integrated B. Tech.(IT) and M. Tech (IT) (5 Years, Integrated B. Tech. andM. Tech. /MBA)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Physics (5 Years, Integrated Master of Science)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Engineering Physics (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Telecommunication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Aerospace Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Production Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (5 Years, Bachelor and Master ofTechnology (Dual Degree))' ,
'Integrated B. Tech.(IT) and MBA (5 Years, Integrated B. Tech. and M. Tech./MBA)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Data Science and Artificial Intelligence (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (5 Years, Bachelor and Masterof Technology (Dual Degree))' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (5 Years, Bachelor and Master of Technology (DualDegree))' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Production and Industrial Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (5 Years, Bachelor and Master ofTechnology (Dual Degree))' ,
'Mathematics (5 Years, Integrated Master of Science)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (5 Years, Bachelor and Masterof Technology (Dual Degree))' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Mining Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Instrumentation Engineering (4 Years, Bachelor ofTechnology)' ,
'Mathematics and Computing (5 Years, Integrated Master of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Production Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Bio Technology (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'B. Tech. Electronics and Communication Engineering and M. Tech. Electronics andCommunication Engineering with specialization in VLSI Design (5 Years, Bachelor and Master of Technology (Dual Degree))' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Mining Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (5 Years, Bachelor and Master of Technology (DualDegree))' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Instrumentation and Control Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Bio Technology (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (5 Years, Bachelor and Master ofTechnology (Dual Degree))' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Industrial Design (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Engineering (4 Years, Bachelor of Technology)' ,
'Production and Industrial Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'B. Tech. Electronics and Communication Engineering and M. Tech.Electronics and Communication Engineering with specialization in Communication Systems Design (5 Years, Bachelor and Master of Technology (Dual Degree))' ,
'Mining Engineering (5 Years, Bachelor and Master of Technology (DualDegree))' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Materials Science and Engineering (4 Years, Bachelor of Technology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Chemistry (5 Years, Integrated Master of Science)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Bio Technology (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Telecommunication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Mining Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Physics (5 Years, Integrated Master of Science)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Bio Medical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering with specialization in Design andManufacturing (4 Years, Bachelor of Technology)' ,
'Materials Science and Metallurgical Engineering (4 Years, Bachelor ofTechnology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Ceramic Engineering (4 Years, Bachelor of Technology)' ,
'Production and Industrial Engineering (4 Years, Bachelor of Technology)' ,
'Materials Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Instrumentation Engineering (4 Years, Bachelor ofTechnology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Mathematics (5 Years, Integrated Master of Science)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Food Process Engineering (4 Years, Bachelor of Technology)' ,
'Bio Technology (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Engineering Physics (5 Years, Bachelor and Master of Technology (DualDegree))' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Ceramic Engineering and M.Tech Industrial Ceramic (5 Years, Bachelor andMaster of Technology (Dual Degree))' ,
'Production and Industrial Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'B. Tech. Mechanical Engineering and M. Tech. in Mechanical Engineeringwith specialization in Product Design (5 Years, Bachelor and Master of Technology (Dual Degree))' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Smart Manufacturing (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Mining Engineering (4 Years, Bachelor of Technology)' ,
'Industrial and Production Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Physics (5 Years, Integrated Master of Science)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgy and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Bio Technology (4 Years, Bachelor of Technology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering with specialization in Design and Manufacturing (4 Years,Bachelor of Technology)' ,
'Physics (5 Years, Bachelor of Science and Master of Science (Dual Degree))' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Mathematics (5 Years, Integrated Master of Science)' ,
'Electronics and Communication Engineering (4 Years, Bachelor of Technology)' ,
'Mining Engineering (4 Years, Bachelor of Technology)' ,
'B. Tech. Mechanical Engineering and M. Tech. Mechanical Engineeringwith specialization in Advanced Manufacturing (5 Years, Bachelor and Master of Technology (Dual Degree))' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Bio Technology (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Instrumentation Engineering (4 Years, Bachelor ofTechnology)' ,
'Electrical and Electronics Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Production Engineering (4 Years, Bachelor of Technology)' ,
'Bio Technology (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Bio Technology (4 Years, Bachelor of Technology)' ,
'Textile Technology (4 Years, Bachelor of Technology)' ,
'Mechatronics Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (Plastic and Polymer) (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electronics System Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Chemistry (5 Years, Integrated Master of Science)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Biotechnology (5 Years, Bachelor and Master of Technology (Dual Degree))' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Materials and Metallurgical Engineering (4 Years, Bachelor of Technology)' ,
'Production Engineering (4 Years, Bachelor of Technology)' ,
'Chemistry (5 Years, Integrated Master of Science)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Bio Technology (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Biotechnology and Biochemical Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Life Science (5 Years, Integrated Master of Science)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Chemistry (5 Years, Integrated Master of Science)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Bio Medical Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Metallurgical and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Chemistry (5 Years, Bachelor of Science and Master of Science (DualDegree))' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Computer Science and Engineering (4 Years, Bachelor of Technology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Computer Engineering (4 Years, Bachelor of Technology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Information Technology (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Instrumentation Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Metallurgy and Materials Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Chemical Engineering (4 Years, Bachelor of Technology)' ,
'Electronics and Communication Engineering (4 Years, Bachelor ofTechnology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Electrical and Instrumentation Engineering (4 Years, Bachelor ofTechnology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Electrical Engineering (4 Years, Bachelor of Technology)' ,
'Instrumentation and Control Engineering (4 Years, Bachelor ofTechnology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Manufacturing Engineering (4 Years, Bachelor of Technology)' ,
'Agricultural Engineering (4 Years, Bachelor of Technology)' ,
'Mechanical Engineering (4 Years, Bachelor of Technology)' ,
'Civil Engineering (4 Years, Bachelor of Technology)' ,
'Food Engineering and Technology (4 Years, Bachelor of Technology)' ,
'Carpet and Textile Technology (4 Years, Bachelor of Technology)' ,
'Industrial and Production Engineering (4 Years, Bachelor of Technology)' ,
'Food Technology (4 Years, Bachelor of Technology)' ,
'Food Technology (4 Years, Bachelor of Technology)' ,
'Food Engineering and Technology (4 Years, Bachelor of Technology)' ,
'Food Technology and Management (4 Years, Bachelor of Technology)' ,
]
"""import itertools
i = 1
for (colle , stre) in zip(colleges,Streams):
print(colle, stre)
i += 1
print(i)"""
# --- dataset file boundary (id 3496303) ---
# Generated by Django 3.1.5 on 2021-01-10 11:28
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import game.models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1.5, 2021-01-10).

    Adds a creation timestamp to Player and tightens the definitions of two
    foreign keys and the Puzzle photo field. Do not edit the operations by
    hand: their content must stay in sync with Django's migration graph.
    """

    dependencies = [
        # Must run after the previous migration of the "game" app.
        ('game', '0004_game_state'),
    ]

    operations = [
        # New Player.created_at column; auto_now_add stamps rows on insert.
        # The one-off default backfills existing rows and is then discarded
        # (preserve_default=False).
        migrations.AddField(
            model_name='player',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        # Reverse accessor renamed: PuzzleSelection.guesses.
        migrations.AlterField(
            model_name='playerguess',
            name='puzzle_selection',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='guesses', to='game.puzzleselection'),
        ),
        # Upload path now computed by game.models.upload_location.
        migrations.AlterField(
            model_name='puzzle',
            name='photo',
            field=models.ImageField(upload_to=game.models.upload_location),
        ),
        # Reverse accessor renamed: Game.selected.
        migrations.AlterField(
            model_name='puzzleselection',
            name='game',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='selected', to='game.game'),
        ),
    ]
| StarcoderdataPython |
#170401022 <NAME>
import socket
import os
import time
# --- TCP client that fetches the current time from a remote server ---
# Protocol, as implemented below:
#   1) server sends "<epoch_milliseconds>,<time_zone>"  -> client ACKs
#   2) server sends "<network_delay_ms>"                -> client compensates
#   3) client sets the system clock via `date` and ACKs again
Target_IP = "192.168.2.8"
TCP_Port = 142
print("Forwardaing to targeted IP addresses..")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCP_Link = (Target_IP, TCP_Port)
Message1 = "Time information "  # NOTE(review): unused; kept for compatibility
try:
    sock.connect(TCP_Link)
except OSError:  # was a bare `except:`; connect failures raise OSError
    print("Connection error")
    sock.close()  # BUG FIX: `sock.close` was referenced but never called
    raise SystemExit(1)  # was exit(), which depends on the site module
print("connection accepted")
timer = sock.recv(1024)
print("received time = " + timer.decode())
timer = timer.decode().split(",")
Message = "Time data has received."
sock.send(Message.encode())
time_ms = float(timer[0]) / 1000  # server sends ms; time.ctime wants seconds
time_zone = timer[1]
delay_time = sock.recv(1024)
print(delay_time.decode() + "ms")
# Compensate for the network delay reported by the server.
time_ms = time_ms - float(delay_time.decode()) / 1000
settime = time.ctime(time_ms)
print(settime, time_zone, "\n")
# settime comes from time.ctime() (fixed, shell-safe format), so the
# quoted interpolation below cannot inject shell metacharacters.
set_time_command = 'sudo date --set=' + '"' + settime + '"'
os.system(set_time_command)
Message = "Time data has received and system time has been set succesfully."
sock.send(Message.encode())
sock.close()  # BUG FIX: the socket was leaked on the success path
90551 | from ..dyck import Grammar
from ..grammar_utils import *
"""
ababaacbcbcc
W
- [W] null
- [BC] b -> c
- [AB] a -> b
BC(|ab|,|c|)
BC(ab |ab|, |c| c)
AB(abab |a|, |cb| cc)
W(ababa |acb|, cbcc)
# extra
W(abc, abc)
#
BC(|ab|, |c|)
AB(ab|a|, |cb|c)
BC(|ab|aba, cbc|c|)
AB(ababa|a|, |cb|cbcc)
"""
# States used by the MCFG fragments below; the trailing comment records a
# fourth state ('ABC') that was tried and disabled.
all_states = ['W', 'BC', 'AB'] #, 'ABC']

# Minimal working grammar: S rewrites to W, and W is either the terminal
# triple (a, b, c) or an existing W combined with that triple.
# NOTE(review): the semantics of r(...), O(...), Grammar and the symbols
# a, b, c, x, y come from ..dyck / ..grammar_utils (not visible here) --
# presumably rule constructors over string-tuple yields; verify there.
mcfg2 = Grammar([
    r('S <- W', {(x, y)}),
    O('W', {(a, b, c)}),
    O('W <- W', {(x, y), (a, b, c)}),
    # O('W <- W, W', {(x, y), (l, m)}),
])
# Earlier variant kept for reference (introduced unit states A/B/C):
# mcfg2 = Grammar([
#     r('S <- W', {(x, y)}),
#     O('A', {(a)}),
#     O('B', {(b)}),
#     O('C', {(c)}),
#     O('W', {(a, b, c)}),
#     O('W <- W, A, B, C', {(x, y), (l, m, q, w, r, t), (a, b, c)}),
# ])
# NOTE(review): every candidate rule below is commented out, so mcfg22 is
# currently an *empty* grammar (equivalent to Grammar([])). The comments are
# the author's record of rule variants that were tried/considered and are
# kept verbatim.
mcfg22 = Grammar([
    ### Base cases
    # r('S <- W', {(x, y)}),
    # # all_c('W', [], orders=[(a, b, c)], left=[a, b, c]),
    # all_c('W', [], orders=[(a, b, c)], right=[a, b, c]),
    # all_c('BC', [], orders=[(a, b, c)], left=[a, b], right=[c]),
    # all_c('AB', [], orders=[(a, b, c)], left=[a], right=[b, c]),
    # # ###
    # # ### SAFETY
    # O('W <- BC', {(x, y)}),
    # O('W <- AB', {(x, y)}),
    # # O('W <- ABC', {(x, y)}),
    # # ###
    # # Interleaving words
    # O('W <- W, W', {(x, y), (z, w)}),
    # # all_c('W', ['AB', 'BC'], orders=[(x, y), (z, w)]
    # all_c('AB', ['AB', 'BC'], orders=[(x, y), (z, w)], left=[x], right=[y]),
    # all_c('BC', ['AB', 'BC'], orders=[(x, y), (z, w)], left=[z], right=[w]),
    # # all_c('ABC', ['AB', 'BC'], orders=[(x, y), (z, w)]
    # # , left=[x, z], right=[y, w]),
    # all_c('W', ['AB', 'AB'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[y, w]),
    # all_c('W', ['AB', 'AB'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , right=[x, z]),
    # all_c('AB', ['AB', 'AB'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[x], right=[z, y, w]),
    # all_c('AB', ['AB', 'AB'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[z], right=[x, y, w]),
    # all_c('AB', ['AB', 'AB'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[x, z], right=[y, w]),
    # all_c('AB', ['AB', 'AB'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[x, z, w], right=[y]),
    # all_c('AB', ['AB', 'AB'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[x, z, y], right=[w]),
    # all_c('W', ['BC', 'BC'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[y, w]),
    # all_c('W', ['BC', 'BC'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , right=[x, z]),
    # all_c('BC', ['BC', 'BC'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[x], right=[z, y, w]),
    # all_c('BC', ['BC', 'BC'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[z], right=[x, y, w]),
    # all_c('BC', ['BC', 'BC'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[x, z], right=[y, w]),
    # all_c('BC', ['BC', 'BC'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[x, z, w], right=[y]),
    # all_c('BC', ['BC', 'BC'], orders=[(x, y), (z, w), (x, w), (z, y)]
    # , left=[x, z, y], right=[w]),
    # # ABC
    # # all_c('ABC', ['BC'], orders=[(x, y), (a, b, c)]
    # # , left=[a, x], right=[y, b]),
    # # all_c('ABC', ['AB'], orders=[(x, y), (a, b, c)]
    # # , left=[x, b], right=[y, c]),
    # # all_c('ABC', ['BC'], orders=[(x, y), (a, c, b), (x, c), (b, y)]
    # # , left=[a, x], right=[b, c]),
    # # all_c('ABC', ['AB'], orders=[(x, y), (b, a, c), (a, y), (x, b)]
    # # , left=[a, b], right=[y, c]),
    # # all_c('ABC', ['AB'], orders=[(x, y), (b, c, a), (a, y), (x, b)]
    # # , left=[a, b], right=[y, c]),
    # # ABC -> W
    # all_c('W', ['ABC'], orders=[(x, y), (c, b, a), (x, b, y), (x, c), (a, y)]
    # , left=[y, a]),
    # all_c('W', ['ABC'], orders=[(x, y), (c, b, a), (x, b, y), (x, c), (a, y)]
    # , right=[x, c]),
    # # ABC -> AB
    # all_c('AB', ['ABC'], orders=[(x, y), (c, b, a), (x, b, y), (x, c), (a, y)]
    # , left=[c], right=[b]),
    # # ABC -> BC
    # all_c('BC', ['ABC'], orders=[(x, y), (c, b, a), (x, b, y), (x, c), (a, y)]
    # , left=[b], right=[a]),
    # # ABC -> ABC
    # all_c('ABC', ['ABC'], orders=[(x, y), (c, b, a), (x, b, y), (x, c), (a, y)]
    # , left=[x], right=[c]),
    # all_c('ABC', ['ABC'], orders=[(x, y), (c, b, a), (x, b, y), (x, c), (a, y)]
    # , left=[a], right=[y]),
    # ### Insert "abc"
    # # W
    # O('W <- W', {(x, y), (a, b, c)}),
    # # BC -> W
    # all_c('W', ['BC'], orders=[(x, y), (a, b, c)]
    # , right=[x, a]),
    # all_c('W', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[y], right=[a]),
    # all_c('W', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[c], right=[x]),
    # all_c('W', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[y, c]),
    # # BC -> BC
    # all_c('BC', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[x], right=[y, a]),
    # all_c('BC', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[a, x], right=[y, b]),
    # all_c('BC', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[a, b], right=[c, x]),
    # all_c('BC', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[x, b], right=[y, c]), # ^2
    # all_c('BC', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[y, b], right=[c]),
    # all_c('BC', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[x, c], right=[y]),
    # # BC -> AB
    # all_c('AB', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[a], right=[x, b]),
    # all_c('AB', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[a, x], right=[y, b]),
    # all_c('AB', ['BC'], orders=[(x, y), (a, b, c)]
    # , left=[y, a], right=[b]),
    # O('W <- AB', {(x, y), (a, b, c)}),
    # # AB -> W
    # all_c('W', ['AB'], orders=[(x, y), (a, b, c)]
    # , right=[x, a]),
    # all_c('W', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[y, c]),
    # all_c('W', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[y], right=[a]),
    # all_c('W', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[c], right=[x]),
    # # AB -> BC
    # all_c('BC', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[b], right=[x, c]),
    # all_c('BC', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[x, b], right=[y, c]),
    # all_c('BC', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[y, b], right=[c]),
    # # AB -> AB
    # all_c('AB', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[x], right=[y, a]),
    # all_c('AB', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[a], right=[x, b]),
    # all_c('AB', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[x, a], right=[y, b]), # ^2
    # all_c('AB', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[y, a], right=[b]),
    # all_c('AB', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[x, b], right=[y, c]),
    # all_c('AB', ['AB'], orders=[(x, y), (a, b, c)]
    # , left=[x, c], right=[y]),
    # ### Insert "acb"
    # # BC -> W
    # all_c('W', ['BC'], orders=[(x, y), (a, c, b), (x, c), (b, y)]),
    # # BC -> AB
    # all_c('AB', ['BC'], orders=[(x, y), (a, c, b), (x, c), (b, y)]
    # , left=[a], right=[b]),
    # # BC -> BC
    # all_c('BC', ['BC'], orders=[(x, y), (a, c, b), (x, c), (b, y)]
    # , left=[x], right=[c]),
    # all_c('BC', ['BC'], orders=[(x, y), (a, c, b), (x, c), (b, y)]
    # , left=[b], right=[y]),
    # ### Insert "bac"
    # # AB -> W
    # all_c('W', ['AB'], orders=[(x, y), (b, a, c), (a, y), (x, b)]),
    # # AB -> AB
    # all_c('AB', ['AB'], orders=[(x, y), (b, a, c), (a, y), (x, b)]
    # , left=[x], right=[b]),
    # all_c('AB', ['AB'], orders=[(x, y), (b, a, c), (a, y), (x, b)]
    # , left=[a], right=[y]),
    # # AB -> BC
    # all_c('BC', ['AB'], orders=[(x, y), (b, a, c), (a, y), (x, b)]
    # , left=[b], right=[c]),
    # ### Insert "bca"
    # # BC -> W
    # all_c('W', ['AB'], orders=[(x, y), (b, c, a), (a, y), (x, b)]),
    # # BC -> AB
    # all_c('AB', ['AB'], orders=[(x, y), (b, c, a), (a, y), (x, b)]
    # , left=[x], right=[b]),
    # all_c('AB', ['AB'], orders=[(x, y), (b, c, a), (a, y), (x, b)]
    # , left=[a], right=[y]),
    # # BC -> BC
    # all_c('BC', ['AB'], orders=[(x, y), (b, c, a), (a, y), (x, b)]
    # , left=[b], right=[c]),
    # # # Debugging
    # ('$_W', ['W'], [[x, '$', y]]),
])
| StarcoderdataPython |
3546564 | # Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions used by generate_graph.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import itertools
import numpy as np
def gen_is_edge_fn(bits):
  """Generate a vectorized edge-connectivity predicate from a bit mask.

  The bits of the mask fill the strict upper triangle of an adjacency
  matrix in column-major order, starting from the least significant bit.
  For a bitstring FEDCBA on a 4x4 matrix:
    [[0, A, B, D],
     [0, 0, C, E],
     [0, 0, 0, F],
     [0, 0, 0, 0]]
  Because filling starts at the least significant bit, the same (0-padded)
  mask produces a consistent matrix for any dimension.

  Args:
    bits: integer interpreted as a bit mask.
  Returns:
    vectorized function that returns True when an edge is present.
  """
  def is_edge(x, y):
    """Is there an edge from x to y (0-indexed)?"""
    # Only the strict upper triangle can hold edges.
    if x >= y:
      return 0
    # Column-major position of (x, y) within the upper triangle.
    bit_index = x + (y * (y - 1) // 2)
    return bool(bits & (1 << bit_index))
  return np.vectorize(is_edge)
def is_full_dag(matrix):
  """Full DAG == every vertex lies on a path from vertex 0 to vertex V-1.

  i.e. no disconnected or "hanging" vertices. It suffices to check:
    1) every row except the last has at least one nonzero (an out-edge)
    2) every column except the first has at least one nonzero (an in-edge)

  Args:
    matrix: V x V upper-triangular adjacency matrix
  Returns:
    True if there are no dangling vertices.
  """
  num_vertices = np.shape(matrix)[0]
  # Each non-output vertex needs an outgoing edge.
  has_out = np.any(matrix[:num_vertices - 1, :] != 0, axis=1)
  # Each non-input vertex needs an incoming edge.
  has_in = np.any(matrix[:, 1:] != 0, axis=0)
  return bool(np.all(has_out) and np.all(has_in))
def num_edges(matrix):
  """Computes number of edges in adjacency matrix.

  Assumes a 0/1 adjacency matrix, so the edge count is the entry sum.
  """
  return np.sum(matrix)
def hash_module(matrix, labeling):
  """Computes a graph-invariant MD5 hash of a (matrix, labeling) pair.

  Iteratively re-hashes each vertex from its sorted in/out neighbour
  digests (a Weisfeiler-Lehman-style refinement), so isomorphic graphs
  produce identical fingerprints.

  Args:
    matrix: np.ndarray square upper-triangular adjacency matrix.
    labeling: list of int labels of length equal to both dimensions of
      matrix.
  Returns:
    MD5 hash of the matrix and labeling.
  """
  num_vertices = np.shape(matrix)[0]
  in_degrees = np.sum(matrix, axis=0).tolist()
  out_degrees = np.sum(matrix, axis=1).tolist()
  assert len(in_degrees) == len(out_degrees) == len(labeling)
  # Seed each vertex digest from (out-degree, in-degree, label).
  digests = [
      hashlib.md5(str(triple).encode('utf-8')).hexdigest()
      for triple in zip(out_degrees, in_degrees, labeling)
  ]
  # Repeating V times is more than the graph diameter requires, but the
  # operation is cheap so the extra rounds are harmless.
  for _ in range(num_vertices):
    next_digests = []
    for v in range(num_vertices):
      preds = sorted(digests[u] for u in range(num_vertices) if matrix[u, v])
      succs = sorted(digests[u] for u in range(num_vertices) if matrix[v, u])
      combined = '|'.join([''.join(preds), ''.join(succs), digests[v]])
      next_digests.append(
          hashlib.md5(combined.encode('utf-8')).hexdigest())
    digests = next_digests
  return hashlib.md5(str(sorted(digests)).encode('utf-8')).hexdigest()
def permute_graph(graph, label, permutation):
  """Permutes the graph and labels based on permutation.

  Args:
    graph: np.ndarray adjacency matrix.
    label: list of labels of same length as graph dimensions.
    permutation: a permutation list of ints of same length as graph dimensions.
  Returns:
    np.ndarray where vertex permutation[v] is vertex v from the original graph
  """
  n = len(label)
  # inverse_perm[new_index] gives the old index of that vertex.
  inverse_perm = [old for _new, old in sorted(zip(permutation, range(n)))]
  has_edge = lambda x, y: graph[inverse_perm[x], inverse_perm[y]] == 1
  new_matrix = np.fromfunction(np.vectorize(has_edge),
                               (n, n),
                               dtype=np.int8)
  new_label = [label[old] for old in inverse_perm]
  return new_matrix, new_label
def is_isomorphic(graph1, graph2):
  """Exhaustively checks whether two (matrix, labels) graphs are isomorphic.

  Tries every vertex permutation of graph1 and looks for one whose
  permuted matrix and labels match graph2 exactly.
  """
  matrix1, label1 = np.array(graph1[0]), graph1[1]
  matrix2, label2 = np.array(graph2[0]), graph2[1]
  assert np.shape(matrix1) == np.shape(matrix2)
  assert len(label1) == len(label2)
  num_vertices = np.shape(matrix1)[0]
  # Note: input and output in our constrained graphs always map to
  # themselves, but this function does not enforce that.
  for perm in itertools.permutations(range(num_vertices)):
    pmatrix, plabel = permute_graph(matrix1, label1, perm)
    if plabel == label2 and np.array_equal(pmatrix, matrix2):
      return True
  return False
| StarcoderdataPython |
8148501 | import serial
import json
from pynput.keyboard import Key, Listener
# Open the robot's serial link.
# NOTE(review): port name ("COM26", Windows-style) and baud rate are
# hard-coded for one machine — confirm/parametrize before reuse.
ser = serial.Serial("COM26", 115200, timeout=1)
# Print the keyboard control reference for the operator.
print("****** Gardening / Agricultural Robot *******")
print("Controls")
print("w => Froward")
print("s => Backward")
print("d => Right")
print("a => Left")
print("e => Face Forward")
print("q => Stop")
print("x => Dig Hole")
print("z => Drop Seed")
print("esc key => close")
def on_press(key):
    """Keyboard handler: forwards single-letter commands to the robot.

    'f' additionally reads one soil-moisture sample from the serial line
    and appends "<value>,<crop name>" to data.csv. Returns False on Esc
    so the pynput Listener stops.
    """
    key_str = str(key)
    if key_str == "'f'":
        with open('data.csv', 'a') as writeFile:
            ser.write(b'f')
            # NOTE(review): the reply is sliced [3:-4] before JSON-decoding,
            # presumably stripping device framing bytes — confirm protocol.
            y = json.loads(ser.readline().decode("utf-8")[3:-4])
            data = str(y['val'])+","+input("Enter crop name ")+"\n"
            writeFile.write(data)
            print("Soil moisture level: "+str(y["val"]))
    if key_str == "'w'":
        ser.write(b'w')
    if key_str == "'s'":
        ser.write(b's')
    if key_str == "'a'":
        ser.write(b'a')
    if key_str == "'d'":
        ser.write(b'd')
    if key_str == "'q'":
        ser.write(b'q')
    if key_str == "'e'":
        ser.write(b'e')
    if key == Key.esc:
        # BUG FIX: the original called writeFile.close() here, but
        # writeFile is only bound inside the 'f' branch above (whose
        # with-block already closes it), so pressing Esc raised NameError
        # unless 'f' had been pressed first in this callback.
        return False
# Block listening for key presses until on_press returns False (Esc).
with Listener(on_press=on_press) as listener:
    listener.join()
9737270 | <filename>Face_Enhancement/data/base_dataset.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
import random
class BaseDataset(data.Dataset):
    """Abstract dataset base class; subclasses implement the actual loading."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Hook for subclasses to register CLI options; default is a no-op."""
        return parser

    def initialize(self, opt):
        """Hook for subclasses to set up state from options; default no-op."""
        pass
def get_params(preprocess_mode, load_size, crop_size, size):
    """Draw the random augmentation parameters for one sample.

    Returns a dict with "crop_pos" (top-left crop corner) and "flip"
    (whether to mirror horizontally), based on the resized dimensions
    implied by ``preprocess_mode``.
    """
    w, h = size
    new_w, new_h = w, h
    if preprocess_mode == "resize_and_crop":
        new_h = new_w = load_size
    elif preprocess_mode == "scale_width_and_crop":
        new_w = load_size
        new_h = load_size * h // w
    elif preprocess_mode == "scale_shortside_and_crop":
        ss, ls = min(w, h), max(w, h)  # shortside and longside
        width_is_shorter = w == ss
        ls = int(load_size * ls / ss)
        # NOTE(review): the short side keeps its *original* length here
        # (upstream SPADE uses load_size) — confirm which is intended.
        new_w, new_h = (ss, ls) if width_is_shorter else (ls, ss)
    # Random crop corner within the resized image (clamped at 0).
    x = random.randint(0, np.maximum(0, new_w - crop_size))
    y = random.randint(0, np.maximum(0, new_h - crop_size))
    flip = random.random() > 0.5
    return {"crop_pos": (x, y), "flip": flip}
def get_transform(preprocess_mode, load_size, crop_size, aspect_ratio, no_flip, isTrain, params,
                  method=Image.BICUBIC, normalize=True, toTensor=True):
    """Build the torchvision transform pipeline for one sample.

    ``params`` must come from ``get_params`` (supplies "crop_pos"/"flip").

    BUG FIX: the crop lambda referenced the undefined name ``opt.crop_size``
    (a leftover from an options-object version of this code), raising
    NameError whenever ``preprocess_mode`` contained "crop"; it now uses
    the ``crop_size`` argument.
    """
    transform_list = []
    if "resize" in preprocess_mode:
        osize = [load_size, load_size]
        transform_list.append(transforms.Resize(osize, interpolation=method))
    elif "scale_width" in preprocess_mode:
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, load_size, method)))
    elif "scale_shortside" in preprocess_mode:
        transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, load_size, method)))
    if "crop" in preprocess_mode:
        transform_list.append(transforms.Lambda(lambda img: __crop(img, params["crop_pos"], crop_size)))
    if preprocess_mode == "none":
        # No resizing, but pad/round dims to a multiple of 32 for the nets.
        base = 32
        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
    if preprocess_mode == "fixed":
        w = crop_size
        h = round(crop_size / aspect_ratio)
        transform_list.append(transforms.Lambda(lambda img: __resize(img, w, h, method)))
    if isTrain and not no_flip:
        transform_list.append(transforms.Lambda(lambda img: __flip(img, params["flip"])))
    if toTensor:
        transform_list += [transforms.ToTensor()]
    if normalize:
        # Map [0, 1] tensors to [-1, 1] per channel.
        transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
def normalize():
    """Return a transform mapping [0, 1] tensors to [-1, 1] per channel."""
    mean = (0.5, 0.5, 0.5)
    std = (0.5, 0.5, 0.5)
    return transforms.Normalize(mean, std)
def __resize(img, w, h, method=Image.BICUBIC):
    """Resize ``img`` to exactly (w, h)."""
    target = (w, h)
    return img.resize(target, method)
def __make_power_2(img, base, method=Image.BICUBIC):
    """Round both dimensions to the nearest multiple of ``base``.

    Returns ``img`` unchanged when it already complies.
    """
    ow, oh = img.size
    target = (int(round(ow / base) * base), int(round(oh / base) * base))
    if target == (ow, oh):
        return img
    return img.resize(target, method)
def __scale_width(img, target_width, method=Image.BICUBIC):
    """Resize so the width equals ``target_width``, preserving aspect ratio."""
    ow, oh = img.size
    if ow == target_width:
        return img
    new_size = (target_width, int(target_width * oh / ow))
    return img.resize(new_size, method)
def __scale_shortside(img, target_width, method=Image.BICUBIC):
    """Scale relative to the shorter side, preserving aspect ratio.

    NOTE(review): when the short side != target_width, the short side
    keeps its *original* length and only the long side is rescaled;
    upstream SPADE sets the short side to target_width here — confirm
    which behavior is intended before changing it.
    """
    ow, oh = img.size
    short, long_side = min(ow, oh), max(ow, oh)
    if short == target_width:
        return img
    width_is_shorter = ow == short
    long_side = int(target_width * long_side / short)
    new_size = (short, long_side) if width_is_shorter else (long_side, short)
    return img.resize(new_size, method)
def __crop(img, pos, size):
    """Crop a size x size square whose top-left corner is at ``pos``."""
    left, upper = pos
    return img.crop((left, upper, left + size, upper + size))
def __flip(img, flip):
    """Mirror ``img`` horizontally when ``flip`` is truthy."""
    return img.transpose(Image.FLIP_LEFT_RIGHT) if flip else img
| StarcoderdataPython |
11229477 | import re
import setuptools
from os.path import join
# Long description for PyPI comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Single-source the version: scrape __version__ from the package itself.
with open(join('pleroma_bot', '__init__.py')) as f:
    line = next(l for l in f if l.startswith('__version__'))
    version = re.match('__version__ = [\'"]([^\'"]+)[\'"]', line).group(1)
setuptools.setup(
    name="pleroma-bot",
    version=version,
    author="<NAME>",
    author_email='<EMAIL>',
    description="Mirror one or multiple Twitter accounts in Pleroma/Mastodon",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/robertoszek/pleroma-bot",
    project_urls={
        "Documentation": "https://robertoszek.github.io/pleroma-bot",
    },
    packages=['pleroma_bot'],
    # Ship compiled gettext catalogs alongside the code.
    package_data={'pleroma_bot': ['locale/*/*/*.mo']},
    include_package_data=True,
    classifiers=[
        'Environment :: Console',
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    entry_points={
        'console_scripts': ['pleroma-bot=pleroma_bot.cli:main']
    },
    python_requires='>=3.6',
    install_requires=[
        'requests>=2.24.0',
        'PyYAML>=5.3.1',
        'requests-oauthlib>=1.3.0'
    ],
    # Optional dependency sets: `pip install pleroma-bot[lint]` / `[test]`.
    extras_require={
        'lint': [
            'flake8',
            'flake8-quotes',
        ],
        'test': [
            'tox',
            'pytest',
            'requests-mock',
            'pytest-cov',
            'python-magic-bin ; platform_system=="Windows"'
        ],
    }
)
| StarcoderdataPython |
3523118 | from . import bzip2, gzip
| StarcoderdataPython |
6671010 | from __future__ import absolute_import, print_function
# checks
# Import-probe TensorFlow purely to produce a friendly install hint;
# the module object itself is deleted again immediately.
try:
    import tensorflow
    del tensorflow
except ModuleNotFoundError as e:
    from six import raise_from
    raise_from(RuntimeError('Please install TensorFlow: https://www.tensorflow.org/install/'), e)
import tensorflow, sys
from ..utils.tf import keras_import, IS_TF_1
# Under TF1 the standalone `keras` package is used; verify its backend.
if IS_TF_1:
    try:
        import keras
        del keras
    except ModuleNotFoundError as e:
        # keras raises ModuleNotFoundError for its *backend* module when it
        # is configured for theano/cntk but that backend isn't installed.
        if e.name in {'theano','cntk'}:
            from six import raise_from
            raise_from(RuntimeError(
                "Keras is configured to use the '%s' backend, which is not installed. "
                "Please change it to use 'tensorflow' instead: "
                "https://keras.io/getting-started/faq/#where-is-the-keras-configuration-file-stored" % e.name
            ), e)
        else:
            raise e
    K = keras_import('backend')
    if K.backend() != 'tensorflow':
        raise NotImplementedError(
            "Keras is configured to use the '%s' backend, which is currently not supported. "
            "Please configure Keras to use 'tensorflow' instead: "
            "https://keras.io/getting-started/faq/#where-is-the-keras-configuration-file-stored" % K.backend()
        )
# else:
#     print("Found TensorFlow 2 (version %s), which might cause issues with CSBDeep." % tensorflow.__version__, file=sys.stderr)
# Regardless of TF version, the models assume channels-last data layout.
K = keras_import('backend')
if K.image_data_format() != 'channels_last':
    raise NotImplementedError(
        "Keras is configured to use the '%s' image data format, which is currently not supported. "
        "Please change it to use 'channels_last' instead: "
        "https://keras.io/getting-started/faq/#where-is-the-keras-configuration-file-stored" % K.image_data_format()
    )
# Clean up the probe names so they don't leak into the package namespace.
del tensorflow, sys, keras_import, IS_TF_1, K
# imports
from .config import BaseConfig, Config
from .base_model import BaseModel
from .care_standard import CARE
from .care_hdf5 import HDF5CARE
from .care_upsampling import UpsamplingCARE
from .care_isotropic import IsotropicCARE
from .care_projection import ProjectionConfig, ProjectionCARE
from .pretrained import register_model, register_aliases, clear_models_and_aliases
| StarcoderdataPython |
3356596 | <gh_stars>0
from datetime import datetime
import json
import os
import sys
import uuid
SEPARATOR='###|'
HERE=os.path.dirname(os.path.realpath(__file__))
OUTPUT=os.path.join(HERE, "dist")
# Mask to parse:
# Friday, September 22, 2017 at 1:00:52 AM
DATETIME_IMPORT_MASK='%A, %B %d, %Y at %I:%M:%S %p'
DATETIME_SAVE_MASK='%Y-%m-%dT%H:%M:%SZ'
def get_datetime_string(string):
    """Re-format an exported (Apple-Notes-style) timestamp as ISO-8601 Zulu."""
    parsed = datetime.strptime(string, DATETIME_IMPORT_MASK)
    return parsed.strftime(DATETIME_SAVE_MASK)
def get_bool(string):
    """Parse the export format's boolean field (exactly "true" is True)."""
    return string == "true"
# Split the raw export into notes and write each as a JSON file in OUTPUT.
filename = sys.argv[1]
file_exists = os.path.exists(filename)
if not file_exists:
    # BUG FIX: the original only printed the error and then tried to open
    # the missing file anyway; abort with a non-zero exit code instead.
    print('ERROR: file not exists')
    sys.exit(1)
with open(filename, mode='r') as file_raw:
    file = file_raw.read()
notes = file.split(SEPARATOR)
for note in notes:
    if note is None or note == '':
        continue
    # Fixed line layout per note: folder, title, created, modified,
    # locked-flag, then the body lines.
    properties = note.split('\n')
    folder_name = properties[0]
    note_name = properties[1]
    note_creation_date = properties[2]
    note_modification_date = properties[3]
    note_password_protected = get_bool(properties[4])
    # NOTE(review): the final element is dropped ([5:-1]) — presumably a
    # trailing empty line introduced by the separator; confirm.
    note_plaintext = "\n".join(properties[5:-1])
    note_id = str(uuid.uuid4())
    note_json = {
        "id": note_id,
        "folder": folder_name,
        "creation_date": get_datetime_string(note_creation_date),
        "modification_date": get_datetime_string(note_modification_date),
        "body": note_plaintext
    }
    if note_password_protected:
        # BUG FIX: the original printed this message but still wrote the
        # (encrypted) body to disk; skip the note as the message promises.
        print("INFO: Note '{}' in folder '{}' is encrypted and cannot be exported".format(note_name, folder_name))
        continue
    file_export = os.path.join(OUTPUT, "{}.json".format(note_id))
    if os.path.exists(file_export):
        # BUG FIX: a UUID collision previously only printed an error and
        # then silently overwrote the existing file.
        raise RuntimeError('UUID already in use: {}'.format(note_id))
    with open(file_export, mode='w') as f_e:
        # (Removed a pointless seek(0) on a freshly opened 'w' file.)
        f_e.write(json.dumps(note_json, indent=4, separators=(',', ': '), sort_keys=True))
    print("EXPORTED: Folder '{}', note '{}'".format(folder_name, note_name))
59935 | import subprocess
import PIL
from PIL import Image
import numpy as np
import os
import shutil
import re
script_path = os.path.dirname(os.path.realpath(__file__))
temp_img_dir_path = os.path.join(script_path, 'temp_imgs')
def arr_to_mp4(arr, output_path, framerate=30, resolution_str=None, temp_dir=temp_img_dir_path):
    '''
    Encode an image-sequence array into an mp4 by shelling out to ffmpeg.

    arr shape should be (frames, height, width, 3); values are cast to
    uint8. resolution_str, when given, must look like "WIDTHxHEIGHT" and
    is passed to ffmpeg's -s option. Frames are written as PNGs into
    temp_dir, which is removed afterwards.
    '''
    # Validate "WIDTHxHEIGHT" with fullmatch so trailing junk is rejected
    # (the original re.match let e.g. "256x256abc" through to ffmpeg).
    use_res = resolution_str is not None and \
        re.fullmatch(r'\d+x\d+', resolution_str) is not None
    # makedirs(exist_ok=True) replaces a try/except that printed and
    # swallowed *every* mkdir error, only to crash later on img.save.
    os.makedirs(temp_dir, exist_ok=True)
    arr = arr.astype('uint8')
    for i in range(arr.shape[0]):
        img = Image.fromarray(arr[i])
        img.save(os.path.join(temp_dir, '{}.png'.format(i)))
    # Build the command as an argument list: robust against spaces in
    # paths (the original str.split(' ') broke on any such path).
    cmd = ['ffmpeg', '-framerate', str(framerate),
           '-i', os.path.join(temp_dir, '%d.png'),
           '-pix_fmt', 'yuv420p']
    if use_res:
        cmd += ['-s', resolution_str]
    cmd.append(output_path)
    subprocess.call(cmd)
    shutil.rmtree(temp_dir)
if __name__ == "__main__":
    # Demo: 120 frames of random RGB noise at the default 30 fps.
    arr = np.random.randint(0, 255, (120, 256, 256, 3), dtype="uint8")
    arr_to_mp4(arr, 'out1.mp4', resolution_str="256x256")
    # produces out.mp4 which is 4 seconds long of image noise
| StarcoderdataPython |
# Prompt for three values, then echo them back in one sentence.
age = input("How old are you? ")
height = input("How tall are you? ")
weight = input("How much do you weigh? ")
print(f"So, you're {age} years old, {height} meters tall and {weight} kilograms heavy.")
| StarcoderdataPython |
1974856 | <reponame>DVS-Lab/dmn-parcellation
from neurosynth.analysis.meta import MetaAnalysis
import nibabel as nib
import numpy as np
from copy import deepcopy
def mask_level(img, level):
    """Return a copy of ``img`` binarized to the given (rounded) level.

    Voxels whose rounded value equals ``level`` become 1; all others
    become 0. The input image is left untouched (deep copy).
    """
    masked = deepcopy(img)
    values = masked.get_data()
    values[:] = np.round(values)
    values[values != level] = 0
    values[values == level] = 1
    return masked
def coactivation_contrast(dataset, infile, regions=None, target_thresh=0.05,
                          other_thresh=0.01, q=0.01, contrast=True):
    """ Performs meta-analyses to contrast co-activation in a target region vs
    co-activation of other regions. Contrasts every region in "regions" vs
    the other regions in "regions"
    dataset: Neurosynth dataset
    infile: Nifti file with masks as levels
    regions: which regions in image to contrast
    target_thresh: activaton threshold for retrieving ids for target region
    other_thresh: activation threshold for ids in other regions
    - This should be proportionally lower than target thresh since
    multiple regions are being contrasted to one, and thus should de-weighed
    stat: which image to return from meta-analyis. Default is usually correct
    returns: a list of nifti images for each contrast performed of length = len(regions) """
    # Accept either a path or an already-loaded nibabel image.
    if isinstance(infile, str):
        image = nib.load(infile)
    else:
        image = infile
    # NOTE(review): get_affine()/get_data() are deprecated in newer
    # nibabel (use .affine / .get_fdata()) — confirm the pinned version.
    affine = image.get_affine()
    # Key of the FDR-thresholded z-stat image inside MetaAnalysis results.
    stat="pFgA_z_FDR_%s" % str(q)
    # Default: every integer level present in the mask image (1..max).
    if regions == None:
        regions = np.arange(1, image.get_data().max() + 1)
    meta_analyses = []
    for reg in regions:
        if contrast is True:
            # Union of study ids activating any *other* region, used as
            # the comparison set (ids2) for this region's meta-analysis.
            other_ids = [dataset.get_studies(mask=mask_level(image, a), activation_threshold=other_thresh)
                         for a in regions if a != reg]
            joined_ids = set()
            for ids in other_ids:
                joined_ids = joined_ids | set(ids)
            joined_ids = list(joined_ids)
        else:
            joined_ids = None
        reg_ids = dataset.get_studies(mask=mask_level(image, reg), activation_threshold=target_thresh)
        meta_analyses.append(MetaAnalysis(dataset, reg_ids, ids2=joined_ids, q=q))
    # Unmask each result back into full-brain voxel space as a NIfTI image.
    return [nib.nifti1.Nifti1Image(dataset.masker.unmask(
        ma.images[stat]), affine, dataset.masker.get_header()) for ma in meta_analyses]
3282108 | <gh_stars>1-10
import codecs
import os
from setuptools import setup, find_packages
# Resolve paths relative to this setup.py so builds work from any CWD.
here = os.path.abspath(os.path.dirname(__file__))
# Runtime dependencies.
REQUIREMENTS = [
    'schematics',
    'protobuf',
]
# Extra dependencies used only by the test/lint tooling.
TEST_REQUIREMENTS = [
    'flake8',
    'mock',
    'tox',
    'pytest',
    'pytest-cache',
    'pytest-cover',
    'pytest-sugar',
    'pytest-xdist',
]
with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    README = f.read()
setup(name='proto_schematics',
      version='0.1.0',
      description='Converts protobuf generated messages to Python friendly schematics models.',
      long_description=README,
      license='Apache License (2.0)',
      author='<NAME>.',
      author_email='<EMAIL>',
      url='https://github.com/loggi/proto-schematics',
      classifiers=[
          'Intended Audience :: Developers',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
      ],
      keywords=[
          'loggi',
          'schematics',
          'protobuf',
          'grpc',
      ],
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      install_requires=REQUIREMENTS,
      test_suite='tests',
      tests_require=TEST_REQUIREMENTS+REQUIREMENTS)
| StarcoderdataPython |
def orbits(inp):
    """Build the orbit forest from (parent, child) pairs.

    Each node is a plain list of its children's nodes. Returns
    (tree, index) where tree is the 'COM' root node and index maps each
    object name to its node.
    """
    root = []
    index = {'COM': root}
    for parent, child in inp:
        child_node = index.setdefault(child, [])
        parent_node = index.setdefault(parent, [])
        parent_node.append(child_node)
    return root, index
def inp():
    """Read the puzzle input and return a list of [parent, child] pairs."""
    with open('code/6.txt') as handle:
        raw = handle.read()
    return [line.split(')') for line in raw.split('\n')]
def count(tree):
    """Total orbit count: the sum of the depths of every node in the tree."""
    memo = {}

    def total(node, depth):
        # A leaf contributes just its own depth.
        if not node:
            return depth
        # Memoize on (depth, structure); identical subtrees at the same
        # depth always contribute the same total.
        key = str((depth, node))
        if key not in memo:
            memo[key] = depth + sum(total(child, depth + 1) for child in node)
        return memo[key]

    return total(tree, 0)
def distance(tree, index, a='YOU', b='SAN'):
    """Minimum orbital transfers between the objects ``a`` and ``b`` orbit.

    Finds the nearest common ancestor (NCA) of a and b in the orbit tree,
    then returns depth(a) + depth(b) measured from the NCA, minus 2
    (a and b themselves do not count as transfers).

    (Removed an unused 'parents' map and its 'get_parents' builder from
    the original; nothing ever read them.)
    """
    def find(obj, path=[tree], tree=tree):
        """Return the list of ancestor nodes from the root down to obj."""
        if tree is obj:
            return path
        path = list(path)
        path.append(tree)
        for child in tree:
            found = find(obj, path, child)
            if found:
                return found
        return None

    apath = find(index[a])
    bpath = find(index[b])
    # Nodes present (by identity) in both root paths; the last is the NCA.
    common = [ap for ap in apath if any(ap is bp for bp in bpath)]
    nca = common[-1]

    def depth(target, node, cur=0):
        """Depth of ``target`` below ``node`` (0 when they are the same)."""
        if node is target:
            return cur
        for child in node:
            found = depth(target, child, cur + 1)
            # Compare against None, not truthiness: a valid depth is >= 1
            # here, but the explicit check is safer.
            if found is not None:
                return found
        return None

    return depth(index[a], nca) + depth(index[b], nca) - 2
def part_a():
    # Part A: total direct + indirect orbit count for the puzzle input.
    return count(orbits(inp())[0])
def part_b():
    # Part B: minimum orbital transfers between YOU and SAN.
    return distance(*orbits(inp()))
if __name__ == '__main__':
    print('6A:', part_a())
    print('6B:', part_b())
| StarcoderdataPython |
11209789 | <reponame>ponyatov/w
# Generated by Django 3.2 on 2021-04-19 06:48
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); avoid hand-editing beyond
    # comments. Drops loc.area and replaces it with geographic fields.
    dependencies = [
        ('map', '0008_auto_20210419_1016'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='loc',
            name='area',
        ),
        migrations.AddField(
            model_name='loc',
            name='bounds',
            # verbose_name is Russian for "territory bounds" (user-facing
            # string stored in the migration — keep as-is).
            field=django.contrib.gis.db.models.fields.PolygonField(
                blank=True, null=True, srid=4326, verbose_name='границы территории'),
        ),
        migrations.AlterField(
            model_name='loc',
            name='loc',
            # verbose_name is Russian for "geographic center".
            field=django.contrib.gis.db.models.fields.PointField(
                blank=True, null=True, srid=4326, verbose_name='географический центр'),
        ),
    ]
| StarcoderdataPython |
33171 | from django.shortcuts import render, redirect
from django.views.generic import UpdateView, DeleteView
from .models import Storage
from django_tables2 import RequestConfig
from .tables import StorageTable
from django.contrib.auth.decorators import login_required
from .forms import StorageForm, QuestionForm
def index(request):
    # Show the three most recent storages on the landing page.
    # NOTE(review): .reverse() without an explicit ordering relies on the
    # model's default ordering — confirm Meta.ordering exists on Storage.
    table = Storage.objects.reverse()[0:3]
    return render(request, '../templates/index.html', {'storages': table})
def about(request):
    """Render the About page and handle its question form.

    On a valid POST the question is saved and the user is redirected back
    to the page (Post/Redirect/Get); an invalid POST re-renders the bound
    form with errors; a GET renders an empty form.
    """
    if request.method == "POST":
        form = QuestionForm(request.POST)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.save()
            # BUG FIX: the original returned the bare string '/about',
            # which is not an HttpResponse and makes Django raise
            # ValueError; redirect properly instead.
            return redirect('/about')
    else:
        form = QuestionForm()
    return render(request, '../templates/about.html', {'form': form})
def current_storages(request):
    # Full storage listing rendered through a django-tables2 table with
    # request-driven sorting/pagination.
    table = StorageTable(Storage.objects.all())
    RequestConfig(request).configure(table)
    return render(request, '../templates/current_storages.html', {'table': table})
@login_required(login_url='/accounts/login')
def create_storage(request):
    # Create a storage owned by the logged-in user (login required).
    if request.method == "POST":
        form = StorageForm(request.POST, request.FILES)
        if form.is_valid():
            model_instance = form.save(commit=False)
            # Attach the current user before the first save.
            model_instance.user_id = request.user
            model_instance.save()
            return redirect('/current_storages')
    else:
        form = StorageForm()
    return render(request, '../templates/create_storage.html', {'form': form})
def storage(request, storage_id):
    # Detail page for one storage.
    # NOTE(review): Storage.objects.get raises DoesNotExist (HTTP 500) for
    # unknown ids — consider get_object_or_404.
    storage_request = Storage.objects.get(pk=storage_id)
    return render(request, '../templates/storage.html', {'storage': storage_request})
class EditStorage(UpdateView):
    # Generic edit view for a Storage; redirects to the listing on success.
    model = Storage
    form_class = StorageForm
    template_name = 'storage_update_form.html'
    success_url = '/current_storages'
class DeleteStorage(DeleteView):
    # Generic confirm-and-delete view for a Storage.
    model = Storage
    template_name = 'storage_confirm_delete.html'
    success_url = '/current_storages'
def show_image(request, storage_id):
    # Full-size photo page for one storage (same DoesNotExist caveat as
    # the storage view).
    photo_request = Storage.objects.get(pk=storage_id)
    return render(request, '../templates/show_image.html', {'storage': photo_request})
def logout(request):
    # Renders the logged-out template only; actual session logout is
    # presumably handled by django.contrib.auth URLs — confirm.
    return render(request, '../templates/registration/logout.html', {})
| StarcoderdataPython |
4891174 | <gh_stars>1-10
from fforma.fforma import *
| StarcoderdataPython |
11213405 | import pytest
from fastai import *
from fastai.vision import *
@pytest.fixture(scope="module")
def path(request):
    # Download/unpack tiny MNIST once per module and force CPU execution;
    # the finalizer restores the previous default device afterwards.
    path = untar_data(URLs.MNIST_TINY)
    d = defaults.device
    defaults.device = torch.device('cpu')
    def _final(): defaults.device = d
    request.addfinalizer(_final)
    return path
def test_multi_iter_broken(path):
    # Regression test: repeatedly creating a fresh iterator used to hang.
    data = image_data_from_folder(path, ds_tfms=(rand_pad(2, 28), []))
    for i in range(5): x,y = next(iter(data.train_dl))
def test_multi_iter(path):
    # one_batch() should be safely callable many times on normalized data.
    data = image_data_from_folder(path, ds_tfms=(rand_pad(2, 28), []))
    data.normalize()
    for i in range(5): x,y = data.train_dl.one_batch()
def test_clean_tear_down(path):
    docstr = "test DataLoader iter doesn't get stuck"
    # Building a second DataBunch must not be blocked by the first one's
    # worker teardown.
    data = image_data_from_folder(path, ds_tfms=(rand_pad(2, 28), []))
    data.normalize()
    data = image_data_from_folder(path, ds_tfms=(rand_pad(2, 28), []))
    data.normalize()
def test_normalize(path):
    data = image_data_from_folder(path, ds_tfms=(rand_pad(2, 28), []))
    x,y = data.train_dl.one_batch()
    m,s = x.mean(),x.std()
    data.normalize()
    x,y = data.train_dl.one_batch()
    # After normalization the batch mean should shrink toward 0 and the
    # std toward 1, relative to the raw batch statistics.
    assert abs(x.mean()) < abs(m)
    assert abs(x.std()-1) < abs(m-1)
    # Normalizing twice is an error.
    with pytest.raises(Exception): data.normalize()
| StarcoderdataPython |
73747 | <filename>miri/datamodels/operations.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Arithmetic and binary operator functions for the MIRI data model.
:Reference:
The STScI jwst.datamodels documentation.
https://jwst-pipeline.readthedocs.io/en/latest/jwst/datamodels/index.html
:History:
24 Jan 2011: Created
22 Feb 2013: Zero dimensional arrays cannot be masked.
08 Oct 2013: _shrink_dq function added, to allow masking when a DQ array
is larger than a data array.
31 Oct 2013: Improved memory management by starting a mathematical
operation with an empty object rather than a copy.
Corrected the formula used by _combine_errors_divisive.
11 Dec 2013: Mask an array only when its data quality value contains
an odd number. Corrected a typo in _generate_mask().
21 May 2014: Make sure data quality arrays are integer before using
bitwise operations.
25 Sep 2014: reserved_flags replaced by master_flags.
30 Nov 2015: Tightened up a few data type conversions, to ensure that
bit masks have the same data type before being combined.
28 Jan 2016: Changed HasMask to use the .dq attribute instead of .mask
(which is now defined as an alias).
23 Mar 2016: Documentation correction.
06 Apr 2016: Replaced throughout the use of _real_cls() by __class__(),
following changes to jwst.datamodels.model_base.DataModel.
04 May 2016: noerr option added to HasDataErrAndDq.
12 Jul 2017: set_data_fill and set_err_fill options added to HasDataErrAndDq.
27 Jun 2018: Added HasDataErrAndGroups class to be used with ramp data.
12 Mar 2019: Removed use of astropy.extern.six (since Python 2 no longer used).
12 Feb 2020: Added _check_broadcastable() methods.
02 Dec 2020: Update import of jwst base model class to JwstDataModel.
28 Sep 2021: Replaced np.bool with np.bool_
@author: <NAME> (UKATC), <NAME> (UKATC)
"""
import sys
import numpy as np
import numpy.ma as ma
from miri.datamodels.dqflags import master_flags, combine_quality
# Import the STScI image model and utilities
import jwst.datamodels.util as jmutil
from jwst.datamodels import JwstDataModel
# List all classes and global functions here.
__all__ = ['are_broadcastable', 'HasMask', 'HasData', 'HasDataErrAndDq']
def are_broadcastable( *shapes ):
    """
    Check whether an arbitrary list of array shapes are broadcastable
    under the NumPy broadcasting rule.

    :Parameters:

    *shapes: tuple or list
        A set of array shapes.

    :Returns:

    broadcastable: bool
        True if all the shapes are broadcastable.
        False if they are not broadcastable.

    """
    if len(shapes) < 2:
        # A single shape is always broadcastable against itself.
        return True
    # Walk the dimensions from the trailing axis; at each position every
    # shape must either agree or have extent 1.
    reversed_shapes = [shape[::-1] for shape in shapes]
    for dims_at_axis in zip(*reversed_shapes):
        distinct = set(dims_at_axis)
        distinct.discard(1)
        if len(distinct) > 1:
            # Dimensions do not match. Not broadcastable.
            return False
    # All dimensions are broadcastable.
    return True
class HasMask(object):
    """
    An abstract mixin class which provides the bitwise operations
    (|, ^, &) relevant for data models containing a primary mask array.

    The primary mask array is assumed to be stored in an attribute
    called dq; "mask" is provided as a read/write alias.

    The three operators now share one implementation (_combine_masks).
    This also fixes an inconsistency in the original code: __or__ did not
    apply the _isvalid check to the other object's dq that __xor__ and
    __and__ performed, so OR-ing with a model holding an empty mask could
    slip past validation.
    """
    def __init__(self, dq):
        # Only store a mask when one is provided, so subclasses may
        # defer the assignment.
        if dq is not None:
            self.dq = dq

    # "mask" is an alias for the "dq" attribute.
    @property
    def mask(self):
        if hasattr(self, 'dq'):
            return self.dq
        else:
            return None

    @mask.setter
    def mask(self, dq):
        self.dq = dq

    def _check_broadcastable(self):
        """
        Helper function which raises an exception if the
        linked data arrays are not broadcastable.
        """
        # A single mask array is always broadcastable against itself.
        pass

    def _check_for_mask(self):
        """
        Helper function which raises an exception if the object
        does not contain a valid mask array.
        """
        if not self._isvalid(self.dq):
            strg = "%s object does not contain a valid mask array" % \
                self.__class__.__name__
            raise AttributeError(strg)

    def _isvalid(self, data):
        """
        Helper function to verify that a given array, tuple or list is
        not empty and has valid content.
        """
        if data is None:
            return False
        elif isinstance(data, (list, tuple)):
            return len(data) > 0
        elif isinstance(data, (np.ndarray)):
            return data.size > 0
        elif not data:
            return False
        else:
            return True

    def _combine_masks(self, other, operation):
        """
        Shared implementation for __or__/__xor__/__and__.

        :Parameters:

        other: scalar, array-like or data model
            The operand to combine with this object's dq array.
        operation: callable
            Binary function applying the bitwise operator to two
            compatible operands.

        :Returns:

        newobject: same class as self
            A new data product containing the combined mask.
        """
        # Check this object is capable of a bitwise operation.
        self._check_for_mask()
        # Start with an empty version of the current object and clone
        # the metadata.
        newobject = self.__class__()
        newobject.update( self )
        if isinstance(other, (float, int)):
            # A scalar quantity only makes sense once converted to an
            # integer.
            newobject.dq = operation(self.dq, int(other))
        elif isinstance(other, (np.ndarray, list, tuple)):
            # Works provided the two arrays are broadcastable; match the
            # existing mask's data type before combining.
            newobject.dq = operation(self.dq,
                                     np.asarray(other, dtype=self.dq.dtype))
        elif isinstance(other, JwstDataModel) and \
                hasattr(other, 'dq') and self._isvalid(other.dq):
            # Two mask data products are being combined together.
            newobject.dq = operation(self.dq, other.dq)
        else:
            strg = "Cannot bitwise combine " + str(self.__class__.__name__)
            strg += " and " + str(other.__class__.__name__) + "objects."
            del newobject
            raise TypeError(strg)
        return newobject

    def __or__(self, other):
        """
        Bitwise OR operation between this mask and another
        data product or scalar.
        """
        return self._combine_masks(other, lambda a, b: a | b)

    def __xor__(self, other):
        """
        Bitwise EXCLUSIVE OR operation between this mask and another
        data product or scalar.
        """
        return self._combine_masks(other, lambda a, b: a ^ b)

    def __and__(self, other):
        """
        Bitwise AND operation between this mask and another
        data product or scalar.
        """
        return self._combine_masks(other, lambda a, b: a & b)
class HasData(object):
    """
    An abstract class which provides the arithmetic operations
    relevant for data models containing a primary data array.

    The primary data array is assumed to be stored in an attribute
    called data.
    """
    def __init__(self, data):
        # Only assign the attribute when a data array is provided,
        # so subclasses may create the attribute later.
        if data is not None:
            self.data = data

    def _check_broadcastable(self):
        """
        Helper function which raises an exception if the
        linked data arrays are not broadcastable.
        """
        # A single data array is always broadcastable
        pass

    def _check_for_data(self):
        """
        Helper function which raises an exception if the object
        does not contain a valid data array.

        :Raises:

        AttributeError
            Raised if the data attribute is missing or invalid.
        """
        if not self._isvalid(self.data):
            strg = "%s object does not contain a valid data array" % \
                self.__class__.__name__
            raise AttributeError(strg)

    def _isvalid(self, data):
        """
        Helper function to verify that a given array, tuple or list is
        not empty and has valid content.

        Returns True when the object is a non-empty sequence or array,
        or any other truthy object; False otherwise.
        """
        if data is None:
            return False
        elif isinstance(data, (list, tuple)):
            if len(data) <= 0:
                return False
            else:
                return True
        elif isinstance(data, (ma.masked_array, np.ndarray)):
            # Use .size rather than len() so 0-d and multi-dimensional
            # arrays are handled consistently.
            if data.size <= 0:
                return False
            else:
                return True
        elif not data:
            return False
        else:
            return True

    def __add__(self, other):
        """
        Add a scalar, an array or another MiriMeasuredModel object to
        this MiriMeasuredModel object.

        :Raises:

        TypeError
            Raised if other is of an incompatible type, or does not
            contain a valid primary data array.
        """
        # Check this object is capable of mathematical operation.
        self._check_for_data()
        # Start with an empty version of the current object and clone
        # the metadata.
        newobject = self.__class__()
        newobject.update( self )
        if isinstance(other, (float, int)):
            # A scalar quantity is being added.
            newobject.data = self.data + other
        elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
            # A data array is being added to this product. This should
            # work provided the two arrays are broadcastable.
            newobject.data = self.data + np.asarray(other)
        elif isinstance(other, JwstDataModel):
            # Two data products are being added together. Ensure they
            # both have a valid primary data array.
            if hasattr(other, 'data') and self._isvalid(other.data):
                newobject.data = self.data + other.data
            else:
                raise TypeError("Both data products must contain a " + \
                                "primary data array.")
        else:
            # NOTE: leading space before "objects" so the message reads
            # "... and X objects."
            strg = "Cannot add " + str(self.__class__.__name__)
            strg += " and " + str(other.__class__.__name__) + " objects."
            del newobject
            raise TypeError(strg)
        return newobject

    def __sub__(self, other):
        """
        Subtract a scalar, an array or another MiriMeasuredModel object
        from this MiriMeasuredModel object.

        :Raises:

        TypeError
            Raised if other is of an incompatible type, or does not
            contain a valid primary data array.
        """
        # Check this object is capable of mathematical operation.
        self._check_for_data()
        # Start with an empty version of the current object and clone
        # the metadata.
        newobject = self.__class__()
        newobject.update( self )
        if isinstance(other, (float, int)):
            # A scalar quantity is being subtracted.
            newobject.data = self.data - other
        elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
            # A data array is being subtracted from this product. This should
            # work provided the two arrays are broadcastable.
            newobject.data = self.data - np.asarray(other)
        elif isinstance(other, JwstDataModel):
            # Two data products are being subtracted. Ensure they
            # both have a valid primary data array.
            if hasattr(other, 'data') and self._isvalid(other.data):
                newobject.data = self.data - other.data
            else:
                raise TypeError("Both data products must contain a " + \
                                "primary data array.")
        else:
            strg = "Cannot subtract " + str(self.__class__.__name__)
            strg += " and " + str(other.__class__.__name__) + " objects."
            del newobject
            raise TypeError(strg)
        return newobject

    def __mul__(self, other):
        """
        Multiply this MiriMeasuredModel object by a scalar, an array or
        another MiriMeasuredModel object.

        :Raises:

        TypeError
            Raised if other is of an incompatible type, or does not
            contain a valid primary data array.
        """
        # Check this object is capable of mathematical operation.
        self._check_for_data()
        # Start with an empty version of the current object and clone
        # the metadata.
        newobject = self.__class__()
        newobject.update( self )
        if isinstance(other, (float, int)):
            # A scalar quantity is being multiplied.
            newobject.data = self.data * other
        elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
            # A data array is being multiplied with this product. This should
            # work provided the two arrays are broadcastable.
            newobject.data = self.data * np.asarray(other)
        elif isinstance(other, JwstDataModel):
            # Two data products are being multiplied together. Ensure they
            # both have a valid primary data array.
            if hasattr(other, 'data') and self._isvalid(other.data):
                newobject.data = self.data * other.data
            else:
                raise TypeError("Both data products must contain a " + \
                                "primary data array.")
        else:
            strg = "Cannot multiply " + str(self.__class__.__name__)
            strg += " and " + str(other.__class__.__name__) + " objects."
            del newobject
            raise TypeError(strg)
        return newobject

    def __truediv__(self, other):
        """
        Divide this MiriMeasuredModel object by a scalar, an array or
        another MiriMeasuredModel object.

        :Raises:

        ValueError
            Raised on an attempt to divide by scalar zero.
        TypeError
            Raised if other is of an incompatible type, or does not
            contain a valid primary data array.
        """
        # Check this object is capable of mathematical operation.
        self._check_for_data()
        # Start with an empty version of the current object and clone
        # the metadata.
        newobject = self.__class__()
        newobject.update( self )
        if isinstance(other, (float, int)):
            # A scalar quantity is being divided.
            # Trap a scalar divide by zero explicitly.
            if np.abs(other) <= sys.float_info.epsilon:
                strg = "%s: Divide by scalar zero!" % self.__class__.__name__
                del newobject
                raise ValueError(strg)
            newobject.data = self.data / other
        elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
            # A data array is dividing this product. This should
            # work provided the two arrays are broadcastable.
            # NOTE: Any divide by zero operations will be trapped by numpy.
            newobject.data = self.data / np.asarray(other)
        elif isinstance(other, JwstDataModel):
            # The data product is being divided by another. Ensure they
            # both have a valid primary data array.
            # NOTE: Any divide by zero operations will be trapped by numpy.
            if hasattr(other, 'data') and self._isvalid(other.data):
                newobject.data = self.data / other.data
            else:
                raise TypeError("Both data products must contain a " + \
                                "primary data array.")
        else:
            strg = "Cannot divide " + str(self.__class__.__name__)
            strg += " and " + str(other.__class__.__name__) + " objects."
            del newobject
            raise TypeError(strg)
        return newobject

    # In Python 3, division is the same as true division.
    def __div__(self, other):
        return self.__truediv__(other)
class HasDataErrAndDq(HasData):
    """
    An abstract class which provides the arithmetic operations and
    masking functions relevant for data models containing a data array,
    error array and data quality array.

    The primary, error and quality arrays are assumed to be stored in
    attributes called data, err and dq.
    """
    def __init__(self, data, err, dq, noerr=False):
        super(HasDataErrAndDq, self).__init__(data=data)
        # Masking state for the data array. The mask and fill value are
        # regenerated on demand (see data_masked / data_filled).
        self._data_mask = None
        self._data_fill = 0.0
        self._data_fill_value = None
        # noerr=True indicates the data model carries no error array.
        self.noerr = noerr
        if not self.noerr:
            if err is not None:
                self.err = err
            self._err_mask = None
            self._err_fill = 'max'
            self._err_fill_value = None
        if dq is not None:
            self.dq = dq

    def _check_broadcastable(self):
        """
        Helper function which raises an exception if the
        linked data arrays are not broadcastable.

        :Raises:

        TypeError
            Raised if the data, err and dq arrays present on this
            object do not have mutually broadcastable shapes.
        """
        if self._isvalid(self.data):
            if hasattr(self, 'err') and self._isvalid(self.err) and \
               hasattr(self, 'dq') and self._isvalid(self.dq):
                if not are_broadcastable( self.data.shape, self.err.shape, self.dq.shape ):
                    strg = "%s object does not contain broadcastable data arrays." % \
                        self.__class__.__name__
                    strg += "\n\tdata.shape=%s, err.shape=%s and dq=shape=%s" % \
                        (str(self.data.shape), str(self.err.shape), str(self.dq.shape))
                    raise TypeError(strg)
            elif hasattr(self, 'err') and self._isvalid(self.err):
                if not are_broadcastable( self.data.shape, self.err.shape ):
                    strg = "%s object does not contain broadcastable data arrays." % \
                        self.__class__.__name__
                    strg += "\n\tdata.shape=%s and err.shape=%s" % \
                        (str(self.data.shape), str(self.err.shape))
                    raise TypeError(strg)
            elif hasattr(self, 'dq') and self._isvalid(self.dq):
                if not are_broadcastable( self.data.shape, self.dq.shape ):
                    strg = "%s object does not contain broadcastable data arrays." % \
                        self.__class__.__name__
                    strg += "\n\tdata.shape=%s, and dq=shape=%s" % \
                        (str(self.data.shape), str(self.dq.shape))
                    raise TypeError(strg)

    def set_data_fill(self, data_fill):
        """
        Set the data fill instruction to something other than the default
        of 0.0.

        :Parameters:

        data_fill: str or number
            An instruction for how to fill the missing values within
            a masked array:

            * 'min': Fill with the minimum value.
            * 'max': Fill with the maximum value.
            * 'mean': Fill with the mean value
            * 'median': Fill with the median value
            * '': Fill with the default numpy value.
            * Any other value is assumed to be the fill value.
        """
        self._data_fill = data_fill

    def set_err_fill(self, err_fill):
        """
        Set the error fill instruction to something other than the default
        of 'max'.

        :Parameters:

        err_fill: str or number
            An instruction for how to fill the missing values within
            a masked array:

            * 'min': Fill with the minimum value.
            * 'max': Fill with the maximum value.
            * 'mean': Fill with the mean value
            * 'median': Fill with the median value
            * '': Fill with the default numpy value.
            * Any other value is assumed to be the fill value.
        """
        self._err_fill = err_fill

    def _shrink_dq(self, dqarray):
        """
        Helper function which shrinks a data quality array along
        its highest axis to generate a new array of smaller size.
        For example, a 3-D array of shape (3 x 3 x 2) is shrunk to
        a 2-D array of shape (3 x 3). Quality flags are combined
        in a bitwise manner.
        """
        # Ensure the input array is of unsigned integer type
        dqarray = np.asarray(dqarray, dtype=np.uint)
        # The new shape has the highest dimension removed
        newshape = dqarray.shape[1:]
        # Start with a DQ array full of zeros
        newdq = np.zeros( newshape, dtype=np.uint)
        # Split the data quality array along the highest
        # axis into a list of pieces.
        npieces = dqarray.shape[0]
        for piece in np.split(dqarray, npieces, 0):
            # Convert each piece into an N-1 dimensional array of integers.
            # Each should be the same size and shape as the new DQ array.
            npiece = np.asarray( np.squeeze( piece ), dtype=np.uint)
            # Merge each new piece into the new DQ array with a bitwise OR
            newdq |= npiece
        # The result should be a new mask with reduced dimensionality
        return newdq

    def _generate_mask(self, data, dq, bitmask=1):
        """
        Use the contents of the dq array to generate a numpy mask of the
        same shape as the data array.

        :Parameters:

        data: numpy array
            The data array to be masked
        dq: numpy array
            The data quality array to be used to generate the mask
        bitmask: unsigned int
            If specified, a mask for selecting particular bits
            from the data quality values.
            The default of 1 will match only bit zero.
            None will match any non-zero data quality value.

        :Returns:

        mask: numpy mask
            A mask which can be used with the data array.
        """
        # A mask can only be generated when both arrays exist and
        # are not empty. The DATA array and DQ array must also be
        # broadcastable.
        if self._isvalid(data) and dq is not None:
            # Ensure the data quality array is of unsigned integer type
            # so bitwise operations are possible.
            dq = np.asarray(dq, dtype=np.uint)
            if data.ndim < dq.ndim and jmutil.can_broadcast(dq.shape, data.shape):
                # The DQ array is larger than the array being masked.
                # This is a special case.
                # Shrink down the DQ array until the dimensions match.
                shrunk_dq = self._shrink_dq(dq)
                while (shrunk_dq.ndim > data.ndim):
                    shrunk_dq = self._shrink_dq(shrunk_dq)
                # Start with a zero (False) mask and mask off (set to True)
                # all the pixels indicated by the DQ array.
                maskdq = np.zeros(data.shape, dtype=np.bool_)
                if bitmask is None:
                    # None means all bits set.
                    bad = np.where(shrunk_dq != 0)
                else:
                    bad = np.where((shrunk_dq & bitmask) != 0)
                maskdq[bad] = True
                return maskdq
            elif data.size >= dq.size and jmutil.can_broadcast(data.shape, dq.shape):
                # Broadcast the DQ array onto something the same shape
                # as the data array.
                datadq = np.zeros(data.shape, dtype=np.uint) + dq
                # Start with a zero (False) mask and mask off (set to True)
                # all the pixels indicated by the DQ array.
                maskdq = np.zeros(data.shape, dtype=np.bool_)
                if bitmask is None:
                    # None means all bits set.
                    bad = np.where(datadq != 0)
                else:
                    bad = np.where((datadq & bitmask) != 0)
                maskdq[bad] = True
                return maskdq
            else:
                return ma.nomask # or None
        else:
            return ma.nomask # or None

    def _generate_fill(self, data, fill_descr):
        """
        Generate a fill value for a data array based on the masked array
        plus a fill description.

        :Parameters:

        data: numpy array
            The data array to be examined.
        fill_descr: str or number
            An instruction for how to fill the missing values within
            a masked array:

            * 'min': Fill with the minimum value.
            * 'max': Fill with the maximum value.
            * 'mean': Fill with the mean value
            * 'median': Fill with the median value
            * '': Fill with the default numpy value.
            * Any other value is assumed to be the fill value.

        :Returns:

        fill_value: number
            The fill value
        """
        # The data array must exist and must not be empty.
        if self._isvalid(data):
            if isinstance(fill_descr, str):
                if fill_descr == 'min':
                    # Use the minimum unmasked value as the fill value
                    fill_value = data.min()
                elif fill_descr == 'max':
                    # Use the maximum unmasked value as the fill value
                    fill_value = data.max()
                elif fill_descr == 'mean':
                    # Use the mean unmasked value as the fill value
                    fill_value = data.mean()
                elif fill_descr == 'median':
                    # Use the median unmasked value as the fill value.
                    # BUG FIX: numpy arrays have no .median() method, so
                    # the original call data.median() always raised an
                    # AttributeError. ma.median() works for both plain
                    # and masked arrays, and ignores masked values.
                    fill_value = ma.median(data)
                else:
                    # Use the default numpy fill value
                    fill_value = None
            else:
                # Assume the fill description is a number or None
                fill_value = fill_descr
        else:
            fill_value = None
        return fill_value

    def _mask_array(self, data, dq, fill_value=None):
        """
        Return a masked version of the given array.

        NOTE: This function might introduce small rounding errors into
        floating point data, so a value displayed as 3.00000005 before
        masking might display as 3.000000048 afterwards. The difference
        is insignificant, but it looks worse when displayed.

        :Parameters:

        data: numpy array
            The data array to be masked
        dq: numpy array
            The data quality array to be used to generate the mask
        fill_value: number
            If specified, the value used to fill missing entries in the
            data array. If not specified, a numpy default value will be
            used.

        :Returns:

        masked_data: numpy masked array
            A masked version of the original data array.
        """
        maskdq = self._generate_mask(data, dq)
        return ma.array(data, mask=maskdq, fill_value=fill_value)

    def _combine_errors_maximum(self, error1, error2):
        """
        Helper function to combine two error arrays and return the maximum.
        Can be used when two data arrays are combined with a min or max
        function, or are combined by resampling.

        NOTE: This function is valid only when both error arrays are sampling
        the same error source and you prefer to believe the most pessimistic
        estimate. Use with care.
        """
        # The end product will have an ERR unit only if both products
        # started with an ERR unit.
        if error1 is not None and error2 is not None:
            newerr = np.maximum(error1, error2)
        else:
            newerr = None
        return newerr

    def _combine_errors_quadrature(self, error1, error2):
        """
        Helper function to combine two error arrays in quadrature.
        Can be used when two data arrays are added or subtracted.

        NOTE: This function is valid only when combining two sets
        of data with independent errors. This assumption might not
        be valid in all circumstances, so use with care.
        """
        # The end product will have an ERR unit only if both products
        # started with an ERR unit.
        if error1 is not None and error2 is not None:
            # NOTE: These operations might cause an overflow
            # for some data types.
            err1sq = np.square(error1)
            err2sq = np.square(error2)
            sumsq = err1sq + err2sq
            newerr = np.sqrt(sumsq)
        else:
            newerr = None
        return newerr

    def _combine_errors_multiplicative(self, error1, error2, data1, data2):
        """
        Helper function to combine two error arrays in quadrature,
        where each error array is weighted by a sensitivity
        coefficient.

        This functions can be used when two data arrays are multiplied,
        so the sensitivity coefficient is proportional to the other
        array's measurement data.

        NOTE: This function is valid only when combining two sets
        of data with independent errors. This assumption might not
        be valid in all circumstances, so use with care.
        """
        # The end product will have an ERR unit only if both products
        # started with an ERR unit.
        if error1 is not None and error2 is not None:
            if data1 is not None and data2 is not None:
                # NOTE: These operations might cause an overflow
                # for some data types.
                data1sq = np.square(data1)
                data2sq = np.square(data2)
                err1sq = np.square(error1)
                err2sq = np.square(error2)
                sumsq = (data2sq * err1sq) + (data1sq * err2sq)
                #newerr = np.sqrt(sumsq) / (data1sq+data2sq) ???
                newerr = np.sqrt(sumsq)
            else:
                # Without the data arrays the weighting is unknown.
                return self._combine_errors_quadrature(error1, error2)
        else:
            newerr = None
        return newerr

    def _combine_errors_divisive(self, error1, error2, data1, data2):
        """
        Helper function to combine two error arrays in quadrature,
        where each error array is weighted by a sensitivity
        coefficient.

        This functions is used when one data array is divided by
        another, so the sensitivity coefficient for the first array
        is proportional to the inverse of the second but the
        sensitivity coefficient for the second array is proportional
        to the first.

        CHECK THE MATHS

        NOTE: This function is valid only when combining two sets
        of data with independent errors. This assumption might not
        be valid in all circumstances, so use with care.
        """
        # The end product will have an ERR unit only if both products
        # started with an ERR unit.
        if error1 is not None and error2 is not None:
            if data1 is not None and data2 is not None:
                # NOTE: These operations might cause an overflow
                # for some data types.
                data1sq = np.square(data1)
                data2sq = np.square(data2)
                # NOTE: The errors will blow up if any of the data2sq values
                # are close to zero. There might be a divide by zero.
                err1sq = np.square(error1)
                err2sq = np.square(error2)
                sumsq = (err1sq / data2sq) + \
                    ((err2sq * data1sq) / (data2sq * data2sq))
                # sumsq = (data2weight * err1sq) + (data1sq * err2sq)
                # Comment by <NAME>:
                # Shouldn't the error propagation according to Gauss be
                # sqrt(err1sq*sci2weight + err2sq*sci1sq/(sci2sq*sci2sq))
                # since the partial derivation of a/b on b is -a/(b*b)
                newerr = np.sqrt(sumsq)
            else:
                # Without the data arrays the weighting is unknown.
                return self._combine_errors_quadrature(error1, error2)
        else:
            newerr = None
        return newerr

    def _combine_quality(self, dq1, dq2):
        """
        Helper function to combine the quality arrays of two
        MiriMeasuredModel objects. Any point flagged as bad in
        either of the two products is flagged as bad in the
        result.
        """
        return combine_quality(dq1, dq2)

    def __add__(self, other):
        """
        Add a scalar, an array or another JwstDataModel object to
        this MiriMeasuredModel object.
        """
        # Check this object is capable of mathematical operation.
        self._check_for_data()
        # Start with an empty version of the current object and clone
        # the metadata.
        newobject = self.__class__()
        newobject.update( self )
        if isinstance(other, (float, int)):
            # A scalar quantity is being added. Add to the SCI array but
            # leave the ERR and DQ arrays as they are.
            newobject.data = self.data + other
            if not self.noerr:
                newobject.err = self.err
            newobject.dq = self.dq
        elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
            # A data array is being added to this product. This should
            # work provided the two arrays are broadcastable.
            newobject.data = self.data + np.asarray(other)
            # Adding a plain data array erases the error information.
            if not self.noerr:
                newobject.err = np.zeros_like(self.err)
            newobject.dq = self.dq
        elif isinstance(other, JwstDataModel):
            # Two data products are being added together. Ensure they
            # both have a valid primary data array.
            if hasattr(other, 'data') and self._isvalid(other.data):
                newobject.data = self.data + other.data
                if not self.noerr:
                    if hasattr(other, 'err') and self._isvalid(other.err):
                        newobject.err = \
                            self._combine_errors_quadrature(self.err,
                                                            other.err)
                    else:
                        # If only one error array is known, the combined error
                        # becomes unknown.
                        newobject.err = np.zeros_like(self.err)
                if hasattr(other, 'dq') and self._isvalid(other.dq):
                    newobject.dq = self._combine_quality(self.dq, other.dq)
            else:
                raise TypeError("Both data products must contain a " + \
                                "primary data array.")
        else:
            # NOTE: leading space before "objects" so the message reads
            # "... and X objects."
            strg = "Cannot add " + str(self.__class__.__name__)
            strg += " and " + str(other.__class__.__name__) + " objects."
            del newobject
            raise TypeError(strg)
        return newobject

    def __sub__(self, other):
        """
        Subtract a scalar, an array or another JwstDataModel object from
        this MiriMeasuredModel object.
        """
        # Check this object is capable of mathematical operation.
        self._check_for_data()
        # Start with an empty version of the current object and clone
        # the metadata.
        newobject = self.__class__()
        newobject.update( self )
        if isinstance(other, (float, int)):
            # A scalar quantity is being subtracted. Subtract from the SCI
            # array but leave the ERR and DQ arrays as they are.
            newobject.data = self.data - other
            if not self.noerr:
                newobject.err = self.err
            newobject.dq = self.dq
        elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
            # A data array is being subtracted from this product. This should
            # work provided the two arrays are broadcastable.
            newobject.data = self.data - np.asarray(other)
            # Subtracting a plain data array erases the error information.
            if not self.noerr:
                newobject.err = np.zeros_like(self.err)
            newobject.dq = self.dq
        elif isinstance(other, JwstDataModel):
            # Two data products are being subtracted. Ensure they
            # both have a valid primary data array.
            if hasattr(other, 'data') and self._isvalid(other.data):
                newobject.data = self.data - other.data
                if not self.noerr:
                    if hasattr(other, 'err') and self._isvalid(other.err):
                        newobject.err = \
                            self._combine_errors_quadrature(self.err, other.err)
                    else:
                        # If only one error array is known, the combined error
                        # becomes unknown.
                        newobject.err = np.zeros_like(self.err)
                if hasattr(other, 'dq') and self._isvalid(other.dq):
                    newobject.dq = self._combine_quality(self.dq, other.dq)
            else:
                raise TypeError("Both data products must contain a " + \
                                "primary data array.")
        else:
            strg = "Cannot subtract " + str(self.__class__.__name__)
            strg += " and " + str(other.__class__.__name__) + " objects."
            del newobject
            raise TypeError(strg)
        return newobject

    def __mul__(self, other):
        """
        Multiply this MiriMeasuredModel object by a scalar, an array or
        another JwstDataModel object.
        """
        # Check this object is capable of mathematical operation.
        self._check_for_data()
        # Start with an empty version of the current object and clone
        # the metadata.
        newobject = self.__class__()
        newobject.update( self )
        if isinstance(other, (float, int)):
            # A scalar quantity is being multiplied. Multiply the SCI and ERR
            # arrays but leave the DQ array as it is.
            newobject.data = self.data * other
            if not self.noerr:
                newobject.err = self.err * other
            newobject.dq = self.dq
        elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
            # A data array is being multiplied with this product. This should
            # work provided the two arrays are broadcastable.
            newobject.data = self.data * np.asarray(other)
            # Multiplying a plain data array erases the error information.
            if not self.noerr:
                newobject.err = np.zeros_like(self.err)
            newobject.dq = self.dq
        elif isinstance(other, JwstDataModel):
            # Two data products are being multiplied together. Ensure they
            # both have a valid primary data array.
            if hasattr(other, 'data') and self._isvalid(other.data):
                newobject.data = self.data * other.data
                if not self.noerr:
                    if hasattr(other, 'err') and self._isvalid(other.err):
                        newobject.err = self._combine_errors_multiplicative( \
                                            self.err, other.err, self.data,
                                            other.data)
                    else:
                        # If only one error array is known, the combined error
                        # becomes unknown.
                        newobject.err = np.zeros_like(self.err)
                if hasattr(other, 'dq') and self._isvalid(other.dq):
                    newobject.dq = self._combine_quality(self.dq, other.dq)
            else:
                raise TypeError("Both data products must contain a " + \
                                "primary data array.")
        else:
            strg = "Cannot multiply " + str(self.__class__.__name__)
            strg += " and " + str(other.__class__.__name__) + " objects."
            del newobject
            raise TypeError(strg)
        return newobject

    def __truediv__(self, other):
        """
        Divide this MiriMeasuredModel object by a scalar, an array or
        another JwstDataModel object.
        """
        # Check this object is capable of mathematical operation.
        self._check_for_data()
        # Start with an empty version of the current object and clone
        # the metadata.
        newobject = self.__class__()
        newobject.update( self )
        if isinstance(other, (float, int)):
            # A scalar quantity is being divided. Divide the SCI and ERR
            # arrays but leave the DQ array as it is.
            # Trap a divide by zero..
            if np.abs(other) <= sys.float_info.epsilon:
                strg = "%s: Divide by scalar zero!" % self.__class__.__name__
                del newobject
                raise ValueError(strg)
            newobject.data = self.data / other
            if not self.noerr:
                newobject.err = self.err / other
            newobject.dq = self.dq
        elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
            # A data array is dividing this product. This should
            # work provided the two arrays are broadcastable.
            # NOTE: Any divide by zero operations will be trapped by numpy.
            newobject.data = self.data / np.asarray(other)
            # Dividing by a plain data array erases the error information.
            if not self.noerr:
                newobject.err = np.zeros_like(self.err)
            newobject.dq = self.dq
        elif isinstance(other, JwstDataModel):
            # The data product is being divided by another. Ensure they
            # both have a valid primary data array.
            # NOTE: Any divide by zero operations will be trapped by numpy.
            if hasattr(other, 'data') and self._isvalid(other.data):
                newobject.data = self.data / other.data
                if not self.noerr:
                    if hasattr(other, 'err') and self._isvalid(other.err):
                        newobject.err = self._combine_errors_divisive( \
                                            self.err, other.err, self.data,
                                            other.data)
                    else:
                        # If only one error array is known, the combined error
                        # becomes unknown.
                        newobject.err = np.zeros_like(self.err)
                if hasattr(other, 'dq') and self._isvalid(other.dq):
                    newobject.dq = self._combine_quality(self.dq, other.dq)
            else:
                raise TypeError("Both data products must contain a " + \
                                "primary data array.")
        else:
            strg = "Cannot divide " + str(self.__class__.__name__)
            strg += " and " + str(other.__class__.__name__) + " objects."
            del newobject
            raise TypeError(strg)
        return newobject

    # From Python 3, division is the same as true division.
    def __div__(self, other):
        return self.__truediv__(other)

    @property
    def data_masked(self):
        # Generate the masked data on the fly. This ensures the
        # masking is always up to date with the latest dq array.
        # TODO: Can this result be cached and the cache invalidated
        # when either the data or dq arrays change?
        if self.data is not None and self.data.ndim > 0 and self.dq is not None:
            if np.all(self.dq == 0):
                # All data good.
                return self.data
            else:
                self._data_mask = self._generate_mask(self.data, self.dq)
                self._data_fill_value = self._generate_fill(self.data,
                                                            self._data_fill)
                return ma.array(self.data, mask=self._data_mask,
                                fill_value=self._data_fill_value)
        else:
            return self.data

    @property
    def err_masked(self):
        # Generate the masked error array on the fly. This ensures the
        # masking is always up to date with the latest dq array.
        # TODO: Can this result be cached and the cache invalidated
        # when either the err or dq arrays change?
        if self.noerr:
            return None
        if self.err is not None and self.err.ndim > 0 and self.dq is not None:
            if np.all(self.dq == 0):
                # All data good.
                return self.err
            else:
                self._err_mask = self._generate_mask(self.err, self.dq)
                self._err_fill_value = self._generate_fill(self.err,
                                                           self._err_fill)
                return ma.array(self.err, mask=self._err_mask,
                                fill_value=self._err_fill_value)
        else:
            return self.err

    @property
    def data_filled(self):
        # Return the masked data with missing values replaced by the
        # current data fill value.
        masked = self.data_masked
        if masked is not None and isinstance(masked, ma.masked_array):
            return masked.filled(self._data_fill_value)
        else:
            return self.data

    @property
    def err_filled(self):
        # Return the masked error array with missing values replaced by
        # the current error fill value.
        if self.noerr:
            return None
        masked = self.err_masked
        if masked is not None and isinstance(masked, ma.masked_array):
            return masked.filled(self._err_fill_value)
        else:
            return self.err
class HasDataErrAndGroups(HasDataErrAndDq):
"""
An abstract class which overrides the data quality masking functions
of HasDataErrAndDq for ramp data which contains PIXELDQ and RAMPDQ
arrays instead of DQ. The DQ array for ramp data is read-only.
"""
def __init__(self, data, err, noerr=False):
super(HasDataErrAndGroups, self).__init__(data=data, err=err, dq=None,
noerr=noerr )
def __add__(self, other):
"""
Add a scalar, an array or another JwstDataModel object to
this MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being added. Add to the SCI array but
# leave the ERR and DQ arrays as they are.
newobject.data = self.data + other
if not self.noerr:
newobject.err = self.err
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# A data array is being added to this product. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data + np.asarray(other)
# Adding a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, JwstDataModel):
# Two data products are being added together. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data + other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = \
self._combine_errors_quadrature(self.err,
other.err)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'pixeldq') and self._isvalid(other.pixeldq):
newobject.pixeldq = self._combine_quality(self.pixeldq, other.pixeldq)
if hasattr(other, 'groupdq') and self._isvalid(other.groupdq):
newobject.groupdq = self._combine_quality(self.groupdq, other.groupdq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot add " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __sub__(self, other):
"""
Subtract a scalar, an array or another JwstDataModel object from
this MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being subtracted. Subtract from the SCI
# array but leave the ERR and DQ arrays as they are.
newobject.data = self.data - other
if not self.noerr:
newobject.err = self.err
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# A data array is being subtracted to this product. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data - np.asarray(other)
# Adding a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, JwstDataModel):
# Two data products are being subtracted. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data - other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = \
self._combine_errors_quadrature(self.err, other.err)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'pixeldq') and self._isvalid(other.pixeldq):
newobject.pixeldq = self._combine_quality(self.pixeldq, other.pixeldq)
if hasattr(other, 'groupdq') and self._isvalid(other.groupdq):
newobject.groupdq = self._combine_quality(self.groupdq, other.groupdq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot subtract " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __mul__(self, other):
    """
    Multiply this MiriMeasuredModel object by a scalar, an array or
    another JwstDataModel object.

    :Parameters:

    other: scalar, array-like or JwstDataModel
        The quantity to multiply this data product by.

    :Returns:

    newobject: object of the same class as self
        A new data product containing the multiplied data.

    :Raises:

    TypeError
        Raised if other is an unsupported type, or if a JwstDataModel
        operand lacks a valid primary data array.
    """
    # Check this object is capable of mathematical operation.
    self._check_for_data()
    # Start with an empty version of the current object and clone
    # the metadata.
    newobject = self.__class__()
    newobject.update( self )
    if isinstance(other, (float, int)):
        # A scalar quantity is being multiplied. Multiply the SCI and ERR
        # arrays but leave the DQ array as it is.
        newobject.data = self.data * other
        if not self.noerr:
            newobject.err = self.err * other
        newobject.pixeldq = self.pixeldq
        newobject.groupdq = self.groupdq
    elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
        # A data array is being multiplied to this product. This should
        # work provided the two arrays are broadcastable.
        newobject.data = self.data * np.asarray(other)
        # Multiplying a plain data array erases the error information.
        if not self.noerr:
            newobject.err = np.zeros_like(self.err)
        newobject.pixeldq = self.pixeldq
        newobject.groupdq = self.groupdq
    elif isinstance(other, JwstDataModel):
        # Two data products are being multiplied together. Ensure they
        # both have a valid primary data array.
        if hasattr(other, 'data') and self._isvalid(other.data):
            newobject.data = self.data * other.data
            if not self.noerr:
                if hasattr(other, 'err') and self._isvalid(other.err):
                    newobject.err = self._combine_errors_multiplicative(
                        self.err, other.err, self.data, other.data)
                else:
                    # If only one error array is known, the combined error
                    # becomes unknown.
                    newobject.err = np.zeros_like(self.err)
            if hasattr(other, 'pixeldq') and self._isvalid(other.pixeldq):
                newobject.pixeldq = self._combine_quality(self.pixeldq, other.pixeldq)
            if hasattr(other, 'groupdq') and self._isvalid(other.groupdq):
                newobject.groupdq = self._combine_quality(self.groupdq, other.groupdq)
        else:
            raise TypeError("Both data products must contain a " + \
                            "primary data array.")
    else:
        # BUG FIX: the error message previously read "...and ndarrayobjects."
        # because the space before "objects" was missing.
        strg = "Cannot multiply " + str(self.__class__.__name__)
        strg += " and " + str(other.__class__.__name__) + " objects."
        del newobject
        raise TypeError(strg)
    return newobject
def __truediv__(self, other):
    """
    Divide this MiriMeasuredModel object by a scalar, an array or
    another JwstDataModel object.

    :Parameters:

    other: scalar, array-like or JwstDataModel
        The quantity to divide this data product by.

    :Returns:

    newobject: object of the same class as self
        A new data product containing the divided data.

    :Raises:

    ValueError
        Raised on an explicit scalar divide by zero. (Array divides by
        zero are left to numpy's own handling.)
    TypeError
        Raised if other is an unsupported type, or if a JwstDataModel
        operand lacks a valid primary data array.
    """
    # Check this object is capable of mathematical operation.
    self._check_for_data()
    # Start with an empty version of the current object and clone
    # the metadata.
    newobject = self.__class__()
    newobject.update( self )
    if isinstance(other, (float, int)):
        # A scalar quantity is being divided. Divide the SCI and ERR
        # arrays but leave the DQ array as it is.
        # Trap a divide by zero.
        if np.abs(other) <= sys.float_info.epsilon:
            strg = "%s: Divide by scalar zero!" % self.__class__.__name__
            del newobject
            raise ValueError(strg)
        newobject.data = self.data / other
        if not self.noerr:
            newobject.err = self.err / other
        newobject.pixeldq = self.pixeldq
        newobject.groupdq = self.groupdq
    elif isinstance(other, (ma.masked_array, np.ndarray, list, tuple)):
        # A data array is dividing this product. This should
        # work provided the two arrays are broadcastable.
        # NOTE: Any divide by zero operations will be trapped by numpy.
        newobject.data = self.data / np.asarray(other)
        # Dividing by a plain data array erases the error information.
        if not self.noerr:
            newobject.err = np.zeros_like(self.err)
        newobject.pixeldq = self.pixeldq
        newobject.groupdq = self.groupdq
    elif isinstance(other, JwstDataModel):
        # The data product is being divided by another. Ensure they
        # both have a valid primary data array.
        # NOTE: Any divide by zero operations will be trapped by numpy.
        if hasattr(other, 'data') and self._isvalid(other.data):
            newobject.data = self.data / other.data
            if not self.noerr:
                if hasattr(other, 'err') and self._isvalid(other.err):
                    newobject.err = self._combine_errors_divisive(
                        self.err, other.err, self.data, other.data)
                else:
                    # If only one error array is known, the combined error
                    # becomes unknown.
                    newobject.err = np.zeros_like(self.err)
            if hasattr(other, 'pixeldq') and self._isvalid(other.pixeldq):
                newobject.pixeldq = self._combine_quality(self.pixeldq, other.pixeldq)
            if hasattr(other, 'groupdq') and self._isvalid(other.groupdq):
                newobject.groupdq = self._combine_quality(self.groupdq, other.groupdq)
        else:
            raise TypeError("Both data products must contain a " + \
                            "primary data array.")
    else:
        # BUG FIX: the error message previously read "...and ndarrayobjects."
        # because the space before "objects" was missing.
        strg = "Cannot divide " + str(self.__class__.__name__)
        strg += " and " + str(other.__class__.__name__) + " objects."
        del newobject
        raise TypeError(strg)
    return newobject
# From Python 3, division is the same as true division.
def __div__(self, other):
    """Python 2 classic-division hook; delegates to __truediv__."""
    return self.__truediv__(other)
#
# A minimal test is run when this file is run as a main program.
# For a more substantial test see miri/datamodels/tests.
#
if __name__ == '__main__':
    print("Testing the operations module.")
    import math
    # Check that dqflags has been imported properly
    print("Master data quality flags:")
    for flags in master_flags:
        print(flags)
    # Build a 3x3 test product: SCI data, ERR array and a 4-plane
    # stacked group data quality array.
    data3x3 = np.array([[1.,2.,3.],[4.,5.,6.],[7.,8.,9.]])
    err3x3 = np.array([[1.,1.,1.],[2.,2.,2.],[1.,1.,1.]])
    dqtest = [[0,1,0],
              [0,1,1],
              [0,0,0]]
    dqtest2 = np.array([dqtest,dqtest,dqtest,dqtest])
    testobj = HasDataErrAndDq( data3x3, err3x3, dqtest2)
    # Collapse the 4-D DQ array one dimension at a time.
    newdq1 = testobj._shrink_dq( dqtest2 )
    print("\nData quality array:\n", dqtest2)
    print("has shrunk to:\n", newdq1)
    newdq2 = testobj._shrink_dq( newdq1 )
    print("and has shrunk again to:\n", newdq2)
    newdq3 = testobj._shrink_dq( newdq2 )
    print("and has shrunk finally to:\n", newdq3)
    del newdq1, newdq2, newdq3
    print("Testing combination and masking of data quality arrays")
    data3x3 = np.array([[1.,2.,3.],[4.,5.,6.],[7.,8.,9.]])
    err3x3 = np.array([[1.,1.,1.],[2.,2.,2.],[1.,1.,1.]])
    dqtest = np.array([[0,1,0], [4,2,1], [0,3,0]])
    # NOTE(review): dqtest2 (not the new dqtest) is passed here - looks
    # like a copy/paste slip; confirm which DQ array is intended.
    testobj = HasDataErrAndDq( data3x3, err3x3, dqtest2)
    mask1 = testobj._generate_mask(data3x3, dqtest, bitmask=None)
    print("\nGenerating mask from:\n", dqtest)
    print("with no bitmask gives:\n", str(mask1))
    mask2 = testobj._generate_mask(data3x3, dqtest, bitmask=1)
    print("\nGenerating mask from:\n", dqtest)
    print("with bitmask 1 gives:\n", str(mask2))
    mask3 = testobj._generate_mask(data3x3, dqtest, bitmask=3)
    print("\nGenerating mask from:\n", dqtest)
    print("with bitmask 3 gives:\n", str(mask3))
    del mask1, mask2, mask3
    # Testing error combination functions.
    # sqN is intended to hold math.sqrt(N) so that quadrature sums come
    # out as round numbers.
    sq0 = 0.0
    sq1 = 1.0
    sq2 = math.sqrt(2.0)
    sq3 = math.sqrt(3.0)
    # NOTE(review): sq4 = 4.0 breaks the sqrt(N) pattern
    # (math.sqrt(4.0) is 2.0) - confirm whether this value is intended.
    sq4 = 4.0
    sq5 = math.sqrt(5.0)
    sq6 = math.sqrt(6.0)
    sq7 = math.sqrt(7.0)
    sq8 = math.sqrt(8.0)
    sq9 = 3.0
    error1 = np.array([[sq0,sq1,sq2],[sq3,sq4,sq5],[sq7,sq8,sq9]])
    error2 = np.array([[sq9,sq8,sq7],[sq5,sq4,sq3],[sq2,sq1,sq0]])
    error0 = np.zeros_like(error1)
    print("\nCombining error array with itself:\n", error1)
    newerr = testobj._combine_errors_quadrature(error1, error1)
    print("by quadrature:\n", newerr)
    print("\nCombining error array:\n", error1)
    print("with:\n", error0)
    newerr = testobj._combine_errors_quadrature(error1, error0)
    print("by quadrature:\n", newerr)
    print("\nCombining error array:\n", error1)
    print("with:\n", error2)
    newerr = testobj._combine_errors_quadrature(error1, error2)
    print("by quadrature:\n", newerr)
    # Weight arrays for the multiplicative/divisive combinations;
    # data_bad contains a zero to exercise the degenerate case.
    data0 = np.array([[0,0,0],[0,0,0],[0,0,0]])
    data1 = np.array([[1,1,1],[1,1,1],[1,1,1]])
    data2 = np.array([[2,2,2],[2,2,2],[2,2,2]])
    data_bad = np.array([[1,1,1],[1,0,1],[1,1,1]])
    print("\nCombining error array:\n", error1)
    print("with:\n", error2)
    print("weighted twice by:\n", data1)
    newerr = testobj._combine_errors_multiplicative(error1, error2, data1, data1)
    print("multiplicative:\n", newerr)
    print("\nCombining error array:\n", error1)
    print("with:\n", error2)
    print("weighted by:\n", data1)
    print("and:\n", data0)
    newerr = testobj._combine_errors_multiplicative(error1, error2, data1, data0)
    print("multiplicative:\n", newerr)
    print("\nCombining error array:\n", error1)
    print("with:\n", error2)
    print("weighted by:\n", data1)
    print("and:\n", data2)
    newerr = testobj._combine_errors_multiplicative(error1, error2, data1, data2)
    print("multiplicative:\n", newerr)
    print("\nCombining error array:\n", error1)
    print("with:\n", error2)
    print("weighted twice by:\n", data1)
    newerr = testobj._combine_errors_divisive(error1, error2, data1, data1)
    print("divisive:\n", newerr)
    print("\nCombining error array:\n", error1)
    print("with:\n", error2)
    print("weighted by:\n", data0)
    print("and:\n", data1)
    newerr = testobj._combine_errors_divisive(error1, error2, data0, data1)
    print("divisive:\n", newerr)
    print("\nCombining error array:\n", error1)
    print("with:\n", error2)
    print("weighted by:\n", data1)
    print("and:\n", data2)
    newerr = testobj._combine_errors_divisive(error1, error2, data1, data2)
    print("divisive:\n", newerr)
    print("\nCombining error array:\n", error1)
    print("with:\n", error2)
    print("weighted by:\n", data1)
    print("and:\n", data_bad)
    newerr = testobj._combine_errors_divisive(error1, error2, data1, data_bad)
    print("divisive:\n", newerr)
    print("Test finished.")
| StarcoderdataPython |
6567782 | <reponame>DeonBest/ACEPython<gh_stars>0
"""
Mic Recorder
This is a helper class for the Mic Reader.
It records microphone data and stores it in frames.
Author: <NAME>
Date: February 2021
"""
# class taken from the SciPy 2015 Vispy talk opening example
# see https://github.com/vispy/vispy/pull/928
import pyaudio
import threading
import atexit
import numpy as np
class MicrophoneRecorder(object):
    """
    Records microphone data via a PyAudio callback stream and stores
    the captured chunks in an internal frame list until they are
    collected with get_frames().
    """

    def __init__(self, rate=4000, framesize=100):
        """
        Initialize the recorder and immediately start streaming.

        Args:
            rate: sampling rate in Hz. Default 4000.
            framesize: chunk size - number of frames in the buffer.
                Default 100.
        """
        self.rate = rate
        self.chunksize = framesize
        self.p = pyaudio.PyAudio()
        # Mono 16-bit input stream; PyAudio invokes new_frame from its
        # own thread each time a buffer of chunksize frames is ready.
        self.stream = self.p.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=self.rate,
                                  input=True,
                                  frames_per_buffer=self.chunksize,
                                  stream_callback=self.new_frame)
        # Lock guards self.frames / self.stop, which are shared with the
        # PyAudio callback thread.
        self.lock = threading.Lock()
        self.stop = False
        self.frames = []
        self.start()
        atexit.register(self.close)

    def new_frame(self, data, frame_count, time_info, status):
        """
        Stream callback: a new frame is available from PyAudio; append
        it to the data collection scaled to the range -1..1.
        """
        # BUG FIX: np.fromstring is deprecated (and removed in newer
        # NumPy releases) for binary input; np.frombuffer is the
        # supported equivalent. Also removed a stray debug print().
        samples = np.frombuffer(data, dtype=np.int16)
        # Vectorized conversion from int16 range (-32767..32767) to
        # floats in -1..1, replacing the per-sample list comprehension.
        converted = samples / 32767.0
        with self.lock:
            self.frames.append(converted)
            if self.stop:
                return None, pyaudio.paComplete
        return None, pyaudio.paContinue

    def get_frames(self):
        """Return the frames collected so far and reset the buffer."""
        with self.lock:
            frames = self.frames
            self.frames = []
            return frames

    def start(self):
        """Start streaming from the microphone."""
        self.stream.start_stream()

    def close(self):
        """Stop the callback, close the mic stream and terminate PyAudio."""
        with self.lock:
            self.stop = True
        self.stream.close()
        self.p.terminate()
| StarcoderdataPython |
6587183 | """Appication sub-layer for state synchronization."""
import collections
import enum
import logging
from typing import (
Any, Deque, Generic, Iterable, Iterator, Mapping, Optional, TypeVar
)
import attr
import betterproto
from ventserver.protocols import exceptions
from ventserver.sansio import protocols
_Index = TypeVar('_Index', bound=enum.Enum)
class Sender(protocols.Filter[Any, betterproto.Message]):
    """Interface class for state senders.

    Classes which implement this interface may either take a narrower
    input type or take no inputs; outputs are protobuf state messages.
    """
IndexedSender = Mapping[_Index, Optional[betterproto.Message]]
@attr.s
class MappedSenders(IndexedSender[_Index]):
    """A collection of Sender filters which can be indexed into."""

    # Underlying mapping from index to the Sender filter it delegates to.
    senders: Mapping[_Index, Sender] = attr.ib()

    def __getitem__(self, key: _Index) -> Optional[betterproto.Message]:
        """Return the next output from the specified sender."""
        return self.senders[key].output()

    def __len__(self) -> int:
        """Return the number of senders."""
        return len(self.senders)

    def __iter__(self) -> Iterator[_Index]:
        """Iterate over all indices."""
        return self.senders.__iter__()
@attr.s
class SequentialSender(Sender, Generic[_Index]):
    """State sending filter on a fixed sequence.

    Does not take inputs. Outputs are state updates for the peer.
    If skip_unavailable is set to True, when an index is reached which
    causes indexed_sender to return None, the sequential sender will keep
    advancing the output schedule until it reaches an index for which
    indexed_sender doesn't return None, until it has gone through the entire
    schedule.

    Warning: if indexed_sender is mutated by other code, this filter is only
    safe to use in synchronous environments, such as part of another Filter
    which completely owns indexed_sender.
    """
    _logger = logging.getLogger('.'.join((__name__, 'SequentialSender')))

    # The fixed order in which state indices are emitted.
    index_sequence: Iterable[_Index] = attr.ib()
    # Source of the actual messages, keyed by index.
    indexed_sender: IndexedSender[_Index] = attr.ib()
    skip_unavailable: bool = attr.ib(default=False)
    # Rotating realisation of index_sequence; the head is the next index.
    _schedule: Deque[_Index] = attr.ib()
    _last_index: Optional[_Index] = attr.ib(default=None)

    @_schedule.default
    def init_schedule(self) -> Deque[_Index]:
        """Initialize the internal output schedule."""
        return collections.deque(self.index_sequence)

    def input(self, event: None) -> None:
        """Handle input events."""
        # This sender takes no inputs; present to satisfy the Filter API.

    def output(self) -> Optional[betterproto.Message]:
        """Emit the next output event."""
        # At most one full pass over the schedule: unavailable indices can
        # be skipped (skip_unavailable) without looping forever.
        for _ in range(len(self._schedule)):
            output = self._get_next_output()
            if output is not None or not self.skip_unavailable:
                return output
        return None

    def _get_next_output(self) -> Optional[betterproto.Message]:
        """Produce the next state in the schedule."""
        try:
            index = self._schedule[0]
        except IndexError:
            # Empty schedule: nothing to send.
            return None
        try:
            output_event = self.indexed_sender[index]
        except KeyError as exc:
            raise exceptions.ProtocolDataError(
                'Scheduled index is not valid: {}'.format(index)
            ) from exc
        # Advance the schedule whether or not a message was available.
        self._schedule.rotate(-1)
        if output_event is None:
            return None
        self._logger.debug('Sending: %s', output_event)
        self._last_index = index
        return output_event

    @property
    def last_index(self) -> Optional[_Index]:
        """Return the index associated with the last produced output."""
        return self._last_index
@attr.s
class TimedSender(Sender, Generic[_Index]):
    """State sending filter on a fixed time interval.

    Inputs are clock updates. Outputs are state updates for the peer.

    Warning: if indexed_sender is mutated by other code, this filter is only
    safe to use in synchronous environments, such as part of another Filter
    which completely owns indexed_sender.
    """
    _logger = logging.getLogger('.'.join((__name__, 'TimedSequentialSender')))

    # Underlying sequential sender which chooses what to emit.
    sender: SequentialSender[_Index] = attr.ib()
    # Minimum seconds between outputs; 0 disables rate limiting.
    output_interval: float = attr.ib(default=0)
    _monotonic_time: Optional[float] = attr.ib(default=None)
    _last_output_time: Optional[float] = attr.ib(default=None)

    def input(self, event: Optional[float]) -> None:
        """Handle clock update events (monotonic timestamps, seconds)."""
        if event is None:
            return

        self._logger.debug('Time: %f', event)
        self._monotonic_time = event

    def output(self) -> Optional[betterproto.Message]:
        """Emit the next output event, rate-limited by output_interval."""
        if self.output_interval != 0 and self._last_output_time is not None:
            if self._monotonic_time is None:
                return None

            if (
                    self._monotonic_time - self._last_output_time <
                    self.output_interval
            ):
                return None

        output = self.sender.output()
        # NOTE(review): the output timestamp is refreshed even when the
        # underlying sender produced nothing - confirm this is intended.
        self._last_output_time = self._monotonic_time
        return output
| StarcoderdataPython |
6534702 | <reponame>ropable/wastd<filename>conservation/migrations/0005_auto_20190122_1638.py<gh_stars>1-10
# Generated by Django 2.0.8 on 2019-01-22 08:38
import django.contrib.gis.db.models.fields
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.8 (makemigrations). Hand edits can
    # desynchronise the migration history; the typos in the help_text
    # strings ('Occurence', 'Managment', 'occurences') are part of the
    # recorded schema state and should only be fixed in a new migration.

    dependencies = [
        ('taxonomy', '0016_auto_20190122_1638'),
        ('conservation', '0004_auto_20190122_1637'),
    ]

    operations = [
        migrations.CreateModel(
            name='ManagementAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('target_area', django.contrib.gis.db.models.fields.MultiPolygonField(blank=True, help_text='If this action pertains to only some but not all occurrences, indicate the target area(s) here. This management action will be automatically affiliated with all intersecting occurrence areas.', null=True, srid=4326, verbose_name='Target Area')),
                ('occurrence_area_code', models.CharField(blank=True, help_text='The known code for the occurrence area this management action pertains to, either a Fauna site, a Flora (sub)population ID, or a TEC/PEC boundary name.', max_length=1000, null=True, verbose_name='Occurence area code')),
                ('instructions', models.TextField(blank=True, help_text='Details on the intended implementation.', null=True, verbose_name='Instructions')),
                ('category', models.ForeignKey(blank=True, help_text='Choose the overarching category.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='conservation.ManagementActionCategory', verbose_name='Managment action category')),
                ('communities', models.ManyToManyField(blank=True, help_text='All communities this management action pertains to.', to='taxonomy.Community', verbose_name='Communities')),
                ('taxa', models.ManyToManyField(blank=True, help_text='All taxa this management action pertains to.', to='taxonomy.Taxon', verbose_name='Taxa')),
            ],
            options={
                'verbose_name': 'Management Action',
                'verbose_name_plural': 'Management Actions',
            },
        ),
        migrations.AddField(
            model_name='document',
            name='management_actions',
            field=models.ManyToManyField(blank=True, help_text='Management actions to be undertaken on all occurences of the subject as specified in the document.', related_name='management_actions_per_document', to='conservation.ManagementAction', verbose_name='Management Action'),
        ),
    ]
| StarcoderdataPython |
6462898 | from Lemmatization.utility.helper import get_stop_word_path
from Lemmatization.utility.reader import read_file
def get_from_nltk():
    """Return the Nepali stop-word list bundled with NLTK.

    NOTE(review): requires the NLTK 'stopwords' corpus to have been
    downloaded beforehand - confirm availability in the environment.
    """
    from nltk.corpus import stopwords
    return stopwords.words('nepali')
def get_from_collections():
    """Return the stop-word list loaded from the bundled stop-word file."""
    # Downloaded from: https://github.com/kushalzone/NepaliStopWords/
    return read_file(get_stop_word_path())
| StarcoderdataPython |
3586122 | <filename>codigo/Live176/exemplos_dos_slides/exemplo_06.py
"""Exemplo de como obter dados da image."""
from PIL import Image
im = Image.open('2x2px.jpg')
im.size # (2, 2)
im.mode # 'RGB'
im.bits # 8
2**8 # 256
| StarcoderdataPython |
6629080 | """
awbots.py - American West Bots for automating American West MARC record loads
"""
__author__ = '<NAME>'
from marcbots import MARCImportBot
PROXY_LOCATION='0-www.americanwest.amdigital.co.uk.tiger.coloradocollege.edu'
class AmericanWestBot(MARCImportBot):
    """
    The `AmericanWestBot` reads MARC records from the
    American West Database, validates and adds/modifies fields
    for a new import MARC file for loading into TIGER.

    NOTE(review): several methods instantiate `Field`, which is not
    imported in this module's visible imports - it presumably needs
    `from pymarc import Field` (or to be re-exported by `marcbots`);
    confirm before running.
    """
    __name__ = 'American West Bot'

    def __init__(self,
                 marc_file):
        """
        Initializes `AmericanWestBot` for the conversion
        process.

        :param marc_file: path to the input MARC file
        """
        MARCImportBot.__init__(self,marc_file)

    def processRecord(self,
                      marc_record):
        """
        Method processes a single marc_record for American West
        MARC: fixes control fields, strips series fields, proxies
        URLs and normalises the corporate/uniform-title entries.

        :param marc_record: a MARC record
        """
        marc_record = self.validate001(marc_record)
        marc_record = self.validate003(marc_record)
        marc_record = self.validate006(marc_record)
        marc_record = self.replace007(marc_record)
        marc_record = self.validate490(marc_record)
        marc_record = self.processURLs(marc_record,
                                       proxy_location=PROXY_LOCATION)
        marc_record = self.validate710(marc_record)
        marc_record = self.validate730(marc_record)
        marc_record = self.validate830(marc_record)
        return marc_record

    def validate001(self,marc_record):
        """
        Method replaces AC prefix with AMP prefix for Prospector
        compatibility.

        :param marc_record: a MARC record
        """
        # NOTE(review): assumes an 001 field is always present; an empty
        # get_fields('001') would raise IndexError - confirm inputs.
        field001 = marc_record.get_fields('001')[0]
        marc_record.remove_field(field001)
        raw_data = field001.data
        field001.data = raw_data.replace('AC','AMP')
        marc_record.add_field(field001)
        return marc_record

    def validate003(self,marc_record):
        """
        Validates 003 field, adds control code.

        :param marc_record: a MARC record
        """
        marc_record = self.__remove_field__(marc_record=marc_record,
                                            tag='003')
        # Requires `Field` - see class-level NOTE about the missing import.
        new003 = Field(tag='003',
                       data='COC')
        marc_record.add_field(new003)
        return marc_record

    def validate490(self,marc_record):
        """
        Method removes all existing 490 (series statement) fields.

        :param marc_record: a MARC record
        """
        all490s = marc_record.get_fields('490')
        for field in all490s:
            marc_record.remove_field(field)
        return marc_record

    def validate710(self,
                    marc_record):
        """
        Method validates/adds 710 (corporate name) fields: removes any
        existing ones and adds the two standard entries.

        :param marc_record: a MARC record
        """
        all710s = marc_record.get_fields('710')
        for field in all710s:
            marc_record.remove_field(field)
        first710 = Field(tag='710',
                         indicators=['2',' '],
                         subfields=['a','Newberry Library.'])
        marc_record.add_field(first710)
        new710 = Field(tag='710',
                       indicators=['2',' '],
                       subfields=['a','<NAME> Digital (Firm)'])
        marc_record.add_field(new710)
        return marc_record

    def validate730(self,marc_record):
        """
        Method validates 730 with American West desired uniform title.

        :param marc_record: a MARC record
        """
        self.__remove_field__(marc_record=marc_record,
                              tag='730')
        field730 = Field(tag='730',
                         indicators=['0',' '],
                         subfields=['a','American West (Online Publications)'])
        marc_record.add_field(field730)
        return marc_record

    def validate830(self,marc_record):
        """
        Method removes all existing 830 (series added entry) fields.

        :param marc_record: a MARC record
        """
        all830s = marc_record.get_fields('830')
        for field in all830s:
            marc_record.remove_field(field)
        return marc_record
3404337 | #!/bin/python
#
#
#
#
#
#
# <NAME>
# created on: 2020-01-05 08:20:56
import argparse
import logging
import os
import sys
import time
from datetime import datetime
from functools import partial
from multiprocessing import Pool, cpu_count
import numpy as np
import pandas as pd
import pkg_resources
from scripts.check_ld_expanded_control_sets import check_ld_expanded_sets
from scripts.clump_snps import clump_snp_list, clump_snps
from scripts.combine_control_sets import combine_control_sets
from scripts.get_ldsnps_for_control_snps import get_ldsnps_for_control_snps
from scripts.helper_general import safe_mkdir, start_logger
from scripts.intersect_annotation import intersect_all_annotations
from scripts.ld_expand_all_control_sets import ld_expand_all_control_snps
from scripts.match_snps import match_snps
from scripts.helper_calc_genome_distribution_of_annotations import (
calc_genome_distribution_of_annotations,
)
from scripts.calc_trait_enrichment import calc_trait_entrichment
from scripts.organize_final_outputs import organize_final_outputs
from scripts.make_output_plots import make_output_plots
master_start = time.time()

# -----------
# Inputs and Outputs
# -----------
parser = argparse.ArgumentParser(
    description="Get evolutionary signatures using GWAS summary stats."
)
parser.add_argument(
    "analysis_name",
    action="store",
    type=str,
    help="name of this analysis no spaces allowed",
)
parser.add_argument(
    "gwas_summary_file",
    action="store",
    type=str,
    help="full path to gwas summary stats file",
)
parser.add_argument(
    "outputpath",
    action="store",
    type=str,
    help="full path to create output directory named 'analysis_name'",
)

args = parser.parse_args()
analysis_name = args.analysis_name
gwas_summary_file = args.gwas_summary_file
outputpath = args.outputpath

outputdir = os.path.join(outputpath, analysis_name)
anno_summary_dir = os.path.join(outputdir, "anno_genome_summary")
print(f"Note: Will overwrite existing output files.")
print(f"Outputs saved to: {outputdir}")

# -----------
# PIPELINE PARAMETERS
# -----------
# num_control_sets: number of MAF and LD matched control snps
# lead_snp_min_gwas_pvalue: SNPs with GWAS p-value lower than this will be considered to identify potential lead snps
# ld_snps_min_gwas_pvalue: only SNPs with GWAS p-value lower than this will be considered to be in LD with the lead SNPs
# min_r2_to_clump: the r2 threshold used to identify independent GWAS regions (snps with r2 greater than this will be clumped together)
# min_kb_from_index_to_clump: the kb threshold used to identify independent GWAS regions
# ld_expand_lead_snp_min_r2: for matching, this is the r2 threshold to which we ld expand the lead snp
# ldbuds_r2_threshold: for snpsnap, the r2 threshold used to determine the number of LD buddies; must be formatted as 'friends_ld09' or 'friends_ld08' etc.
# control_snps_ld_expand_r2: r2 threshold to ld expand the control snps
# ld_thresholds: ld threshold to create matched control regions (must be in the form "ld<=1.0" or ["ld<=1.0","ld<=0.9"] if expanding to 0.8<r2≤1.
# summary_type: how to summarize the evolutionary annotation over genomic regions (accepts min, max, median, mean)
num_control_sets = 5000
lead_snp_min_gwas_pvalue = 0.00000005
ld_snps_min_gwas_pvalue = 0.00000005
min_r2_to_clump = 0.1
min_kb_from_index_to_clump = 500
ld_expand_lead_snp_min_r2 = 0.9
ldbuds_r2_threshold = "friends_ld09"
control_snps_ld_expand_r2 = 0.9
ld_thresholds = ["ld<=1.0"]
summary_type = "max"

# -----------
# DEPENDENCIES
# -----------
# Reference data bundled with the package: SNPsnap database, 1000 Genomes
# EUR LD panels and per-annotation pickled lookup tables.
# data_path = pkg_resources.resource_filename("gsel_vec", "data/")
data_path = "./data"
snpsnap_db_file = os.path.join(data_path, "snpsnap_database/ld0.9_collection.tab.gz")
thous_gen_file = os.path.join(data_path, "1kg/EUR.chr{}.phase3.nodups")
anno_dir = os.path.join(data_path, "anno_dict")

# snpsnap_db_file = "/dors/capra_lab/projects/gwas_allele_age_evolution/scripts/pipeline/dev/gsel_vec/data/snpsnap_database/ld0.9_collection.tab.gz"
# thous_gen_file = "/dors/capra_lab/projects/gwas_allele_age_evolution/scripts/pipeline/dev/gsel_vec/data/1kg/EUR.chr{}.phase3.nodups"
# anno_dir = "/dors/capra_lab/projects/gwas_allele_age_evolution/scripts/pipeline/dev/gsel_vec/data/anno_dict"
# anno_summary_dir = "/dors/capra_lab/projects/gwas_allele_age_evolution/scripts/pipeline/dev/gsel_vec/data/anno_genome_summary"

# Map of annotation name -> pickled per-SNP annotation lookup file.
anno_path_dict = {
    "argweave": os.path.join(anno_dir, "argweave_snpsnap_eur_ld0.1_collection.pickle"),
    "betascore": os.path.join(
        anno_dir, "betascore_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "fst_eas_afr": os.path.join(
        anno_dir, "fst_eas_afr_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "fst_eur_afr": os.path.join(
        anno_dir, "fst_eur_afr_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "fst_eur_eas": os.path.join(
        anno_dir, "fst_eur_eas_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "gerp": os.path.join(anno_dir, "gerp_snpsnap_eur_ld0.1_collection.pickle"),
    "iES_Sabeti": os.path.join(
        anno_dir, "iES_Sabeti_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "linsigh": os.path.join(anno_dir, "linsigh_snpsnap_eur_ld0.1_collection.pickle"),
    "phastCon100": os.path.join(
        anno_dir, "phastCon100_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "phyloP100": os.path.join(
        anno_dir, "phyloP100_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "xpehh_afr2_eas": os.path.join(
        anno_dir, "xpehh_afr2_eas_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "xpehh_afr2_eur": os.path.join(
        anno_dir, "xpehh_afr2_eur_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "xpehh_eas_eur": os.path.join(
        anno_dir, "xpehh_eas_eur_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "geva_allele_age": os.path.join(
        anno_dir, "geva_allele_age_snpsnap_eur_ld0.1_collection.pickle"
    ),
    "B2": os.path.join(anno_dir, "B2_snpsnap_eur_ld0.1_collection.pickle"),
}

# Whether each annotation's enrichment test is two-sided.
two_sided_bool_dict = {
    "argweave": True,
    "betascore": False,
    "fst_eas_afr": False,
    "fst_eur_afr": False,
    "fst_eur_eas": False,
    "gerp": False,
    "iES_Sabeti": False,
    "linsigh": False,
    "phastCon100": False,
    "phyloP100": True,
    "xpehh_afr2_eas": False,
    "xpehh_afr2_eur": False,
    "xpehh_eas_eur": False,
    "xpehh_eas_eur": False,
    "geva_allele_age": True,
    "B2": False,
}
# NOTE(review): "xpehh_eas_eur" appears twice in the dict literal above
# (the duplicate key silently overrides) - confirm which entry is intended.

# -----------
# START LOGGER
# -----------
# TO DO: exit program if output dir already exists. ask for a different name
safe_mkdir(outputdir)
intermediate_dir = os.path.join(outputdir, "intermediate_analyses")
final_output_dir = os.path.join(outputdir, "final_outputs")
safe_mkdir(intermediate_dir)
safe_mkdir(final_output_dir)

logfile = os.path.join(outputdir, f"{analysis_name}.log")
logger = start_logger(logfile)
logger.debug(
    f"""
Settings for this run:
---- Inputs/Outputs ---
gwas summary file: {gwas_summary_file}
output directory: {outputpath}
---- Parameters ---
num_control_sets: {num_control_sets}
lead_snp_min_gwas_pvalue: {lead_snp_min_gwas_pvalue}
ld_snps_min_gwas_pvalue: {ld_snps_min_gwas_pvalue}
min_r2_to_clump: {min_r2_to_clump}
min_kb_from_index_to_clump: {min_kb_from_index_to_clump}
ld_expand_lead_snp_min_r2: {ld_expand_lead_snp_min_r2}
ldbuds_r2_threshold: {ldbuds_r2_threshold}
control_snps_ld_expand_r2: {control_snps_ld_expand_r2}
ld_thresholds: {ld_thresholds}
summary_type: {summary_type}
"""
)

# -----------
# START PIPELINE
# -----------
# clump snps based on LD
OutObj = clump_snps(
    gwas_summary_file,
    intermediate_dir,
    thous_gen_file,
    lead_snp_min_gwas_pvalue=lead_snp_min_gwas_pvalue,
    ld_snps_min_gwas_pvalue=ld_snps_min_gwas_pvalue,
    min_r2_to_clump=min_r2_to_clump,
    min_kb_from_index_to_clump=min_kb_from_index_to_clump,
    ld_expand_lead_snp_min_r2=ld_expand_lead_snp_min_r2,
)

# match snps
match_OutObj = match_snps(
    OutObj.get("lead_snp_ld_pairs_r2"),
    num_control_sets,
    ldbuds_r2_threshold,
    snpsnap_db_file,
    intermediate_dir,
)

# get ld snps for control snps
csnps_file = match_OutObj.get("matched_snps_file")
ldexp_OutObj = get_ldsnps_for_control_snps(
    csnps_file, thous_gen_file, intermediate_dir, control_snps_ld_expand_r2
)

# ld expand control snps for each input/lead snp
lead_snps_ld_counts_file = OutObj.get("bin_by_ld_file")
gwas_snps_r2_file = OutObj.get("lead_snp_ld_pairs_r2")
matched_file = match_OutObj.get("matched_snps_file")
control_ld_dir = ldexp_OutObj.get("ld_snps_for_control_snps_dir")
ldexp_match_OutObj = ld_expand_all_control_snps(
    lead_snps_ld_counts_file,
    gwas_snps_r2_file,
    matched_file,
    control_ld_dir,
    intermediate_dir,
    ld_thresholds=ld_thresholds,
)

# check ld expanded control sets
ld_expanded_control_sets_file = ldexp_match_OutObj.get("ld_expanded_output")
ld_expanded_control_sets_r2_file = ldexp_match_OutObj.get("ld_r2_expanded_output")
(
    match_summary_by_params_df,
    ldscore_lead_and_ld_df,
    match_quality_per_lead_snp_df,
) = check_ld_expanded_sets(
    snpsnap_db_file,
    ld_expanded_control_sets_file,
    lead_snps_ld_counts_file,
    ld_expanded_control_sets_r2_file,
    ldbuds_r2_threshold,
    intermediate_dir,
)

# intersect annotation
intersectAnnoOutputObj = intersect_all_annotations(
    anno_path_dict,
    two_sided_bool_dict,
    summary_type,
    ld_expanded_control_sets_file,
    intermediate_dir,
)

# calculate genome wide summary of annotations
anno_genom_summary_file = calc_genome_distribution_of_annotations(
    anno_path_dict, anno_summary_dir
)

# calculate trait-wide enrichment
TraitEnrichOutObj = calc_trait_entrichment(
    intersectAnnoOutputObj, anno_genom_summary_file, anno_path_dict, intermediate_dir
)

# organize final outputs
finalOutObj = organize_final_outputs(
    intersectAnnoOutputObj,
    anno_path_dict,
    match_quality_per_lead_snp_df,
    match_summary_by_params_df,
    ldbuds_r2_threshold,
    final_output_dir,
)

# make final plots
intersect_ouputs = make_output_plots(
    intersectAnnoOutputObj, TraitEnrichOutObj, anno_path_dict, final_output_dir
)

logger.debug(
    "[status] Done with master.py. Took {:.2f} minutes.".format(
        (time.time() - master_start) / 60
    )
)
| StarcoderdataPython |
5096990 | # -*- coding: utf-8 -*-
"""
Resting state networks atlas container class
"""
from collections import OrderedDict
import nilearn.image as niimg
import nilearn.plotting as niplot
class RestingStateNetworks:
""" A container class to parse and return useful values/images
from RSN templates.
Parameters
----------
img_file: str
Path to a img-like RSN templates file.
txt_file: str
Path to a text file with the labels for `img_file`.
The expected structure of each line of
this file is <name of the RSN>, <list of indices>.
For example:
<NAME>, 21
Auditory, 17
Sensorimotor, 7, 23, 24, 38, 56, 29
Visual, 46, 64, 67, 48, 39, 59
Default-Mode, 50, 53, 25, 68
Attentional, 34, 60, 52, 72, 71, 55
Frontal, 42, 20, 47, 49
start_from_one: bool
If True it means that the `txt_file` volume indices start from 1.
From 0 otherwise. Be careful, the default is True!
"""
def __init__(self, img_file, txt_file, start_from_one=True):
    """Load the RSN template image and parse its label file."""
    self._img_file = img_file
    self._txt_file = txt_file
    self._start_from_one = start_from_one
    # OrderedDict[int, str]: label-file volume index -> network name.
    self.network_names = self._network_names()
    self._img = niimg.load_img(self._img_file)
    # Sanity-check label count against the number of image volumes.
    self._self_check()
def iter_networks(self):
    """Yield (index, volume image) for every labelled RSN.

    The index is the volume index exactly as written in the text file.
    """
    for net_idx in self.network_names:
        yield net_idx, self._get_img(net_idx)
def iter_networks_names(self):
"""Yield idx (what is in the text_file) and
image of each RSN volume from the text file."""
for idx, name in self.network_names.items():
yield idx, name
def _network_names(self):
"""Return OrderedDict[int]->str, with the index and the name of each
RSN."""
names_idx = self._read_labels_file()
return OrderedDict([(idx, name) for name, idxs in names_idx.items()
for idx in idxs])
def _get_img(self, network_index):
""" Return one RSN given the index in the labels file."""
img_idx = self._img_index(network_index)
return niimg.index_img(self._img, img_idx)
def _img_index(self, network_index):
"""Return the correspoding image index for the given network index."""
if self._start_from_one:
return network_index - 1
return network_index
def _self_check(self):
"""Simple content check."""
n_labels = len(self.network_names)
n_images = self._img.shape[-1]
if n_labels == n_images:
return
if n_labels > n_images:
raise ValueError('The number of labels is larger than the number '
'of images. Got {} and {}.'.format(n_labels, n_images))
# print('The number of volumes in the image is different from the number '
# 'of labels in the text file.\n I am going to use only the ones '
# ' in the text file.')
def _read_labels_file(self):
""" Read the text file and return a dict[str->List[int]] with network
names and blob indices.
"""
lines = [l.rstrip('\n') for l in open(self._txt_file).readlines()]
netblobs = OrderedDict()
for l in lines:
pcs = l.split(',')
netname = pcs[0]
blobs = [int(idx) for idx in pcs[1:]]
netblobs[netname] = blobs
return netblobs
def plot_all(self):
names = self.network_names
for idx, rsn in enumerate(niimg.iter_img(self._img)):
disp = niplot.plot_roi(rsn, title=names.get(idx, None))
def join_networks(self, network_indices):
"""Return a NiftiImage containing a binarised version of the sum of
the RSN images of each of the `network_indices`."""
oimg = self._get_img(network_indices[0]).get_data()
for idx in network_indices[1:]:
oimg += self._get_img(idx).get_data()
return niimg.new_img_like(self._get_img(network_indices[0]),
oimg.astype(bool))
def __iter__(self):
return (img for idx, img in self.iter_networks())
def __len__(self):
return len(self.network_names)
| StarcoderdataPython |
3530778 | <filename>ensembler/commands/evaluate.py
from ensembler.Dataset import Dataset
from ensembler.datasets import Datasets
from argh import arg
import os
import numpy as np
from ensembler.utils import classwise
from ensembler.p_tqdm import p_uimap as mapper
from functools import partial
import pandas as pd
def _iou_metric(y_hat, y):
    # Intersection over union; -1 flags "class absent from ground truth".
    if not y.max() > 0:
        return -1
    intersection = (y_hat * y).sum()
    union = (y_hat + y).astype(bool).sum()
    return intersection / union


def _recall_metric(y_hat, y):
    # Fraction of ground-truth pixels recovered; -1 when the class is absent.
    if not y.max() > 0:
        return -1
    return (y_hat * y).sum() / y.sum()


def _precision_metric(y_hat, y):
    # Fraction of predicted pixels that are correct; -1 when the class is
    # absent from the ground truth, 0 when nothing was predicted.
    if not y.max() > 0:
        return -1
    if not y_hat.max() > 0:
        return 0
    return (y_hat * y).sum() / y_hat.sum()


# Metric name -> callable(y_hat, y); consumed by `process_sample`.
metrics = {
    "iou": _iou_metric,
    "recall": _recall_metric,
    "precision": _precision_metric,
}
def process_sample(ground_truth, dataset, prediction_dir, threshold, classes):
    """Score one ground-truth sample against its saved prediction file.

    Parameters:
        ground_truth: ``((image, mask), image_name)`` pair, as produced by
            zipping the dataloader with the image names.
        dataset: object exposing ``val_images`` and ``test_images`` lists,
            used to decide which prediction subdirectory to read.
        prediction_dir: directory containing ``val/`` and ``test/``
            subdirectories with one ``<image_name>.npz`` file per image.
        threshold: probability cut-off used to binarise the prediction.
        classes: sequence of class names aligned with the mask channels.

    Returns:
        List of per-class/per-metric result dicts; empty when the prediction
        file is missing. Entries with a metric value of -1 (class absent)
        are dropped.

    Raises:
        AttributeError: when `image_name` is in neither image set.
    """
    (image, mask), image_name = ground_truth

    val_dir = os.path.join(prediction_dir, "val")
    test_dir = os.path.join(prediction_dir, "test")

    if image_name in dataset.val_images:
        image_type = "val"
        indir = val_dir
    elif image_name in dataset.test_images:
        image_type = "test"
        indir = test_dir
    else:
        raise AttributeError(f"Unknown image set for {image_name}")

    prediction_file = os.path.join(indir, "{}.npz".format(image_name))

    if not os.path.exists(prediction_file):
        print("Can't find prediction for {}".format(image_name))
        return []

    # Bring the mask to channels-last layout; the image itself is not needed
    # for the metrics (the previous dead `image` conversion was removed).
    mask = np.moveaxis(np.array(mask).squeeze(0), 0, -1)

    # Stored predictions are uint8 in [0, 255]; rescale, then binarise.
    prediction = np.load(prediction_file)["predicted_mask"].astype(
        mask.dtype) / 255
    prediction = np.where(prediction > threshold, 1., 0.)

    results = []
    for metric, metric_func in metrics.items():
        class_scores = classwise(prediction, mask, metric=metric_func)
        for clazz, score in zip(classes, class_scores):
            # -1 means the class did not occur in the ground truth.
            if score > -1:
                results.append({
                    "image": image_name,
                    "class": clazz,
                    "threshold": threshold,
                    "metric": metric,
                    "type": image_type,
                    "value": score
                })
    return results
@arg('dataset', choices=Datasets.choices())
def evaluate(dataset: Datasets, base_dir: str, threshold: float = 0.5):
    """Evaluate saved predictions against the ground truth and write the
    per-image/per-class metric values to ``<base_dir>/metrics.csv``."""
    prediction_dir = os.path.join(os.path.abspath(base_dir), "predictions")

    # NOTE(review): `dataset` is rebound twice — CLI string -> Datasets enum
    # member -> dataset implementation object. Confusing but apparently
    # intentional; confirm against the Datasets API before refactoring.
    dataset = Datasets[dataset]
    datamodule = Dataset(dataset=dataset, batch_size=1)
    dataset = Datasets.get(dataset.value)

    dataloader = datamodule.test_dataloader()
    image_names = datamodule.test_data.dataset.get_image_names()

    results = []
    # Bind the per-run arguments once so the parallel mapper only has to send
    # the (sample, image_name) pair to each worker.
    func = partial(process_sample,
                   dataset=datamodule.test_data.dataset,
                   prediction_dir=prediction_dir,
                   threshold=threshold,
                   classes=dataset.classes)

    for sample_results in mapper(
            func,
            zip(dataloader, image_names),
            total=len(image_names),
            num_cpus=4,
    ):
        results += sample_results

    outfile = os.path.join(os.path.abspath(base_dir), "metrics.csv")
    df = pd.DataFrame(results)
    df.to_csv(outfile, index=False)
| StarcoderdataPython |
5118578 | <gh_stars>0
import datetime
import importlib
import os
import random
import sys
from typing import Dict
import factory
import factory.random
import pandas as pd
from faker import Faker
from pydata_factory.classes import GenFactory, GenModel, Model
from pydata_factory.schema import Schema
# Seed faker and factory-boy at import time so generated data is reproducible.
Faker.seed(42)
factory.random.reseed_random(42)
class GenData:
    """Generate pandas DataFrames of synthetic data from schema definitions
    by writing temporary model/factory classes and importing them."""

    @staticmethod
    def _get_factory_extra(schema: dict, storage: dict) -> dict:
        """Resolve ``depends-on`` attributes against already-generated rows.

        For every attribute declared as ``depends-on: "Class.attr"``, pick a
        random previously generated row of ``Class`` from `storage` and use
        its ``attr`` value. Attributes without ``depends-on`` are skipped.
        """
        extra = {}
        for k_attr, v_attr in schema["attributes"].items():
            if v_attr.get("depends-on"):
                dep_klass, dep_attr = v_attr.get("depends-on").split(".")
                extra[k_attr] = random.choice(storage[dep_klass])[dep_attr]
        return extra

    @staticmethod
    def generate(
        schemas: dict, rows: dict = {}, priorities: list = []
    ) -> Dict[str, pd.DataFrame]:
        """
        Generate fake data from a dataset file.

        :param schemas: mapping of schema key -> schema dict.
        :param rows: optional mapping of schema name -> number of rows to
            generate (defaults are derived from the schemas' ``count`` info).
        :param priorities: optional generation order of the schema keys, so
            that ``depends-on`` targets are generated first.
        :return: mapping of (namespace-)qualified physical name -> DataFrame.
        """
        # Work on a copy: `rows` is a mutable default argument and is written
        # to below; without the copy the row counts would leak between calls
        # (and into the caller's dict). `priorities` is only rebound, so its
        # default is safe.
        rows = dict(rows)

        tmp_dir = "/tmp/pydata_factory_classes"
        os.makedirs(tmp_dir, exist_ok=True)

        # Unique module name per call so repeated imports don't collide.
        script_name = datetime.datetime.now().strftime("pydf_%Y%m%d_%H%M%S_%f")
        script_path = f"{tmp_dir}/{script_name}.py"

        header = (
            "from __future__ import annotations\n"
            "import datetime\n"
            "from dataclasses import dataclass\n"
            "import random\n\n"
            "import factory\n"
            "import factory.random\n"
            "from factory.fuzzy import FuzzyDate, FuzzyDateTime\n"
            "from faker import Faker\n\n"
            "from pydata_factory.classes import Model\n\n"
            "Faker.seed(42)\n"
            "\n"
            "factory.random.reseed_random(42)\n\n\n"
        )

        model_script = ""
        factory_script = ""

        # Emit one model + one factory class per schema, and derive the row
        # count for each schema from its attributes' `count` metadata.
        for k_schema, schema in schemas.items():
            name = schema["name"]
            namespace = schema.get("namespace", "")
            model_script += GenModel.generate(schema) + "\n"
            factory_script += (
                GenFactory.generate(schema, script_name, schemas) + "\n"
            )

            if name not in rows or not rows[name]:
                rows[name] = 1

            for k, v in schema["attributes"].items():
                if "count" not in v:
                    continue
                rows[name] = int(max(rows[name], v["count"]))

        script = model_script + factory_script

        with open(f"{tmp_dir}/__init__.py", "w") as f:
            f.write("")

        with open(script_path, "w") as f:
            f.write(header + script)

        # Make the generated module importable, then import it.
        sys.path.insert(0, tmp_dir)
        lib_tmp = importlib.import_module(script_name)

        dfs = {}

        if not priorities:
            priorities = list(schemas.keys())

        # Generated rows per class name; consumed by `_get_factory_extra`
        # to resolve cross-schema dependencies.
        storage: dict = {}

        for k_schema in priorities:
            schema = schemas[k_schema]
            name = schema["name"]
            physical_name = schema["physical-name"]
            namespace = schema.get("namespace", "")

            class_name = name
            storage[class_name] = []

            df = Schema.to_dataframe(schema)

            physical_dtypes = {
                k_attr: v_attr["physical-dtype"]
                for k_attr, v_attr in schema["attributes"].items()
                if v_attr.get("physical-dtype")
            }

            for _ in range(rows[name]):
                klass = getattr(lib_tmp, f"{class_name}Factory")
                obj = klass(**GenData._get_factory_extra(schema, storage))
                data = obj.__dict__
                # Foreign-key objects are flattened to their id.
                data = {
                    k: v.id if isinstance(v, Model) else v  # type: ignore
                    for k, v in data.items()
                }
                storage[class_name].append(data)

            qualified_name = (
                physical_name
                if not namespace
                else f"{namespace}.{physical_name}"
            )
            dfs[qualified_name] = pd.concat(
                [df, pd.DataFrame(storage[class_name]).drop_duplicates()]
            ).astype(physical_dtypes)

        return dfs
| StarcoderdataPython |
5018083 | from equipmentloans.models import EquipmentLoan
from rest_framework import serializers
from equipmentloans.models import EquipmentLoan
class EquipmentLoanSerializer(serializers.ModelSerializer):
    """Serializes an EquipmentLoan, embedding a compact summary of the
    loaned equipment instead of its bare foreign key."""

    # Read-only nested representation produced by `get_equipment` below.
    equipment = serializers.SerializerMethodField()

    class Meta:
        model = EquipmentLoan
        fields = (
            'id',
            'user',
            'equipment',
            'loan_date',
            'devolution_date',
            'fee'
        )

    def get_equipment(self, obj):
        """Return a dict describing the loaned equipment of `obj`."""
        # Parameter renamed from `object` to avoid shadowing the builtin.
        equipment = obj.equipment
        return {
            "id": equipment.id,
            "name": equipment.name,
            "type": equipment.type,
            "quantity": equipment.quantity
        }
11223663 | from math import floor
from typing import Callable
import numpy as np
import plotly.express as px
from dtw import stepPattern as sp
# yapf: disable
# DTW
# NOTE(review): each sp._c row below appears to be a quadruple
# (pattern id, delta query, delta reference, cost weight), with weight -1
# marking the entry point of a step — confirm against the dtw-python
# stepPattern documentation before editing the tables.

#: a symmetric pattern for DTW
symmetric = sp.StepPattern(
    sp._c(
        # diagonal
        1, 1, 1, -1,
        1, 0, 0, 3,
        # vertical
        2, 1, 0, -1,
        2, 0, 0, 2,
        # horizontal
        3, 0, 1, -1,
        3, 0, 0, 2,
        # 1 vertical + diagonal
        4, 2, 1, -1,
        4, 1, 0, 2,
        4, 0, 0, 2,
        # 1 horizontal + diagonal
        5, 1, 2, -1,
        5, 0, 1, 2,
        5, 0, 0, 2,
    ),
    "NA")

#: an asymmetric pattern which favours the horizontal paths (changing column is
#: easier than changing row)
asymmetric_hor = sp.StepPattern(
    sp._c(
        # diagonal
        1, 1, 1, -1,
        1, 0, 0, 3,
        # vertical
        2, 1, 0, -1,
        2, 0, 0, 2,
        # horizontal
        3, 0, 1, -1,
        3, 0, 0, 1,
        # 1 vertical + diagonal
        4, 2, 1, -1,
        4, 1, 0, 2,
        4, 0, 0, 2,
        # 1 horizontal + diagonal
        5, 1, 2, -1,
        5, 0, 1, 2,
        5, 0, 0, 1,
    ),
    "NA")

#: an asymmetric pattern which favours the vertical paths (changing row is
#: easier than changing column)
asymmetric_ver = sp.StepPattern(
    sp._c(
        # diagonal
        1, 1, 1, -1,
        1, 0, 0, 3,
        # vertical
        2, 1, 0, -1,
        2, 0, 0, 1,
        # horizontal
        3, 0, 1, -1,
        3, 0, 0, 2,
        # 1 vertical + diagonal
        4, 2, 1, -1,
        4, 1, 0, 2,
        4, 0, 0, 1,
        # 1 horizontal + diagonal
        5, 1, 2, -1,
        5, 0, 1, 2,
        5, 0, 0, 2,
    ),
    "NA")

#: an asymmetric pattern which favours the vertical paths (changing row is
#: easier than changing column); this is like dtw.stepPattern.asymmetric, but
#: that one favours horizontal paths
asymmetric1 = sp.StepPattern(
    sp._c(
        # diagonal
        1, 1, 1, -1,
        1, 0, 0, 1,
        # vertical
        2, 0, 1, -1,
        2, 0, 0, 1,
        # second diagonal
        3, 2, 1, -1,
        3, 0, 0, 1
    ),
    "N")
# yapf: enable

# Re-exported stock patterns from dtw-python for a uniform namespace.
symmetric1 = sp.symmetric1
symmetric2 = sp.symmetric2
asymmetric2 = sp.asymmetric

#: all step patterns provided by this module
step_patterns = (
    asymmetric_hor,
    asymmetric_ver,
    symmetric,
    symmetric1,
    symmetric2,
    asymmetric1,
    asymmetric2,
)
def avg_dist(dist: Callable, dist_args: dict):
    """Wrap `dist` so that it averages the element-wise distances of two
    equally indexed sequences; `dist_args` are forwarded as keyword args."""
    def new_dist(x: list, y: list):
        out = 0
        for i in range(len(x)):
            out += dist(x[i], y[i], **dist_args)
        return out / len(x)

    return new_dist


def idx_range(idx, radius, length):
    """
    given an idx, a radius and a maximum length, returns starting and ending
    indices of a window centered at that idx and having that radius, without
    indices > length nor < 0
    """
    return max(0, idx - radius), min(length, idx + radius + 1)


class FedeWindow(object):
    """
    A windowing function which computes a different slanted-band at each point
    based on the local difference of the main slanted diagonal; the local
    radius is computed as:

    `max(
        min_radius,
        floor(
            alpha * avg_dist_fn(
                x[i - beta : i + beta],
                y[j - beta : j + beta]
            )
        )
    )`

    where:

    * N is the length of x
    * M is the length of y
    * avg_dist_fn is the average of dist_fn on each corresponding sample
    * j = floor(i * M / N)

    By words, `beta` is half the length of a sliding window used to compute
    distances between excerpts of `x` and `y` taken along the slanted diagonal.
    The distance is multiplied by `alpha` to get the local radius length.

    `x` and `y` are sequences with shape ([M, N], features)
    """
    def __init__(self,
                 x,
                 y,
                 dist_fn: Callable,
                 alpha=5,
                 beta=5,
                 min_radius=5,
                 dist_args: dict = {}):
        # NOTE(review): `dist_args` is a mutable default argument; it is only
        # read here, so this is currently safe, but a `None` default would be
        # more robust.
        self.alpha = alpha
        self.beta = beta
        self.min_radius = min_radius
        self.dist_fn = avg_dist(dist_fn, dist_args)
        self.compute_mask(x, y)

    def compute_mask(self, x, y):
        """Precompute the boolean band mask for sequences `x` and `y`."""
        # take the distance function
        N = len(x)
        M = len(y)
        transpose = False
        if M > N:
            # x should always be longer than y
            x, y = y, x
            N, M = M, N
            # if we swap x and y, we need to swap the mask too
            transpose = True

        # a mask to remember points
        # np.bool_ instead of np.bool8: the bool8 alias was removed in
        # NumPy 2.0; np.bool_ is available in both old and new versions.
        self.mask = np.zeros((len(x), len(y)), dtype=np.bool_)

        # for each point in x
        for i in range(N):
            # compute the point in y along the diagonal
            j = floor(i * M / N)

            # compute the sliding windows
            start_x, end_x = idx_range(i, self.beta, N)
            start_y, end_y = idx_range(j, self.beta, M)
            _x = x[start_x:end_x]
            _y = y[start_y:end_y]

            # pad the windows
            # NOTE(review): padding assumes `x`/`y` are Python lists (list
            # concatenation) with single-feature zeros `[[0]]`; the `elif`
            # also means a window clipped on both sides (sequence shorter
            # than 2*beta) is only padded on the left — confirm intended.
            if start_x == 0:
                _x = [[0]] * (self.beta - i) + _x
            elif end_x == N:
                _x = _x + [[0]] * (i + self.beta - N)
            if start_y == 0:
                _y = [[0]] * (self.beta - j) + _y
            elif end_y == M:
                _y = _y + [[0]] * (j + self.beta - M)

            # compute the local radius
            lr = max(self.min_radius,
                     floor(self.alpha * self.dist_fn(_x, _y)))

            # set the points inside the local radius to True
            self.mask[slice(*idx_range(i, lr, N)),
                      slice(*idx_range(j, lr, M))] = True

        if transpose:
            self.mask = self.mask.T

    def __call__(self, i, j, query_size=None, reference_size=None):
        return self.mask[i, j]

    def plot(self):
        """
        Return a plotly Figure object representing the heatmap of the mask
        """
        return px.imshow(self.mask, aspect='auto')
def _remove_conflicting_match(arr_x: np.ndarray, arr_y: np.ndarray,
graph_matrix: np.ndarray, target: int):
"""
1. look for repeated values in `arr_x` or `arr_y`, depending on `target`
2. look for the maximum value in `graph_matrix[1]`, at the indices in
`arr_x` and `arr_y` relative to the repeated values
3. among the repeated values in the target, chose the ones corresponding to
the maximum in `graps_matrix[1]`
4. return `arr_x` and `arr_y` without the removed indices
"""
if target == 0:
_target = arr_x
elif target == 1:
_target = arr_y
else:
raise RuntimeError(f"`target` should be 0 or 1, used {target} instead")
arr_mask = np.ones(_target.shape[0], dtype=np.bool8)
unique_vals, unique_count = np.unique(_target, return_counts=True)
for unique_val in unique_vals[unique_count > 1]:
conflicting_idx = np.nonzero(_target == unique_val)[0]
to_keep_idx_of_idx = np.argmax(graph_matrix[1, arr_x[conflicting_idx],
arr_y[conflicting_idx]])
arr_mask[conflicting_idx] = 0
arr_mask[conflicting_idx[to_keep_idx_of_idx]] = 1
return arr_x[arr_mask], arr_y[arr_mask]
def merge_matching_indices(args):
"""
Takes a list of mapping indices, fills the graph matrix counting the number
of times a match happens in the mappings. Then start taking matching from
the most matched and iteratively adding new matching. If two conflicting
matching have the same number of counts, takes the matching which appears
in the longest mapping; in case of parity the first one is taken
"""
# creating the matrix
num_notes = np.max([arg[:, 0].max() for arg in args]) + 1, np.max(
[arg[:, 1].max() for arg in args]) + 1
# dim 0 records the counts, dim 1 records the most long mapping containing
# the matching
graph_matrix = np.zeros((2, num_notes[0], num_notes[1]), dtype=np.int64)
# filling the matrix
for arg in args:
# the count
graph_matrix[0, arg[:, 0], arg[:, 1]] += 1
# the length
L = arg.shape[0]
graph_matrix[1, arg[:, 0], arg[:, 1]] = np.maximum(
graph_matrix[1, arg[:, 0], arg[:, 1]], L)
# merging
# two indices which records references to the original matrix
index_rows = np.arange(num_notes[0])
index_cols = np.arange(num_notes[1])
merged = []
for k in range(len(args), 0, -1):
# take matchings that appear `k` times
candidates_row, candidates_col = np.nonzero(graph_matrix[0] == k)
# remove conflicting candidates
candidates_row, candidates_col = _remove_conflicting_match(
candidates_row, candidates_col, graph_matrix, 0)
candidates_row, candidates_col = _remove_conflicting_match(
candidates_row, candidates_col, graph_matrix, 1)
# add candidates to the output
merged.append(
np.stack([index_rows[candidates_row], index_cols[candidates_col]],
axis=1))
# remove matched notes from graph_matrix
mask_rows = np.ones(graph_matrix.shape[1], dtype=np.bool8)
mask_cols = np.ones(graph_matrix.shape[2], dtype=np.bool8)
mask_rows[candidates_row] = 0
mask_cols[candidates_col] = 0
graph_matrix = graph_matrix[:, mask_rows]
graph_matrix = graph_matrix[:, :, mask_cols]
# remove matched notes from the index
index_rows = index_rows[mask_rows]
index_cols = index_cols[mask_cols]
# re-sort everything and return
merged = np.concatenate(merged, axis=0)
# print(f"Added notes from merging: {len(ref) - L}")
return merged[merged[:, 0].argsort()]
| StarcoderdataPython |
1862100 | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
from __future__ import division, print_function
# viability imports
import pyviability as viab
from pyviability import helper
from pyviability import libviability as lv
from pyviability import tsm_style as topo
# model imports
import examples.AWModel as awm
import examples.ConsumptionModel as cm
import examples.FiniteTimeLakeModel as ftlm
import examples.FiniteTimeLakeModel2 as ftlm2
import examples.GravityPendulumModel as gpm
import examples.PlantModel as pm
import examples.PopulationAndResourceModel as prm
import examples.SwingEquationModel as sqm
import examples.TechChangeModel as tcm
# other useful stuff
import argparse
try:
import argcomplete
except ImportError:
with_argcomplete = False
else:
with_argcomplete = True
import datetime as dt
import functools as ft
import matplotlib as mpl
import matplotlib.pyplot as plt
import numba as nb
import numpy as np
import scipy.optimize as opt
import time
import sys
# Verbosity level forwarded to viab.topology_classification (higher = chattier).
PRINT_VERBOSITY = 2
def save_figure(filename, fig=None):
    """Write `fig` to `filename`, reporting progress on stdout.

    When `fig` is None the current matplotlib figure is saved.
    """
    target = plt.gcf() if fig is None else fig
    print("saving to {!r} ... ".format(filename), end="", flush=True)
    target.savefig(filename)
    print("done")
def plotPhaseSpace( evol, boundaries, steps = 2000, xlabel = "", ylabel = "", colorbar = True, style = {}, alpha = None , maskByCond = None, invertAxes = False, ax = plt, lwspeed = False):
    """Stream-plot the phase space of `evol` on a regular grid.

    :param evol: callable invoked as ``evol(XY, 0)`` on a meshgrid array,
        returning the pair of derivative grids ``(dX, dY)``.
    :param boundaries: ``(Xmin, Ymin, Xmax, Ymax)`` of the plotted region.
    :param steps: number of grid points per axis.
    :param xlabel, ylabel: axis labels (swapped when `invertAxes` is set).
    :param colorbar: add a colorbar (only valid when no fixed color is given).
    :param style: keyword arguments for ``ax.streamplot``, or the special
        string ``"dx"`` to color by the x-derivative.
        NOTE(review): a dict `style` gets its "linestyle" entry popped, i.e.
        the caller's dict is mutated, and it is a mutable default argument.
    :param alpha: optional opacity for the stream lines.
    :param maskByCond: optional ``maskByCond(X, Y)`` boolean mask of points
        to exclude from the flow.
    :param invertAxes: transpose the plot (swap x and y data and labels).
    :param ax: matplotlib axes (or the ``pyplot`` module itself) to draw on.
    :param lwspeed: scale the given linewidth by the local flow speed.
    """
    # separate the boundaries
    Xmin, Ymin, Xmax, Ymax = boundaries

    # check boundaries sanity
    assert Xmin < Xmax
    assert Ymin < Ymax

    # build the grid
    X = np.linspace(Xmin, Xmax, steps)
    Y = np.linspace(Ymin, Ymax, steps)
    XY = np.array(np.meshgrid(X, Y))

    # if Condition given, set everything to zero that fulfills it
    if maskByCond:
        mask = maskByCond(XY[0], XY[1])
        XY[0] = np.ma.array(XY[0], mask = mask)
        XY[1] = np.ma.array(XY[1], mask = mask)
        ## dummy0 = np.zeros((steps,steps))
        ## XY[0] = np.where(mask, XY[0], dummy0)
        ## XY[1] = np.where(mask, XY[1], dummy0)

    # calculate the changes ... input is numpy array
    dX, dY = evol(XY,0) # that is where deriv from Vera is mapped to

    if invertAxes:
        data = [Y, X, np.transpose(dY), np.transpose(dX)]
    else:
        data = [X, Y, dX, dY]

    # separate linestyle (streamplot does not take it as a keyword argument)
    linestyle = None
    if type(style) == dict and "linestyle" in style.keys():
        linestyle = style["linestyle"]
        style.pop("linestyle")

    # do the actual plot
    if style == "dx":
        c = ax.streamplot(*data, color=dX, linewidth=5*dX/dX.max(), cmap=plt.cm.autumn)
    elif style:
        speed = np.sqrt(data[2]**2 + data[3]**2)
        if "linewidth" in style and style["linewidth"] and lwspeed:
            style["linewidth"] = style["linewidth"] * speed/np.nanmax(speed)
            ## print speed
            ## print np.nanmax(speed)
        c = ax.streamplot(*data, **style)
    else:
        # default style formatting: color and width follow the flow speed
        speed = np.sqrt(dX**2 + dY**2)
        c = ax.streamplot(*data, color=speed, linewidth=5*speed/speed.max(), cmap=plt.cm.autumn)

    # set opacity of the lines
    if alpha:
        c.lines.set_alpha(alpha)

    # set linestyle
    if linestyle:
        c.lines.set_linestyle(linestyle)

    # add labels if given
    if invertAxes:
        temp = xlabel
        xlabel = ylabel
        ylabel = temp
    if xlabel:
        # `plt` (the module) and an Axes object expose different label APIs
        if ax == plt:
            ax.xlabel(xlabel)
        else:
            ax.set_xlabel(xlabel)
    if ylabel:
        if ax == plt:
            ax.ylabel(ylabel)
        else:
            ax.set_ylabel(ylabel)

    # add colorbar
    if colorbar:
        assert not "color" in style.keys(), "you want a colorbar for only one color?"
        ax.colorbar()
def generate_example(default_rhss,
                     management_rhss,
                     sunny_fct,
                     boundaries,
                     default_parameters=[],
                     management_parameters=[],
                     periodicity=[],
                     default_rhssPS=None,
                     management_rhssPS=None,
                     out_of_bounds=True,
                     compute_eddies=False,
                     rescaling_epsilon=1e-6,
                     stepsize=None,
                     xlabel=None,
                     ylabel=None,
                     set_ticks=None,
                     ):
    """Generate the example function for each example.

    NOTE(review): the list defaults are mutable default arguments; they are
    only rebound (never mutated) below, so this is currently safe.

    :param default_rhss: list of callables
        length 1, right-hand-side function of the default option. For future
        compatibility, this was chosen to be a list already.
    :param management_rhss: list of callables
        right-hand-side functions of the management options
    :param sunny_fct: callable
        function that determines whether a point / an array of points is in
        the sunny region
    :param boundaries: array-like, shape : (dim, 2)
        for each dimension of the model, give the lower and upper boundary
    :param default_parameters: list of dict, optional
        length 1, the dict contains the parameter values for the default
        option. For future compatibility, this was chosen to be a list already.
    :param management_parameters: list of dict, optional
        each dict contains the parameter values for each management option
        respectively
    :param periodicity: list, optional
        provide the periodicity of the model's phase space
    :param default_rhssPS: list of callables, optional
        if the default_rhss are not callable for arrays (which is necessary
        for the plotting of the phase space), then provide a corresponding
        (list of) function(s) here
    :param management_rhssPS: list of callables, optional
        if the management_rhss are not callable for arrays (which is necessary
        for the plotting of the phase space), then provide a corresponding
        (list of) function(s) here
    :param out_of_bounds: bool, default : True
        If going out of the boundaries is interpreted as being in the
        undesirable region.
    :param compute_eddies:
        Should the eddies be computed? (Because the computation of Eddies
        might take long, this is skipped for models where it's known that
        there are no Eddies.)
    :param stepsize:
        step size used during the viability kernel computation
    :param rescaling_epsilon:
        The epsilon for the time homogenization, see
        https://arxiv.org/abs/1706.04542 for details.
    :param xlabel:
    :param ylabel:
    :param set_ticks:
    :return: callable
        function that when being called computes the specific example
    """
    # thin wrapper around plotPhaseSpace that flattens the boundaries format
    plotPS = lambda rhs, boundaries, style: plotPhaseSpace(rhs, [boundaries[0][0], boundaries[1][0], boundaries[0][1], boundaries[1][1]], colorbar=False, style=style)

    if not default_parameters:
        default_parameters = [{}] * len(default_rhss)
    if not management_parameters:
        management_parameters = [{}] * len(management_rhss)

    xlim, ylim = boundaries

    # fall back to the integration right-hand sides for phase-space plotting
    if default_rhssPS is None:
        default_rhssPS = default_rhss
    if management_rhssPS is None:
        management_rhssPS = management_rhss

    def example_function(example_name,
                         grid_type="orthogonal",
                         backscaling=True,
                         plotting="points",
                         run_type="integration",
                         save_to="",
                         n0=80,
                         hidpi=False,
                         use_numba=True,
                         stop_when_finished="all",
                         flow_only=False,
                         mark_fp=None,
                         ):
        """Run the topology classification for this model and plot it.

        Closure over the model definition passed to `generate_example`.
        NOTE(review): reads the module-level `ARGS` namespace (set in the
        `__main__` block) for the `--title` flag; it also mutates the global
        `lv.STEPSIZE`; `mark_fp` is accepted but unused here.
        """
        plot_points = (plotting == "points")
        plot_areas = (plotting == "areas")

        grid, scaling_factor, offset, x_step = viab.generate_grid(boundaries,
                                                                  n0,
                                                                  grid_type,
                                                                  periodicity = periodicity) #noqa
        states = np.zeros(grid.shape[:-1], dtype=np.int16)

        NB_NOPYTHON = False
        # build the (numba-jitted) run functions for default and management
        default_runs = [viab.make_run_function(
            nb.jit(rhs, nopython=NB_NOPYTHON),
            helper.get_ordered_parameters(rhs, parameters),
            offset,
            scaling_factor,
            returning=run_type,
            rescaling_epsilon=rescaling_epsilon,
            use_numba=use_numba,
        ) for rhs, parameters in zip(default_rhss, default_parameters)] #noqa
        management_runs = [viab.make_run_function(
            nb.jit(rhs, nopython=NB_NOPYTHON),
            helper.get_ordered_parameters(rhs, parameters),
            offset,
            scaling_factor,
            returning=run_type,
            rescaling_epsilon=rescaling_epsilon,
            use_numba=use_numba,
        ) for rhs, parameters in zip(management_rhss, management_parameters)] #noqa

        sunny = viab.scaled_to_one_sunny(sunny_fct, offset, scaling_factor)

        # adding the figure here already in case VERBOSE is set
        # this makes only sense, if backscaling is switched off
        if backscaling:
            figure_size = np.array([7.5, 7.5])
        else:
            figure_size = np.array([7.5, 2.5 * np.sqrt(3) if grid_type == "simplex-based" else 7.5 ])
        if hidpi:
            figure_size = 2 * figure_size
        figure_size = tuple(figure_size.tolist())

        if (not backscaling) and plot_points:
            # figure_size = (15, 5 * np.sqrt(3) if grid_type == "simplex-based" else 15)
            # figure_size = (15, 5 * np.sqrt(3) if grid_type == "simplex-based" else 15)
            fig = plt.figure(example_name, figsize=figure_size, tight_layout=True)

        # print(lv.STEPSIZE)
        # lv.STEPSIZE = 2 * x_step
        # choose the (global) step size for the viability computation
        if stepsize is None:
            lv.STEPSIZE = 2 * x_step * max([1, np.sqrt( n0 / 30 )]) # prop to 1/sqrt(n0)
        else:
            lv.STEPSIZE = stepsize
        print(lv.STEPSIZE)
        # print(lv.STEPSIZE)
        # assert False
        print("STEPSIZE / x_step = {:5.3f}".format(lv.STEPSIZE / x_step))

        start_time = time.time()
        # the actual computation: fills `states` in place
        viab.topology_classification(grid, states, default_runs, management_runs, sunny,
                                     periodic_boundaries = periodicity,
                                     grid_type=grid_type,
                                     compute_eddies=compute_eddies,
                                     out_of_bounds=out_of_bounds,
                                     stop_when_finished=stop_when_finished,
                                     verbosity=PRINT_VERBOSITY,
                                     )
        time_diff = time.time() - start_time

        print("run time: {!s}".format(dt.timedelta(seconds=time_diff)))

        if backscaling:
            # plot in the model's original coordinates
            grid = viab.backscaling_grid(grid, scaling_factor, offset)

            if plot_points:
                fig = plt.figure(example_name, figsize=figure_size, tight_layout=True)
                # fig = plt.figure(figsize=(15, 15), tight_layout=True)
                if not flow_only:
                    viab.plot_points(grid, states, markersize=30 if hidpi else 15)
                if ARGS.title:
                    plt.gca().set_title('example: ' + example_name, fontsize=20)
                [plotPS(ft.partial(rhs, **parameters), boundaries, topo.styleDefault) #noqa
                 for rhs, parameters in zip(default_rhssPS, default_parameters)] #noqa
                [plotPS(ft.partial(rhs, **parameters), boundaries, style)
                 for rhs, parameters, style in zip(management_rhssPS, management_parameters, [topo.styleMod1, topo.styleMod2])] #noqa

                if set_ticks is not None:
                    set_ticks()
                else:
                    plt.xlim(xlim)
                    plt.ylim(ylim)

                if xlabel is not None:
                    plt.xlabel(xlabel)
                if ylabel is not None:
                    plt.ylabel(ylabel)

                if save_to:
                    save_figure(save_to)

            if plot_areas:
                fig = plt.figure(example_name, figsize=figure_size, tight_layout=True)
                if not flow_only:
                    viab.plot_areas(grid, states)
                if ARGS.title:
                    plt.gca().set_title('example: ' + example_name, fontsize=20)
                [plotPS(ft.partial(rhs, **parameters), boundaries, topo.styleDefault) #noqa
                 for rhs, parameters in zip(default_rhssPS, default_parameters)] #noqa
                [plotPS(ft.partial(rhs, **parameters), boundaries, style)
                 for rhs, parameters, style in zip(management_rhssPS, management_parameters, [topo.styleMod1, topo.styleMod2])] #noqa

                if set_ticks is not None:
                    set_ticks()
                else:
                    plt.xlim(xlim)
                    plt.ylim(ylim)

                if xlabel is not None:
                    plt.xlabel(xlabel)
                if ylabel is not None:
                    plt.ylabel(ylabel)

                if save_to:
                    save_figure(save_to)

        else:
            # plot in the rescaled (unit / simplex) coordinates
            plot_x_limits = [0, 1.5 if grid_type == "simplex-based" else 1]
            plot_y_limits = [0, np.sqrt(3)/2 if grid_type == "simplex-based" else 1]
            default_PSs = [viab.make_run_function(rhs, helper.get_ordered_parameters(rhs, parameters), offset, scaling_factor, returning="PS") #noqa
                           for rhs, parameters in zip(default_rhssPS, default_parameters)] #noqa
            management_PSs = [viab.make_run_function(rhs, helper.get_ordered_parameters(rhs, parameters), offset, scaling_factor, returning="PS") #noqa
                              for rhs, parameters in zip(management_rhssPS, management_parameters)] #noqa

            if plot_points:
                # figure already created above
                if not flow_only:
                    viab.plot_points(grid, states, markersize=30 if hidpi else 15)
                if ARGS.title:
                    plt.gca().set_title('example: ' + example_name, fontsize=20)
                [plotPS(rhs, [plot_x_limits, plot_y_limits], topo.styleDefault) for rhs, parameters in zip(default_PSs, default_parameters)]
                [plotPS(rhs, [plot_x_limits, plot_y_limits], style) for rhs, parameters, style in zip(management_PSs, management_parameters, [topo.styleMod1, topo.styleMod2])] #noqa

                plt.axis("equal")
                plt.xlim(plot_x_limits)
                plt.ylim(plot_y_limits)

                if save_to:
                    save_figure(save_to)

            if plot_areas:
                fig = plt.figure(example_name, figsize=(15, 15), tight_layout=True)
                if not flow_only:
                    viab.plot_areas(grid, states)
                if ARGS.title:
                    plt.gca().set_title('example: ' + example_name, fontsize=20)
                [plotPS(rhs, [plot_x_limits, plot_y_limits], topo.styleDefault) for rhs, parameters in zip(default_PSs, default_parameters)]
                [plotPS(rhs, [plot_x_limits, plot_y_limits], style) for rhs, parameters, style in zip(management_PSs, management_parameters, [topo.styleMod1, topo.styleMod2])] #noqa

                plt.axis("equal")
                plt.xlim(plot_x_limits)
                plt.ylim(plot_y_limits)

                if save_to:
                    save_figure(save_to)

        print()
        viab.print_evaluation(states)

    return example_function
# Registry of the runnable examples: maps the CLI name of each example to the
# ready-to-call function produced by `generate_example` above.
EXAMPLES = {
    "finite-time-lake":
        generate_example([ftlm.rhs_default],
                         [ftlm.rhs_management],
                         ftlm.sunny,
                         [[-5, 5], [-5, 5]],
                         out_of_bounds=True,
                         default_rhssPS=[ftlm.rhs_default_PS],
                         management_rhssPS=[ftlm.rhs_management_PS],
                         ),
    "finite-time-lake2":
        generate_example(
            [ftlm2.rhs_default],
            [ftlm2.rhs_management],
            ftlm2.sunny,
            [[-5, 5], [-5, 5]],
            out_of_bounds=True,
            xlabel="$x$",
            ylabel="$y$",
        ),
    "aw-model-dg":
        generate_example([awm.AW_rescaled_rhs],
                         [awm.AW_rescaled_rhs],
                         awm.AW_rescaled_sunny,
                         [[1e-3, 1 - 1e-3], [1e-3, 1 - 1e-3]],
                         default_parameters=[{"beta": awm.beta_default, "theta": awm.theta_default}],
                         management_parameters=[{"beta": awm.beta_DG, "theta": awm.theta_default}],
                         out_of_bounds=False,
                         xlabel=r"excess atmospheric carbon $A$ [GtC]",
                         ylabel=r"economic production $Y$ [trillion US\$]",
                         set_ticks=awm.set_ticks,
                         stepsize=0.055,
                         ),
    "aw-model-dg-bifurc":
        generate_example([awm.AW_rescaled_rhs],
                         [awm.AW_rescaled_rhs],
                         awm.AW_rescaled_sunny,
                         [[1e-3, 1 - 1e-3], [1e-3, 1 - 1e-3]],
                         default_parameters=[{"beta": awm.beta_default, "theta": awm.theta_default}],
                         management_parameters=[{"beta": 0.035, "theta": awm.theta_default}],
                         out_of_bounds=False,
                         compute_eddies=True,
                         xlabel=r"excess atmospheric carbon $A$ [GtC]",
                         ylabel=r"economic production $Y$ [trillion US\$]",
                         set_ticks=awm.set_ticks,
                         ),
    "aw-model-srm":
        generate_example([awm.AW_rescaled_rhs],
                         [awm.AW_rescaled_rhs],
                         awm.AW_rescaled_sunny,
                         [[1e-8, 1 - 1e-8], [1e-8, 1 - 1e-8]],
                         default_parameters=[{"beta": awm.beta_default, "theta": awm.theta_default}],
                         management_parameters=[{"beta": awm.beta_default, "theta": awm.theta_SRM}],
                         out_of_bounds=False,
                         compute_eddies=True,
                         ),
    ## The Pendulum example was taken out, because it is hamiltonian, making the whole algorithm getting unstable.
    ## This would be a future task to fix with an algorithm that does not simply linearly approximate.
    # "pendulum":
    #     generate_example([gpm.pendulum_rhs],
    #                      [gpm.pendulum_rhs],
    #                      gpm.pendulum_sunny,
    #                      [[0, 2*np.pi], [-2.2, 1.2]],
    #                      default_parameters=[{"a": 0.0}],
    #                      management_parameters=[{"a": 0.6}],
    #                      periodicity=[1, -1],
    #                      compute_eddies=True,
    #                      rescaling_epsilon=1e-3,
    #                      ),
    "swing-eq":
        generate_example([sqm.swing_rhs],
                         [sqm.swing_rhs],
                         sqm.swing_sunny,
                         [[-0.5*np.pi, 1.5*np.pi], [-1, 1]],
                         default_parameters=[{"alpha": 0.2, "P": 0.3, "K": 0.5}],
                         management_parameters=[{"alpha": 0.2, "P": 0.0, "K": 0.5}],
                         periodicity=[1, -1],
                         compute_eddies=False,
                         rescaling_epsilon=1e-3,
                         out_of_bounds=False,  # set because it creates a nice picture for these specific parameters
                         stepsize=0.035,
                         ),
    "plants":
        generate_example([pm.plants_rhs],
                         [pm.plants_rhs]*2,
                         pm.plants_sunny,
                         [[0, 1], [0, 1]],
                         default_parameters=[{"ax": 0.2, "ay": 0.2, "prod": 2}],
                         management_parameters=[{"ax": 0.1, "ay": 0.1, "prod": 2}, {"ax": 2, "ay": 0, "prod": 2}],
                         out_of_bounds=False,
                         stepsize=0.035,
                         ),
    ## Taken out because it contains a critical point.
    # "tech-change":
    #     generate_example([tcm.techChange_rhs],
    #                      [tcm.techChange_rhs],
    #                      tcm.techChange_sunny,
    #                      [[0, 1], [0, 2]],
    #                      default_parameters=[
    #                          dict(rvar = 1, pBmin = 0.15, pE = 0.3, delta = 0.025, smax = 0.3, sBmax = 0.)],
    #                      management_parameters=[
    #                          dict(rvar = 1, pBmin = 0.15, pE = 0.3, delta = 0.025, smax = 0.3, sBmax = 0.5)],
    #                      management_rhssPS = [tcm.techChange_rhsPS],
    #                      ),
    "easter-a":
        generate_example([prm.easter_rhs],
                         [prm.easter_rhs],
                         ft.partial(prm.easter_sunny, xMinimal=1000, yMinimal=3000),
                         [[0, 35000], [0, 18000]],
                         default_parameters=[
                             dict(phi = 4, r = 0.04, gamma = 4 * 10 ** (-6), delta = -0.1, kappa = 12000)],
                         management_parameters=[
                             dict(phi = 4, r = 0.04, gamma = 2.8 * 10 ** (-6), delta = -0.1, kappa = 12000)],
                         out_of_bounds=[[False, True], [False, True]],
                         ),
    "easter-b":
        generate_example([prm.easter_rhs],
                         [prm.easter_rhs],
                         ft.partial(prm.easter_sunny, xMinimal=1200, yMinimal=2000),
                         [[0, 9000], [0, 9000]],
                         default_parameters=[
                             dict(phi = 4, r = 0.04, gamma = 8 * 10 ** (-6), delta = -0.15, kappa = 6000)],
                         management_parameters=[
                             dict(phi = 4, r = 0.04, gamma = 13.6 * 10 ** (-6), delta = -0.15, kappa = 6000)],
                         ),
    "easter-c":
        generate_example([prm.easter_rhs],
                         [prm.easter_rhs],
                         ft.partial(prm.easter_sunny, xMinimal=4000, yMinimal=3000),
                         [[0, 9000], [0, 9000]],
                         default_parameters=[
                             dict(phi = 4, r = 0.04, gamma = 8 * 10 ** (-6), delta = -0.15, kappa = 6000)],
                         management_parameters=[
                             dict(phi = 4, r = 0.04, gamma = 16 * 10 ** (-6), delta = -0.15, kappa = 6000)],
                         compute_eddies=True,
                         ),
    "easter-d":
        generate_example([prm.easter_rhs],
                         [prm.easter_rhs],
                         ft.partial(prm.easter_sunny, xMinimal=4000, yMinimal=3000),
                         [[0, 9000], [0, 9000]],
                         default_parameters=[
                             dict(phi = 4, r = 0.04, gamma = 8 * 10 ** (-6), delta = -0.15, kappa = 6000)],
                         management_parameters=[
                             dict(phi = 4, r = 0.04, gamma = 11.2 * 10 ** (-6), delta = -0.15, kappa = 6000)],
                         compute_eddies=True,
                         ),
    "consum":
        generate_example([],
                         [cm.consum_rhs]*2,
                         cm.consum_sunny,
                         [[0, 2], [0, 3]],
                         default_parameters = [],
                         management_parameters = [dict(u = -0.5),
                                                  dict(u = 0.5)],
                         management_rhssPS = [cm.consum_rhsPS]*2,
                         ),
}
# CLI choice lists derived from the EXAMPLES registry defined above.
# "all" is a reserved meta-choice, so it must never clash with a real example name.
AVAILABLE_EXAMPLES = sorted(EXAMPLES)
# Idiom fix: `x not in y` (PEP 8 / E713) instead of `not x in y`.
assert "all" not in AVAILABLE_EXAMPLES
MODEL_CHOICES = ["all"] + AVAILABLE_EXAMPLES
GRID_CHOICES = ["orthogonal", "simplex-based"]
PLOT_CHOICES = ["points", "areas"]
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""A test script for the standard examples
If you would like to know more details on the actual meaning of the examples,
please contact the author. Generally, you can understand the dynamics of the
models by carefully analyzing the flows, that are plotted. The default flow
is shown with thick lines in light blue, the management flows with thin, dark blue,
dotted (or dashed) lines.
"""
    )
    parser.add_argument("models", metavar="model", nargs="+",
                        choices=MODEL_CHOICES,
                        help="the model to be run or all; space separated list\n"
                        "allowed values are: " + ", ".join(MODEL_CHOICES))
    parser.add_argument("-b", "--no-backscaling", action="store_false", dest="backscaling",
                        help="omit backscaling after the topology/viability computation")
    parser.add_argument("-f", "--force", action="store_true",
                        help="overwrite existing files")
    parser.add_argument("--flow-only", action="store_true",
                        help="plot only the models flow, nothing else")
    parser.add_argument("--follow", nargs=2, metavar=("point", "dist"),
                        help="follow the points that are at most 'dist' away from 'point'")
    parser.add_argument("-g", "--grid", choices=GRID_CHOICES, default=GRID_CHOICES[0],
                        help="grid type")
    parser.add_argument("--hidpi", action="store_true",
                        help="fix some things so everything looks okai on Hi-DPI screens")
    parser.add_argument("-i", "--integrate", action="store_const", dest="run_type",
                        const="integration", default="linear",
                        help="integrate instead of using linear approximation")
    # BUG FIX: the original used nargs=1, which makes argparse store a one-element
    # LIST, and eval() below would then fail on it (paper mode assigns a plain
    # string).  A single positional value (the default nargs) is what eval expects.
    parser.add_argument("--mark-fp", metavar="fp-approximation",
                        help="mark the fixed point of the dynamics which is close to 'fp-approximation'")
    parser.add_argument("-n", "--num", type=int, default=80,
                        help="number of points in each dimension")
    parser.add_argument("--no-title", dest="title", action="store_false",
                        help="remove the title from the plot")
    parser.add_argument("--paper", action="store_true",
                        help="create a plot that has been used for the paper")
    parser.add_argument("-p", "--plot", choices=PLOT_CHOICES, default=PLOT_CHOICES[0],
                        help="how to plot the results")
    parser.add_argument("-r", "--remember", action="store_true",
                        help="remember already calculated values in a dict" \
                        " (might be slow for a large grids)")
    parser.add_argument("-s", "--save", metavar="output-file", nargs="?", default="",
                        help="save the picture; if no 'output-file' is given, a name is generated")
    # BUG FIX: the original read
    #   help="... choose from: " ", ".join(lv.TOPOLOGY_STEP_LIST)
    # Implicit string-literal concatenation turned the ENTIRE message into the
    # join separator; '+' restores the intended "message + comma-separated list".
    parser.add_argument("--stop-when-finished", default=lv.TOPOLOGY_STEP_LIST[-1], metavar="computation-step",
                        choices=lv.TOPOLOGY_STEP_LIST,
                        help="stop when the computation of 'computation-step' is finished, choose from: " + ", ".join(lv.TOPOLOGY_STEP_LIST))
    parser.add_argument("--no-numba", dest="use_numba", action="store_false",
                        help="do not use numba jit-compiling")
    if with_argcomplete:
        # use argcomplete auto-completion
        argcomplete.autocomplete(parser)
    ARGS = parser.parse_args()
    if "all" in ARGS.models:
        ARGS.models = AVAILABLE_EXAMPLES
    if len(ARGS.models) > 1 and ARGS.save:
        parser.error("computing multiple models but giving only one file name " \
                     "where the pictures should be save to doesn't make sense " \
                     "(to me at least)")
    if ARGS.paper:
        # Paper mode pins the plot style (large fonts, fine grid, no title).
        ARGS.hidpi = True
        ARGS.title = False
        ARGS.num = 200
        mpl.rcParams["axes.labelsize"] = 36
        mpl.rcParams["xtick.labelsize"] = 32
        mpl.rcParams["ytick.labelsize"] = 32
        ARGS.mark_fp = "[0.5,0.5]"
    if ARGS.mark_fp is not None:
        # SECURITY NOTE: eval() executes arbitrary code from the command line.
        # It is kept because it supports expressions such as "[0, 2*np.pi]"
        # (ast.literal_eval would not); acceptable for a local test script only.
        ARGS.mark_fp = np.array(eval(ARGS.mark_fp))
    for model in ARGS.models:
        save_to = ARGS.save
        if save_to is None:  # -s or --save was set, but no filename was given
            save_to = "_".join([model, ARGS.grid, ARGS.plot]) + ".jpg"
        print()
        print("#"*80)
        print("computing example: " + model)
        print("#"*80)
        EXAMPLES[model](model,
                        grid_type=ARGS.grid,
                        backscaling=ARGS.backscaling,
                        plotting=ARGS.plot,
                        run_type=ARGS.run_type,
                        save_to=save_to,
                        n0=ARGS.num,
                        hidpi=ARGS.hidpi,
                        use_numba=ARGS.use_numba,
                        stop_when_finished=ARGS.stop_when_finished,
                        flow_only=ARGS.flow_only,
                        )
        plt.show()
| StarcoderdataPython |
1930319 | """
Process Management
==================
Ensure a process matching a given pattern is absent.
.. code-block:: yaml
httpd-absent:
process.absent:
- name: apache2
"""
def __virtual__():
    """Only load this state module if the ps execution module provides pkill."""
    if "ps.pkill" not in __salt__:
        return (False, "ps module could not be loaded")
    return True
def absent(name, user=None, signal=None):
    """
    Ensures that the named command is not running.

    name
        The pattern to match.

    user
        The user to which the process belongs

    signal
        Signal to send to the process(es).
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    if __opts__["test"]:
        # Dry run: only report how many processes would be targeted.
        running = __salt__["ps.pgrep"](name, user=user)
        ret["result"] = None
        if running:
            ret["comment"] = "{} processes will be killed".format(len(running))
        else:
            ret["comment"] = "No matching processes running"
        return ret

    # Real run: kill with a specific signal if one was requested.
    pkill_kwargs = {"user": user, "full": True}
    if signal:
        pkill_kwargs["signal"] = signal
    status = __salt__["ps.pkill"](name, **pkill_kwargs)

    ret["result"] = True
    if status:
        ret["comment"] = "Killed {} processes".format(len(status["killed"]))
        ret["changes"] = status
    else:
        ret["comment"] = "No matching processes running"
    return ret
| StarcoderdataPython |
4920562 | <gh_stars>1-10
# Linear regression example from the Stanford University TensorFlow course
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
'''
Good ole linear regression: find the best linear fit to our data
'''
def generate_dataset():
    """Sample a noisy linear dataset.

    Returns a pair (x, y) of 101 points where y = 2x + e and e is
    Gaussian noise with standard deviation 0.3.
    """
    xs = np.linspace(-1, 1, 101)
    noise = np.random.randn(*xs.shape) * 0.3
    ys = 2 * xs + noise
    return xs, ys
def linear_regression():
    """Build the TF1 graph for a one-parameter linear model y_pred = w * x.

    Returns the placeholders (x, y), the prediction op and the
    mean-squared-error loss op.
    """
    x = tf.placeholder(tf.float32, shape=(None,), name='x')
    y = tf.placeholder(tf.float32, shape=(None,), name='y')

    with tf.variable_scope('lreg') as scope:
        w = tf.Variable(np.random.normal(), name='W')
        # BUG FIX: tf.mul was removed in TensorFlow 1.0 (this file already
        # uses tf.global_variables_initializer, i.e. TF >= 0.12/1.x APIs);
        # tf.multiply is the supported name.
        y_pred = tf.multiply(w, x)
        loss = tf.reduce_mean(tf.square(y_pred - y))
    return x, y, y_pred, loss
def run():
    """Fit the linear model by gradient descent and save a scatter+fit plot."""
    x_batch, y_batch = generate_dataset()
    x, y, y_pred, loss = linear_regression()
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed = {x: x_batch, y: y_batch}
        # 30 gradient-descent steps over the full batch.
        for _ in range(30):
            loss_val, _ = sess.run([loss, train_step], feed_dict=feed)
            print('loss:', loss_val.mean())
        y_pred_batch = sess.run(y_pred, {x: x_batch})

    plt.figure(1)
    plt.scatter(x_batch, y_batch)
    plt.plot(x_batch, y_pred_batch)
    plt.savefig('plot.png')


if __name__ == '__main__':
    run()
8062890 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
from DQM.SiPixelPhase1Common.HistogramManager_cfi import *
# Each object below is a HistogramManager booking specification: a clone() of
# DefaultHisto with ranges, labels, output folder and grouping specs overridden.
# All histograms are booked under the "PixelPhase1V/Hits" DQM folder.

# 2D histogram: simhit time of flight vs radial position r.
SiPixelPhase1HitsTofR = DefaultHisto.clone(
    name = "tof_r",
    title = "Time of flight vs r",
    range_min = 0, range_max = 60, range_nbins = 2500,
    range_y_min = 0.0, range_y_max = 100.0, range_y_nbins = 100,
    xlabel = "r", ylabel = "Time of flight",
    topFolderName = "PixelPhase1V/Hits",
    dimensions = 2,
    specs = VPSet(
        Specification().groupBy("").save(),
    )
)

# Energy loss of the simhit, per barrel layer / forward disk.
SiPixelPhase1HitsEnergyLoss = DefaultHisto.clone(
    name = "eloss",
    title = "Energy loss",
    range_min = 0, range_max = 0.001, range_nbins = 100,
    xlabel = "Energy Loss",
    dimensions = 1,
    topFolderName = "PixelPhase1V/Hits",
    specs = VPSet(
        Specification().groupBy("PXBarrel/PXLayer").save(),
        Specification().groupBy("PXForward/PXDisk").save(),
        StandardSpecification2DProfile,
    )
)

# Difference between entry and exit point of the simhit, per coordinate.
SiPixelPhase1HitsEntryExitX = DefaultHisto.clone(
    name = "entry_exit_x",
    title = "Entryx-Exitx",
    range_min = -0.03, range_max = 0.03, range_nbins = 100,
    xlabel = "",
    dimensions = 1,
    topFolderName = "PixelPhase1V/Hits",
    specs = VPSet(
        Specification().groupBy("PXBarrel/PXLayer").save(),
        Specification().groupBy("PXForward/PXDisk").save(),
        StandardSpecification2DProfile,
    )
)

SiPixelPhase1HitsEntryExitY = SiPixelPhase1HitsEntryExitX.clone(
    name = "entry_exit_y",
    title = "Entryy-Exity",
    xlabel = "",
    range_min = -0.03, range_max = 0.03, range_nbins = 100,
)

SiPixelPhase1HitsEntryExitZ = SiPixelPhase1HitsEntryExitX.clone(
    name = "entry_exit_z",
    title = "Entryz-Exitz",
    xlabel = "",
    range_min = 0.0, range_max = 0.05, range_nbins = 100,
)

# Local hit position, one histogram per coordinate (x/y/z/phi/eta); all are
# clones of the X one with only name/title/range overridden.
SiPixelPhase1HitsPosX = DefaultHisto.clone(
    name = "local_x",
    title = "X position of Hits",
    range_min = -3.5, range_max = 3.5, range_nbins = 100,
    xlabel = "Hit position X dimension",
    dimensions = 1,
    topFolderName = "PixelPhase1V/Hits",
    specs = VPSet(
        Specification().groupBy("PXBarrel/PXLayer").save(),
        Specification().groupBy("PXForward/PXDisk").save(),
        StandardSpecification2DProfile,
    )
)

SiPixelPhase1HitsPosY = SiPixelPhase1HitsPosX.clone(
    name = "local_y",
    title = "Y position of Hits",
    xlabel = "Hit position Y dimension",
    range_min = -3.5, range_max = 3.5, range_nbins = 100,
)

SiPixelPhase1HitsPosZ = SiPixelPhase1HitsPosX.clone(
    name = "local_z",
    title = "Z position of Hits",
    xlabel = "Hit position Z dimension",
    range_min = -0.05, range_max = 0.05, range_nbins = 100,
)

SiPixelPhase1HitsPosPhi = SiPixelPhase1HitsPosX.clone(
    name = "local_phi",
    title = "Phi position of Hits",
    xlabel = "Hit position phi dimension",
    range_min = -3.5, range_max = 3.5, range_nbins = 100,
)

SiPixelPhase1HitsPosEta = SiPixelPhase1HitsPosX.clone(
    name = "local_eta",
    title = "Eta position of Hits",
    xlabel = "Hit position Eta dimension",
    range_min = -0.1, range_max = 0.1, range_nbins = 100,
)

# Hit-based track efficiency: fraction of valid hits over valid+missing.
SiPixelPhase1HitsEfficiencyTrack = DefaultHistoTrack.clone(
    name = "trackefficiency",
    title = "Track Efficiency (by hits)",
    xlabel = "#valid/(#valid+#missing)",
    dimensions = 1,
    topFolderName = "PixelPhase1V/Hits",
    specs = VPSet(
        StandardSpecification2DProfile,
    )
)

# Full set of histogram specifications shared by the analyzer and harvester.
SiPixelPhase1HitsConf = cms.VPSet(
    SiPixelPhase1HitsTofR,
    SiPixelPhase1HitsEnergyLoss,
    SiPixelPhase1HitsEntryExitX,
    SiPixelPhase1HitsEntryExitY,
    SiPixelPhase1HitsEntryExitZ,
    SiPixelPhase1HitsPosX,
    SiPixelPhase1HitsPosY,
    SiPixelPhase1HitsPosZ,
    SiPixelPhase1HitsPosPhi,
    SiPixelPhase1HitsPosEta,
    SiPixelPhase1HitsEfficiencyTrack,
)

from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# Analyzer module: consumes the four g4SimHits pixel collections (barrel and
# endcap, low and high time of flight) and fills the histograms above.
SiPixelPhase1HitsAnalyzerV = DQMEDAnalyzer('SiPixelPhase1HitsV',
    pixBarrelLowSrc = cms.InputTag("g4SimHits","TrackerHitsPixelBarrelLowTof"),
    pixBarrelHighSrc = cms.InputTag("g4SimHits","TrackerHitsPixelBarrelHighTof"),
    pixForwardLowSrc = cms.InputTag("g4SimHits","TrackerHitsPixelEndcapLowTof"),
    pixForwardHighSrc = cms.InputTag("g4SimHits","TrackerHitsPixelEndcapHighTof"),
    # Hit Efficiency stuff
    associateRecoTracks = cms.bool(True),
    tracksTag = cms.InputTag("generalTracks"),
    tpTag = cms.InputTag("mix","MergedTrackTruth"),
    trackAssociatorByHitsTag = cms.InputTag("quickTrackAssociatorByHits"),
    associateStrip = cms.bool(True),
    associatePixel = cms.bool(True),
    ROUList = cms.vstring('g4SimHitsTrackerHitsPixelBarrelLowTof',
                          'g4SimHitsTrackerHitsPixelBarrelHighTof',
                          'g4SimHitsTrackerHitsPixelEndcapLowTof',
                          'g4SimHitsTrackerHitsPixelEndcapHighTof'),
    # Track assoc. parameters
    histograms = SiPixelPhase1HitsConf,
    geometry = SiPixelPhase1Geometry
)

# In premixing stage 2 the tracking-particle collection comes from mixData.
from Configuration.ProcessModifiers.premix_stage2_cff import premix_stage2
premix_stage2.toModify(SiPixelPhase1HitsAnalyzerV, tpTag = "mixData:MergedTrackTruth")

# Harvester: post-processes the histograms booked with the same configuration.
SiPixelPhase1HitsHarvesterV = DQMEDHarvester("SiPixelPhase1Harvester",
    histograms = SiPixelPhase1HitsConf,
    geometry = SiPixelPhase1Geometry
)
| StarcoderdataPython |
import crypto_trading_lib as cl

# Load the raw trading data once, then derive and persist each report table.
df = cl.load_data()

# Per-trade fee breakdown.
fees = cl.create_trade_fee_table(df)
cl.save_table(fees, "crypto_fees.csv")

# Buy-side trades.
buys = cl.create_crypto_buy_table(df)
cl.save_table(buys, "crypto_buy_table.csv")

# Sell-side trades.
sells = cl.create_crypto_sell_table(df)
cl.save_table(sells, "crypto_sell_table.csv")
11267925 | <filename>python/wx/monitor_mq.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
import setproctitle
import time
import sys
import logging
from urllib import urlencode
# Python 2 only: force UTF-8 as the default string encoding (reload() exposes
# the otherwise-hidden sys.setdefaultencoding).
reload(sys)
sys.setdefaultencoding('utf8')

# RabbitMQ management-plugin endpoint and credentials used for polling.
mq_host="10.106.x.x"
mq_port=15672
mq_user="monitor"
mq_pass="<PASSWORD>"

# REST endpoint listing every queue with its statistics.
queue_url="http://%s:%d/api/queues" % (mq_host, mq_port)
# 获取rabbitmq的所有队列信息
def get_queue_info(user, password, url):
    """Fetch the stats of every queue from the RabbitMQ management API.

    Returns the decoded JSON payload (a list of queue dicts), or None when
    the request or the decoding fails (the error is printed, best-effort).
    """
    try:
        response = requests.get(url=url, auth=(user, password))
        return json.loads(response.content.decode())
    except Exception as e:
        print(e)
# 发送告警信息
def send_msg(data, robot_webhook_url):
    """Post a WeChat-robot alert for every queue whose ready backlog is > 1M.

    `data` is the queue list returned by the management API; each alert is a
    plain-text message sent to the given webhook and logged.
    """
    try:
        # Map queue name -> number of ready (unconsumed) messages.
        queue_ready = {item['name']: item['messages_ready'] for item in data}
        for name, ready in queue_ready.items():
            if ready > 1000000:
                str_msg = '生产环境' + "项目队列: " + name + '消息积压超过100万,当前值为: ' + str(ready) + ', 请及时检查.'
                payload = json.dumps({
                    "msgtype": 'text',
                    "text": {'content': str_msg},
                })
                resp = requests.post(url=robot_webhook_url,
                                     data=payload,
                                     headers={'Content-Type': 'application/json'})
                logging.info(resp)
                logging.info(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' 队列超过100万调用')
    except Exception as e:
        print(e)
def main():
    """Run one monitoring pass: pull queue stats and alert on large backlogs."""
    setproctitle.setproctitle('monitormq')
    webhook = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=<KEY>'
    queue_stats = get_queue_info(mq_user, mq_pass, queue_url)
    send_msg(queue_stats, webhook)
if __name__ == '__main__':
    # Log everything to a fixed file with timestamped entries.
    logging.basicConfig(
        filename='/opt/monitor/monitor.log',
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S %p",
    )
    # Poll every five minutes, forever.
    while True:
        main()
        time.sleep(300)
6447095 | import csv
from django.core.management import BaseCommand
from extlinks.links.models import URLPattern
from extlinks.organisations.models import Organisation, Collection
from extlinks.programs.models import Program
class Command(BaseCommand):
    help = """
    Imports Programs, Orgs, Collections, and URLPatterns from The Wikipedia
    Library's old metrics collection system"""

    def add_arguments(self, parser):
        # One or more CSV paths; handle() processes only the first one.
        parser.add_argument('file_path', nargs='+', type=str)

    def handle(self, *args, **options):
        """Import (organisation, collection, urlpattern, twl_link) CSV rows.

        Each data row names an organisation, one of its collections and a
        URL pattern; a twl_link value of 'x' attaches the organisation to
        The Wikipedia Library program.  The import is idempotent: existing
        rows are reused instead of duplicated.
        """
        file_path = options['file_path'][0]

        # get_or_create() replaces the original try/except-get-then-save
        # blocks: same "fetch or insert" behaviour, done in one query pair
        # and without boilerplate (it returns an (obj, created) tuple).
        twl_program, _ = Program.objects.get_or_create(
            name='The Wikipedia Library'
        )

        with open(file_path, 'r') as input_file:
            csv_reader = csv.reader(input_file)
            next(csv_reader)  # skip the header row

            for row in csv_reader:
                organisation = row[0]
                collection = row[1]
                urlpattern = row[2]
                twl_link = row[3]
                # Management commands should report through self.stdout so
                # output can be captured/redirected (Django convention),
                # rather than via bare print().
                self.stdout.write(str(row))

                organisation_object, _ = Organisation.objects.get_or_create(
                    name=organisation
                )
                # M2M add() is idempotent, so linking unconditionally also
                # repairs organisations created before their TWL flag was set.
                if twl_link == 'x':
                    organisation_object.program.add(twl_program)

                collection_object, _ = Collection.objects.get_or_create(
                    name=collection,
                    organisation=organisation_object,
                )

                # defaults= is only applied when a new URLPattern is created.
                URLPattern.objects.get_or_create(
                    url=urlpattern,
                    defaults={'collection': collection_object},
                )
| StarcoderdataPython |
3410178 | import numpy as np
from scipy import sparse
from . import auxiliary_function as ax
from . import comdet_functions as cd
from . import cp_functions as cp
from . import solver
class DirectedGraph:
    """A (possibly weighted) directed graph for surprise-based community and
    core-periphery detection.

    The graph can be built from an adjacency matrix (dense list/ndarray or
    scipy sparse) or from an edge list (couples, optionally with a third
    weight entry).  Degree/strength sequences are precomputed on
    construction; the ``run_*`` methods delegate to the external ``solver``
    module with the surprise/sorting/flipping callbacks selected by the
    ``_initialize_problem_*`` helpers.
    """

    def __init__(
        self,
        adjacency=None,
        edgelist=None,
    ):
        """Create the graph from *adjacency* or *edgelist* (exactly one)."""
        self.n_nodes = None
        self.n_edges = None
        self.adjacency = None
        self.is_sparse = False
        self.edgelist = None
        self.degree_sequence_out = None
        self.degree_sequence_in = None
        self.strength_sequence_out = None
        self.strength_sequence_in = None
        self.nodes_dict = None
        self.is_initialized = False
        self.is_weighted = False
        self._initialize_graph(
            adjacency=adjacency,
            edgelist=edgelist,
        )

    def _initialize_graph(
        self,
        adjacency=None,
        edgelist=None,
    ):
        """Validate the input and populate adjacency/degree/strength attributes.

        Parameters
        ----------
        adjacency : list | numpy.ndarray | scipy sparse matrix, optional
            Dense or sparse adjacency matrix.
        edgelist : list | numpy.ndarray, optional
            Sequence of (tail, head) or (tail, head, weight) entries.

        Raises
        ------
        TypeError
            If neither input is given or the input has the wrong type.
        ValueError
            If the edge list entries are not couples/triples.
        """
        if adjacency is not None:
            if not isinstance(
                adjacency, (list, np.ndarray)
            ) and not sparse.isspmatrix(adjacency):
                raise TypeError(
                    "The adjacency matrix must be passed as a list or numpy"
                    " array or scipy sparse matrix."
                )
            if isinstance(
                adjacency, list
            ):
                self.adjacency = np.array(adjacency)
            elif isinstance(
                adjacency, np.ndarray
            ):
                self.adjacency = adjacency
            else:
                # scipy sparse input: keep it sparse and remember the fact.
                self.adjacency = adjacency
                self.is_sparse = True
        elif edgelist is not None:
            if not isinstance(edgelist, (list, np.ndarray)):
                raise TypeError(
                    "The edgelist must be passed as a list or numpy array."
                )
            elif len(edgelist) > 0:
                if len(edgelist[0]) == 2:
                    self.adjacency = ax.from_edgelist(edgelist,
                                                      self.is_sparse,
                                                      True)
                    self.edgelist = edgelist
                elif len(edgelist[0]) == 3:
                    self.adjacency = ax.from_weighted_edgelist(edgelist,
                                                               self.is_sparse,
                                                               True)
                    self.edgelist = edgelist
                else:
                    raise ValueError(
                        "This is not an edgelist. An edgelist must be a list"
                        " or array of couples of nodes with optional weights."
                        " Is this an adjacency matrix?"
                    )
        else:
            # BUG FIX: the message used to say "UndirectedGraph" -- a
            # copy-paste leftover from the undirected class.
            raise TypeError(
                "DirectedGraph is missing one positional argument"
                " adjacency.")

        ax.check_adjacency(self.adjacency, self.is_sparse, True)
        if np.sum(self.adjacency) == np.sum(self.adjacency > 0):
            # Purely binary matrix: every non-zero entry equals one.
            self.degree_sequence_in, self.degree_sequence_out = ax.compute_degree(
                self.adjacency,
                True
            )
            self.degree_sequence_in = self.degree_sequence_in.astype(np.int64)
            self.degree_sequence_out = self.degree_sequence_out.astype(
                np.int64)
        else:
            # Weighted matrix: keep the weighted copy and binarise `adjacency`.
            self.degree_sequence_in, self.degree_sequence_out = ax.compute_degree(
                self.adjacency,
                True
            )
            self.degree_sequence_in = self.degree_sequence_in.astype(np.int64)
            self.degree_sequence_out = self.degree_sequence_out.astype(
                np.int64)
            self.strength_sequence_in, self.strength_sequence_out = ax.compute_strength(
                self.adjacency,
                True
            )
            self.strength_sequence_in = self.strength_sequence_in.astype(
                np.float64)
            self.strength_sequence_out = self.strength_sequence_out.astype(
                np.float64)
            self.adjacency_weighted = self.adjacency
            self.adjacency = (self.adjacency_weighted.astype(bool)).astype(
                np.int16)
            self.is_weighted = True

        self.n_nodes = len(self.degree_sequence_out)
        # BUG FIX: in a directed graph the number of edges equals the sum of
        # the out-degrees; the former "/ 2" is the undirected formula and
        # under-counted the edges by a factor of two.
        self.n_edges = int(np.sum(self.degree_sequence_out))
        self.is_initialized = True

    def set_adjacency_matrix(self, adjacency):
        """Initialise the graph from *adjacency*; requires a clean instance."""
        if self.is_initialized:
            raise ValueError(
                "Graph already contains edges or has a degree sequence."
                " Use 'clean_edges()' first."
            )
        else:
            self._initialize_graph(adjacency=adjacency)

    def set_edgelist(self, edgelist):
        """Initialise the graph from *edgelist*; requires a clean instance."""
        if self.is_initialized:
            raise ValueError(
                "Graph already contains edges or has a degree sequence."
                " Use 'clean_edges()' first."
            )
        else:
            self._initialize_graph(edgelist=edgelist)

    def clean_edges(self):
        """Drop the adjacency matrix and edge list so the graph can be reset."""
        self.adjacency = None
        self.edgelist = None
        self.is_initialized = False

    def run_enhanced_cp_detection(self,
                                  initial_guess="random",
                                  num_sim=2,
                                  sorting_method="default",
                                  print_output=False):
        """Detect a core-periphery split using the enhanced (weighted) surprise."""
        self._initialize_problem_cp(
            initial_guess=initial_guess,
            enhanced=True,
            weighted=True,
            sorting_method=sorting_method)

        sol = solver.solver_cp(
            adjacency_matrix=self.aux_adj,
            cluster_assignment=self.init_guess,
            num_sim=num_sim,
            sort_edges=self.sorting_function,
            calculate_surprise=self.surprise_function,
            correct_partition_labeling=self.partition_labeler,
            flipping_function=self.flipping_function,
            print_output=print_output)

        self._set_solved_problem(sol)

    def run_discrete_cp_detection(self,
                                  initial_guess="random",
                                  weighted=None,
                                  num_sim=2,
                                  sorting_method="default",
                                  print_output=False):
        """Detect a core-periphery split with binary or discrete-weight surprise.

        When *weighted* is None the choice follows the initialised graph.
        """
        self._initialize_problem_cp(
            initial_guess=initial_guess,
            enhanced=False,
            weighted=weighted,
            sorting_method=sorting_method)

        sol = solver.solver_cp(
            adjacency_matrix=self.aux_adj,
            cluster_assignment=self.init_guess,
            num_sim=num_sim,
            sort_edges=self.sorting_function,
            calculate_surprise=self.surprise_function,
            correct_partition_labeling=self.partition_labeler,
            flipping_function=self.flipping_function,
            print_output=print_output)

        self._set_solved_problem(sol)

    def _initialize_problem_cp(self,
                               initial_guess,
                               enhanced,
                               weighted,
                               sorting_method):
        """Select the matrix, surprise/sorting/flipping callbacks for CP runs."""
        self._set_initial_guess_cp(initial_guess)
        if weighted is None:
            # Follow the graph: weighted matrix if one is available.
            if self.is_weighted:
                self.aux_adj = self.adjacency_weighted
                self.method = "weighted"
            else:
                self.aux_adj = self.adjacency
                self.method = "binary"
        elif weighted:
            if enhanced:
                self.method = "enhanced"
            else:
                self.method = "weighted"
            if hasattr(self, "adjacency_weighted"):
                self.aux_adj = self.adjacency_weighted
                # Discrete-weight methods require integer-valued weights.
                cond2 = (self.aux_adj.astype(np.int64).sum() !=
                         self.aux_adj.sum())
                if cond2:
                    raise ValueError("The selected method works for discrete "
                                     "weights, but the initialised graph has "
                                     "continuous weights.")
            else:
                # (typo "peryphery" fixed in the message below)
                raise TypeError(
                    "You choose weighted core periphery detection but the"
                    " graph you initialised is binary.")
        else:
            self.aux_adj = self.adjacency
            self.method = "binary"

        if (sorting_method == "default") and self.is_weighted:
            sorting_method = "random"
        elif (sorting_method == "default") and (not self.is_weighted):
            sorting_method = "jaccard"

        # NOTE(review): the default for binary graphs resolves to "jaccard",
        # which has no entry in sort_func below and therefore raises the
        # ValueError -- confirm the intended jaccard sorting helper.
        sort_func = {
            "random": lambda x: ax.shuffled_edges(x, True),
            "degrees": None,
            "strengths": None,
        }

        try:
            self.sorting_function = sort_func[sorting_method]
        except Exception:
            raise ValueError(
                "Sorting method can be 'random', 'degrees' or 'strengths'.")

        surp_fun = {
            "binary": lambda x, y: cp.calculate_surprise_logsum_cp_bin(
                x,
                y,
                True),
            "weighted": lambda x, y: cp.calculate_surprise_logsum_cp_weigh(
                x,
                y,
                True),
            "enhanced": lambda x, y: cp.calculate_surprise_logsum_cp_enhanced(
                x,
                y,
                True),
        }

        try:
            self.surprise_function = surp_fun[self.method]
        except Exception:
            raise ValueError("CP method can be 'binary' or 'weighted'.")

        self.flipping_function = lambda x: cp.flipping_function_cp(x, 1)

        self.partition_labeler = lambda x, y: cp.labeling_core_periphery(x, y)

    def _set_initial_guess_cp(self, initial_guess):
        """Build ``self.init_guess`` (0 = core, 1 = periphery) for CP runs."""
        # TODO: fix the weighted part
        if isinstance(initial_guess, str):
            if initial_guess == "random":
                self.init_guess = np.ones(self.n_nodes, dtype=np.int32)
                # Roughly 5% of the nodes start in the core.
                aux_n = int(np.ceil((5 * self.n_nodes) / 100))
                self.init_guess[:aux_n] = 0
                # BUG FIX: shuffling only init_guess[:aux_n] permuted a block
                # of identical zeros -- a no-op, so "random" was deterministic.
                # Shuffling the whole array spreads the core labels randomly.
                np.random.shuffle(self.init_guess)
            elif initial_guess == "ranked":
                # Top-5% nodes by (out-)strength/degree form the initial core.
                self.init_guess = np.ones(self.n_nodes, dtype=np.int32)
                aux_n = int(np.ceil((5 * self.n_nodes) / 100))
                if self.is_weighted:
                    self.init_guess[
                        self.strength_sequence_out.argsort()[-aux_n:]] = 0
                else:
                    self.init_guess[
                        self.degree_sequence_out.argsort()[-aux_n:]] = 0
            elif initial_guess == "eigenvector":
                # NOTE(review): other directed helpers in this class pass
                # True as the second flag -- confirm the flag semantics here.
                self.init_guess = ax.eigenvector_init_guess(self.adjacency,
                                                            False)
            else:
                raise ValueError("Valid values of initial guess are 'random', "
                                 "eigenvector or a custom initial guess ("
                                 "np.ndarray or list).")
        elif isinstance(initial_guess, np.ndarray):
            self.init_guess = initial_guess
        elif isinstance(initial_guess, list):
            self.init_guess = np.array(initial_guess)

        if np.unique(self.init_guess).shape[0] != 2:
            raise ValueError("The custom initial_guess passed is not valid."
                             " The initial guess for core-periphery detection"
                             " must have nodes' membership that are 0 or 1."
                             " Pay attention that at least one node has to "
                             "belong to the core (0) or the periphery (1).")

        if self.init_guess.shape[0] != self.n_nodes:
            raise ValueError(
                "The length of the initial guess provided is different from"
                " the network number of nodes.")

    def run_continuous_community_detection(self,
                                           method="aglomerative",
                                           initial_guess="random",
                                           approx=None,
                                           num_sim=2,
                                           num_clusters=None,
                                           prob_mix=0.1,
                                           sorting_method="default",
                                           print_output=False
                                           ):
        """Community detection with the continuous-weights surprise."""
        self._initialize_problem_cd(
            method=method,
            num_clusters=num_clusters,
            initial_guess=initial_guess,
            enhanced=False,
            weighted=True,
            continuous=True,
            sorting_method=sorting_method)

        if method == "aglomerative":
            sol = solver.solver_com_det_aglom(
                adjacency_matrix=self.aux_adj,
                cluster_assignment=self.init_guess,
                num_sim=num_sim,
                sort_edges=self.sorting_function,
                calculate_surprise=self.surprise_function,
                correct_partition_labeling=self.partition_labeler,
                prob_mix=prob_mix,
                flipping_function=cd.flipping_function_comdet_agl_new,
                approx=approx,
                is_directed=True,
                print_output=print_output)
        elif method == "fixed-clusters":
            sol = solver.solver_com_det_divis(
                adjacency_matrix=self.aux_adj,
                cluster_assignment=self.init_guess,
                num_sim=num_sim,
                sort_edges=self.sorting_function,
                calculate_surprise=self.surprise_function,
                correct_partition_labeling=self.partition_labeler,
                flipping_function=cd.flipping_function_comdet_div_new,
                approx=approx,
                is_directed=True,
                print_output=print_output)
        else:
            raise ValueError("Method can be 'aglomerative' or 'fixed-clusters'.")

        self._set_solved_problem(sol)

    def run_enhanced_community_detection(self,
                                         method="aglomerative",
                                         initial_guess="random",
                                         num_sim=2,
                                         num_clusters=None,
                                         prob_mix=0.1,
                                         sorting_method="default",
                                         print_output=False
                                         ):
        """Community detection with the enhanced (binary + weights) surprise."""
        self._initialize_problem_cd(
            method=method,
            num_clusters=num_clusters,
            initial_guess=initial_guess,
            enhanced=True,
            weighted=True,
            continuous=False,
            sorting_method=sorting_method)

        if method == "aglomerative":
            sol = solver.solver_com_det_aglom(
                adjacency_matrix=self.aux_adj,
                cluster_assignment=self.init_guess,
                num_sim=num_sim,
                sort_edges=self.sorting_function,
                calculate_surprise=self.surprise_function,
                correct_partition_labeling=self.partition_labeler,
                prob_mix=prob_mix,
                flipping_function=cd.flipping_function_comdet_agl_new,
                approx=None,
                is_directed=True,
                print_output=print_output)
        elif method == "fixed-clusters":
            sol = solver.solver_com_det_divis(
                adjacency_matrix=self.aux_adj,
                cluster_assignment=self.init_guess,
                num_sim=num_sim,
                sort_edges=self.sorting_function,
                calculate_surprise=self.surprise_function,
                correct_partition_labeling=self.partition_labeler,
                flipping_function=cd.flipping_function_comdet_div_new,
                approx=None,
                is_directed=True,
                print_output=print_output)
        else:
            raise ValueError("Method can be 'aglomerative' or 'fixed-clusters'.")

        self._set_solved_problem(sol)

    def run_discrete_community_detection(self,
                                         method="aglomerative",
                                         initial_guess="random",
                                         weighted=None,
                                         num_sim=2,
                                         num_clusters=2,
                                         prob_mix=0.1,
                                         sorting_method="default",
                                         print_output=False):
        """Community detection with binary or discrete-weight surprise.

        BUG FIX / consistency: the defaults used to be ``initial_guess=None``
        and ``num_sim=None``.  None falls through every isinstance() branch of
        ``_set_initial_guess_cd`` (leaving ``init_guess`` unset) and the
        sibling ``run_*`` methods default to "random" and 2, so the same,
        working defaults are used here.
        """
        self._initialize_problem_cd(
            method=method,
            num_clusters=num_clusters,
            initial_guess=initial_guess,
            enhanced=False,
            weighted=weighted,
            continuous=False,
            sorting_method=sorting_method)

        if method == "aglomerative":
            sol = solver.solver_com_det_aglom(
                adjacency_matrix=self.aux_adj,
                cluster_assignment=self.init_guess,
                num_sim=num_sim,
                sort_edges=self.sorting_function,
                calculate_surprise=self.surprise_function,
                correct_partition_labeling=self.partition_labeler,
                prob_mix=prob_mix,
                flipping_function=cd.flipping_function_comdet_agl_new,
                approx=None,
                is_directed=True,
                print_output=print_output)
        elif method == "fixed-clusters":
            sol = solver.solver_com_det_divis(
                adjacency_matrix=self.aux_adj,
                cluster_assignment=self.init_guess,
                num_sim=num_sim,
                sort_edges=self.sorting_function,
                calculate_surprise=self.surprise_function,
                correct_partition_labeling=self.partition_labeler,
                flipping_function=cd.flipping_function_comdet_div_new,
                approx=None,
                is_directed=True,
                print_output=print_output)
        else:
            raise ValueError("Method can be 'aglomerative' or 'fixed-clusters'.")

        self._set_solved_problem(sol)

    def _initialize_problem_cd(self,
                               method,
                               num_clusters,
                               initial_guess,
                               enhanced,
                               weighted,
                               continuous,
                               sorting_method):
        """Select matrix, surprise/sorting callbacks for community detection."""
        self._set_initial_guess_cd(method, num_clusters, initial_guess)
        if weighted is None:
            # Follow the graph: weighted matrix if one is available.
            if self.is_weighted:
                self.aux_adj = self.adjacency_weighted
                self.method = "weighted"
            else:
                self.aux_adj = self.adjacency
                self.method = "binary"
        elif weighted:
            if enhanced:
                self.method = "enhanced"
            elif continuous:
                self.method = "continuous"
            else:
                self.method = "weighted"
            if hasattr(self, "adjacency_weighted"):
                self.aux_adj = self.adjacency_weighted
                # Only the discrete-weight methods require integer weights.
                cond1 = (self.method == "enhanced" or
                         self.method == "weighted")
                cond2 = (self.aux_adj.astype(np.int64).sum() !=
                         self.aux_adj.sum())
                if cond1 and cond2:
                    raise ValueError("The selected method works for discrete "
                                     "weights, but the initialised graph has "
                                     "continuous weights.")
            else:
                raise TypeError(
                    "You choose weighted community detection but the"
                    " graph you initialised is binary.")
        else:
            self.aux_adj = self.adjacency
            self.method = "binary"

        if (sorting_method == "default") and self.is_weighted:
            sorting_method = "random"
        elif (sorting_method == "default") and (not self.is_weighted):
            sorting_method = "random"

        sort_func = {
            "random": lambda x: ax.shuffled_edges(x, True),
            "strengths": None,
        }

        try:
            self.sorting_function = sort_func[sorting_method]
        except Exception:
            raise ValueError(
                "Sorting method can be 'random' or 'strengths'.")

        surp_fun = {
            "binary": cd.calculate_surprise_logsum_clust_bin_new,
            "weighted": cd.calculate_surprise_logsum_clust_weigh_new,
            "enhanced": cd.calculate_surprise_logsum_clust_enhanced_new,
            "continuous": cd.calculate_surprise_logsum_clust_weigh_continuos,
        }

        self.surprise_function = surp_fun[self.method]

        self.partition_labeler = lambda x: cd.labeling_communities(x)

    def _set_initial_guess_cd(self,
                              method,
                              num_clusters,
                              initial_guess):
        """Build ``self.init_guess`` (cluster labels per node) for CD runs."""
        if num_clusters is None and method == "fixed-clusters":
            raise ValueError("When 'fixed-clusters' is passed as clustering 'method'"
                             " the 'num_clusters' argument must be specified.")
        if isinstance(initial_guess, str):
            if initial_guess == "random":
                if method == "aglomerative":
                    # Singleton clusters: one label per node.
                    self.init_guess = np.array(
                        [k for k in np.arange(self.n_nodes, dtype=np.int32)])
                elif method == "fixed-clusters":
                    self.init_guess = np.random.randint(
                        low=num_clusters,
                        size=self.n_nodes)
            elif (initial_guess == "common-neigh-weak") or \
                    (initial_guess == "common-neighbours"):
                if method == "aglomerative":
                    self.init_guess = ax.common_neigh_init_guess_weak(
                        self.adjacency)
                elif method == "fixed-clusters":
                    self.init_guess = ax.fixed_clusters_init_guess_cn(
                        adjacency=self.adjacency,
                        n_clust=num_clusters)
            elif initial_guess == "common-neigh-strong":
                if method == "aglomerative":
                    self.init_guess = ax.common_neigh_init_guess_strong(
                        self.adjacency)
                elif method == "fixed-clusters":
                    self.init_guess = ax.fixed_clusters_init_guess_cn(
                        adjacency=self.adjacency,
                        n_clust=num_clusters)
            else:
                raise ValueError(
                    "The 'initial_guess' selected is not a valid."
                    "Initial guess can be an array specifying nodes membership"
                    " or an initialisation method ['common-neighbours',"
                    " 'random', 'common-neigh-weak', 'common-neigh-strong']."
                    " For more details see documentation.")
        elif isinstance(initial_guess, np.ndarray):
            self.init_guess = initial_guess
        elif isinstance(initial_guess, list):
            self.init_guess = np.array(initial_guess)

        if self.init_guess.shape[0] != self.n_nodes:
            raise ValueError(
                "The length of the initial guess provided is different"
                " from the network number of nodes.")

        if (method == "fixed-clusters" and
                np.unique(self.init_guess).shape[0] != num_clusters):
            raise ValueError("The number of clusters of a custom initial guess"
                             " must coincide with 'num_clusters' when the "
                             " fixed-clusters method is applied.")

    def _set_solved_problem(self, sol):
        """Store the solver output: partition, log-surprise and surprise."""
        self.solution = sol[0]
        self.log_surprise = sol[1]
        # log_surprise is -log10(S), so the surprise itself is 10^(-log_surprise).
        self.surprise = 10 ** (-self.log_surprise)
| StarcoderdataPython |
5019325 | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: <NAME> (<EMAIL>)
import pickle
import traceback
from fabric_cf.actor.core.common.constants import Constants
from fabric_cf.actor.core.common.exceptions import DatabaseException
from fabric_cf.actor.core.core.unit import Unit
from fabric_cf.actor.core.plugins.db.server_actor_database import ServerActorDatabase
from fabric_cf.actor.core.apis.abc_substrate_database import ABCSubstrateDatabase
from fabric_cf.actor.core.util.id import ID
class SubstrateActorDatabase(ServerActorDatabase, ABCSubstrateDatabase):
    """Database plugin that persists substrate Units, stored as pickled blobs
    keyed by unit id, on top of the server actor database."""
    def get_unit(self, *, uid: ID):
        """Fetch and unpickle the Unit with the given id.

        Returns None if the unit is missing or any error occurs (errors are
        logged, not raised).
        """
        result = None
        try:
            unit_dict = self.db.get_unit(unt_uid=str(uid))
            if unit_dict is not None:
                # Units are stored pickled under the pickle-properties column.
                pickled_unit = unit_dict.get(Constants.PROPERTY_PICKLE_PROPERTIES)
                return pickle.loads(pickled_unit)
        except Exception as e:
            # Best-effort read: swallow and log, returning None to the caller.
            self.logger.error(traceback.format_exc())
            self.logger.error(e)
        return result
    def add_unit(self, *, u: Unit):
        """Insert a new Unit; a no-op if a unit with the same id already exists.

        Raises DatabaseException if the unit has no resource type.
        """
        try:
            if u.get_resource_type() is None:
                raise DatabaseException(Constants.INVALID_ARGUMENT)
            self.lock.acquire()
            if self.get_unit(uid=u.get_id()) is not None:
                self.logger.info("unit {} is already present in database".format(u.get_id()))
                return
            slice_id = str(u.get_slice_id())
            parent = self.get_unit(uid=u.get_parent_id())
            parent_id = None
            if parent is not None:
                # NOTE(review): get_unit returns an unpickled Unit object, yet
                # this subscripts it like a database row dict -- verify Unit
                # supports __getitem__ or whether this path is ever taken.
                parent_id = parent['unt_id']
            res_id = str(u.get_reservation_id())
            properties = pickle.dumps(u)
            self.db.add_unit(slc_guid=slice_id, rsv_resid=res_id,
                             unt_uid=str(u.get_id()), unt_unt_id=parent_id,
                             unt_state=u.get_state().value, properties=properties)
        finally:
            # Guarded release: the lock may not have been acquired if the
            # resource-type check raised first.
            if self.lock.locked():
                self.lock.release()
    def get_units(self, *, rid: ID):
        """Return the list of Units attached to reservation rid.

        Returns an empty list when none exist, or None if the query failed
        before the result list was initialised.
        """
        result = None
        try:
            self.lock.acquire()
            result = []
            unit_dict_list = self.db.get_units(rsv_resid=str(rid))
            if unit_dict_list is not None:
                for u in unit_dict_list:
                    pickled_unit = u.get(Constants.PROPERTY_PICKLE_PROPERTIES)
                    unit_obj = pickle.loads(pickled_unit)
                    result.append(unit_obj)
            return result
        except Exception as e:
            self.logger.error(e)
        finally:
            if self.lock.locked():
                self.lock.release()
        # Reached only on exception: returns whatever was built before failing.
        return result
    def remove_unit(self, *, uid: ID):
        """Delete the unit with the given id under the database lock."""
        try:
            self.lock.acquire()
            self.db.remove_unit(unt_uid=str(uid))
        finally:
            if self.lock.locked():
                self.lock.release()
    def update_unit(self, *, u: Unit):
        """Re-pickle the Unit and overwrite its stored blob under the lock."""
        try:
            self.lock.acquire()
            properties = pickle.dumps(u)
            self.db.update_unit(unt_uid=str(u.get_id()), properties=properties)
        finally:
            if self.lock.locked():
                self.lock.release()
| StarcoderdataPython |
1905464 | <filename>examples/ThunderRemoteBasic.py
from examples.RemoteControlTest import RemoteControlTest
from thunder_remote.RemoteControl import RemoteControl
if __name__ == "__main__":
    # Create the controller asleep so no input is processed until the
    # wake-up event fires. NOTE(review): in_proc=False presumably keeps the
    # polling out of a separate process -- confirm against thunder_remote docs.
    remote = RemoteControl(profile='default', start_sleeping=True, in_proc=False)
    rct = RemoteControlTest(remote)
    if remote.is_available:
        # Wire controller events to the test handlers (west button sleeps,
        # both stick y-axes print their values).
        RemoteControl.events.wake_up += rct.wake_up
        RemoteControl.events.on_west += rct.sleep
        RemoteControl.events.on_stick_left_y += rct.print_val
        RemoteControl.events.on_stick_right_y += rct.print_val
        remote.activate()
        # Main loop: poll controller events for as long as the remote is online.
        while remote.remote_online:
            remote.listen()
            if rct.manualControl:
                remote.active_control()
| StarcoderdataPython |
6407161 | <reponame>cambel/ur3<gh_stars>10-100
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: <NAME>
from ur_control import transformations
from ur_control.arm import Arm
import argparse
import rospy
import timeit
import numpy as np
np.set_printoptions(suppress=True)
np.set_printoptions(linewidth=np.inf)
def move_joints(wait=True):
    """Drive the arm to a fixed joint configuration in 0.5 seconds.

    wait -- if True, block until the motion finishes before returning;
    otherwise return immediately and let the motion run in the background.
    """
    target_q = [2.37191, -1.88688, -1.82035, 0.4766, 2.31206, 3.18758]
    arm.set_joint_positions(position=target_q, wait=wait, t=0.5)
def follow_trajectory():
    """Step the arm through a fixed sequence of joint-space waypoints,
    blocking one second per segment."""
    waypoints = (
        [2.4463, -1.8762, -1.6757, 0.3268, 2.2378, 3.1960],
        [2.5501, -1.9786, -1.5293, 0.2887, 2.1344, 3.2062],
        [2.5501, -1.9262, -1.3617, 0.0687, 2.1344, 3.2062],
        [2.4463, -1.8162, -1.5093, 0.1004, 2.2378, 3.1960],
        [2.3168, -1.7349, -1.6096, 0.1090, 2.3669, 3.1805],
        [2.3168, -1.7997, -1.7772, 0.3415, 2.3669, 3.1805],
        [2.3168, -1.9113, -1.8998, 0.5756, 2.3669, 3.1805],
        [2.4463, -1.9799, -1.7954, 0.5502, 2.2378, 3.1960],
        [2.5501, -2.0719, -1.6474, 0.5000, 2.1344, 3.2062],
    )
    for waypoint in waypoints:
        arm.set_joint_positions(position=waypoint, wait=True, t=1.0)
def move_endeffector(wait=True):
    """Translate the end effector 4 cm along its local z-axis over 1 second.

    wait -- if True, block until the motion finishes. This argument was
    previously accepted but ignored (wait=True was hard-coded in the call).
    May fail if no IK solution exists for the target pose.
    """
    # current end-effector pose
    cpose = arm.end_effector()
    # desired delta: +4 cm translation along z, no rotation
    deltax = np.array([0., 0., 0.04, 0., 0., 0.])
    # compose the delta with the current pose, expressed in the
    # end-effector frame (ee_rotation=True)
    cpose = transformations.pose_euler_to_quaternion(cpose, deltax, ee_rotation=True)
    # execute the new pose; honour the caller's wait flag
    arm.set_target_pose(pose=cpose, wait=wait, t=1.0)
def move_gripper():
    """Connect to and activate the real Robotiq gripper over the UR cap
    interface, then disconnect. Hardware-only; behaves very differently
    from simulation."""
    # very different than simulation
    # Imports are local because the robotiq_urcap_control package is only
    # available/needed when driving the physical gripper.
    from robotiq_urcap_control.msg import Robotiq2FGripper_robot_input as inputMsg
    from robotiq_urcap_control.msg import Robotiq2FGripper_robot_output as outputMsg
    from robotiq_urcap_control.robotiq_urcap_control import RobotiqGripper
    print("Connecting to gripper")
    robot_ip = rospy.get_param("/ur_hardware_interface/robot_ip")
    gripper = RobotiqGripper(robot_ip=robot_ip)
    # The Gripper status is published on the topic named 'Robotiq2FGripperRobotInput'
    # NOTE(review): 'pub' is created but never used -- confirm whether the
    # publisher side is intentionally left idle here.
    pub = rospy.Publisher('Robotiq2FGripperRobotInput', inputMsg, queue_size=1)
    # The Gripper command is received from the topic named 'Robotiq2FGripperRobotOutput'
    rospy.Subscriber('Robotiq2FGripperRobotOutput', outputMsg, gripper.send_command)
    gripper.connect()
    gripper.activate()
    # Example motions kept for reference:
    # gripper.move_and_wait_for_pos(position=100, speed=10, force=10)
    # gripper.move_and_wait_for_pos(position=0, speed=10, force=10)
    # gripper.move_and_wait_for_pos(position=255, speed=10, force=10)
    gripper.disconnect()
def main():
    """Parse command-line flags, initialise the ROS node and arm interface,
    and run the selected motion test(s), reporting wall-clock and ROS time."""
    parser = argparse.ArgumentParser(description='Test force control')
    parser.add_argument('-m', '--move', action='store_true',
                        help='move to joint configuration')
    parser.add_argument('-t', '--move_traj', action='store_true',
                        help='move following a trajectory of joint configurations')
    parser.add_argument('-e', '--move_ee', action='store_true',
                        help='move to a desired end-effector position')
    parser.add_argument('-g', '--gripper', action='store_true',
                        help='Move gripper')
    # NOTE(review): the following three flags are parsed but never acted on
    # below -- presumably leftovers from removed tests; confirm before removal.
    parser.add_argument('-r', '--rotation', action='store_true',
                        help='Rotation slerp')
    parser.add_argument('--relative', action='store_true', help='relative to end-effector')
    parser.add_argument('--rotation_pd', action='store_true', help='relative to end-effector')
    args = parser.parse_args()
    rospy.init_node('ur3e_script_control')
    # The arm handle is a module-level global shared by the move_* helpers.
    global arm
    arm = Arm(
        ft_sensor=True,  # get Force/Torque data or not
        gripper=True,  # Enable gripper
    )
    real_start_time = timeit.default_timer()
    ros_start_time = rospy.get_time()
    if args.move:
        move_joints()
    if args.move_traj:
        follow_trajectory()
    if args.move_ee:
        move_endeffector()
    if args.gripper:
        move_gripper()
    print("real time", round(timeit.default_timer() - real_start_time, 3))
    print("ros time", round(rospy.get_time() - ros_start_time, 3))
| StarcoderdataPython |
8157867 | from flask_sqlalchemy import SQLAlchemy
# New database instance
db = SQLAlchemy() | StarcoderdataPython |
12841965 | from sklearn.datasets import load_svmlight_file
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import metrics
# Regularisation strengths to sweep for the linear SVM.
param_grid = {'C':[0.001,0.01,0.1,1,10,100]}
# Load the sparse feature matrix and labels from libsvm format.
X,y = load_svmlight_file('MachineLearning/disorder.libsvm.dat')
# 50/50 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5, random_state = 0)
# Cross-validated grid search over C, selecting by ROC AUC.
grid_search = GridSearchCV(SVC(kernel = 'linear'), param_grid, scoring= 'roc_auc')
grid_search.fit(X_train, y_train)
# NOTE(review): despite the name, best_c holds the whole best_params_ dict,
# not just the C value.
best_c = grid_search.best_params_
print(best_c)
print('best_auc: {}'.format(metrics.roc_auc_score(y_test, grid_search.decision_function(X_test))))
8126368 | <filename>pygvisuals/widgets/entry.py
# --- imports
# pygame imports
import pygame
# local imports
from .selection_text_widget import *
from ..designs import getDefaultDesign
from ..util import inherit_docstrings_from_superclass
class Entry(SelectionTextWidget):
"""
Entry-fields that accept keyboard-input.
"""
def __init__(self, x, y, width, height, text="", font=getDefaultDesign().font, editable=True, validation_function=(lambda *x: True), selection_overlay=getDefaultDesign().selection_overlay):
"""
Initialisation of an Entry.
Args:
x: An integer specifing the x-coordinate of the widget.
This is the horizontal distance from the left reference point.
y: An integer specifing the y-coordinate of the widget.
This is the vertical distance from the top reference point.
width: An integer specifing the width of the widget.
height: An integer specifing the height of the widget.
text: A string specifing the content of the widget.
The default value is an empty string.
font: A font-like object that can be interpreted by pygame.font as a Font;
this is used as the font for rendering text.
The default value is the global default for fonts.
editable: A boolean indicating whether the widget's content is editable by the user.
The default value is True, meaning it can be edited by user-input.
validation_function: A function that validates changed content.
It will receive three arguments (the new content, the old content and the widget-object)
and should return a boolean indicating whether the change is valid (True when valid).
The old content can be None if it was not set before; the new content can be anything that is being passed to setText().
The default value is a function that accepts every change.
selection_overlay: A color-like object that can be interpreted as a color by pygame (such as a tuple with RGB values);
this is used as an overlay for content that has been selected.
The default value is the global default for the selection-color.
"""
super(Entry, self).__init__(x, y, width, height, text, font, editable, validation_function, selection_overlay)
    def update(self, *args):
        """
        Additionally handles keyboard-input.
        inherit_doc::
        """
        # Only consume events while the widget is both active and focused;
        # args[0] is expected to be a pygame event when present.
        if len(args) > 0 and self.isActive() and self.isFocused():
            event = args[0]
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    self.moveCursor(-1)
                elif event.key == pygame.K_RIGHT:
                    self.moveCursor(1)
                elif self.isEditable():
                    # NOTE(review): backspace/delete are skipped here --
                    # presumably handled elsewhere (e.g. the superclass);
                    # confirm before relying on it.
                    if event.key not in (pygame.K_BACKSPACE, pygame.K_DELETE):
                        char = event.unicode
                        valid = False
                        if hasattr(char, "isprintable"):
                            valid = char.isprintable()
                        elif char == " " or not char.isspace():
                            # Fallback for interpreters without str.isprintable
                            # (Python 2): accept spaces and non-whitespace.
                            valid = True
                        if char and valid:
                            # Replace the span between selection mark and
                            # cursor with the typed character; advance cursor
                            # only if setText accepted the change.
                            s, c = self._sort(SELECTION, CURSOR)
                            if self.setText(self.text[:s] + char + self.text[c:], True):
                                self.setCursor(s + 1)
        super(Entry, self).update(*args)
    def _getAppearance(self, *args):
        """
        Additionally renders the entry's text, cursor and selection.
        inherit_doc::
        """
        surface = super(Entry, self)._getAppearance(*args)
        linesize = self.font.get_linesize()
        # Draw the text vertically centred within the widget bounds.
        surface.blit(self._render(self.text), (0, (self.bounds.height - linesize) / 2))
        if self.isFocused():
            # Pixel x-positions of the cursor and the selection mark.
            cursor_pos = self._indexToPos(CURSOR)[0]
            selection_pos = self._indexToPos(SELECTION)[0]
            # The cursor is a 2px-wide vertical bar in the foreground colour.
            cursor = pygame.Surface((2, linesize))
            cursor.fill(self.foreground)
            surface.blit(cursor, (cursor_pos, (self.bounds.height - linesize) / 2))
            # Translucent overlay covering the selected span; SRCALPHA keeps
            # the underlying text visible through the overlay colour.
            selection = pygame.Surface((abs(cursor_pos - selection_pos), linesize), pygame.SRCALPHA, 32)
            selection.fill(self.selection_overlay)
            surface.blit(selection, (self._sort(cursor_pos, selection_pos, False)[0], (self.bounds.height - linesize) / 2))
        return surface
# inherit docs from superclass
Entry = inherit_docstrings_from_superclass(Entry)
| StarcoderdataPython |
3546364 | <gh_stars>10-100
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from twilio import TwilioRestException
from mock import MagicMock
from mock import ANY
import mongomock
import app as flask_app
from totp_auth import TotpAuth
class TestFlaskApp(unittest.TestCase):
def setUp(self):
users = [{'uid': "user.app_no.sms_no",
'password_hash': (
<PASSWORD>"
"<PASSWORD>")
},
{'uid': "user.app_no.sms_yes",
'phone_number': "(415) 555-1212",
'totp_secret': "<KEY>",
'totp_enabled_via_sms': True,
'password_hash': (
<PASSWORD>"
"<PASSWORD>")
},
{'uid': "user.app_yes.sms_no",
'totp_secret': "VR<KEY>",
'totp_enabled_via_app': True,
'password_hash': (
<PASSWORD>"
"<PASSWORD>")
},
{'uid': "user.app_yes.sms_yes",
'totp_secret': "<KEY>",
'totp_enabled_via_app': True,
'phone_number': "(415) 555-1213",
'totp_enabled_via_sms': True,
'password_hash': (
<PASSWORD>5.nsOt"
"gDslFYjf34U2PH7JG6OeJacIFjx.e.")
},
{'uid': "user2",
'totp_secret': "<KEY>",
'password_hash': (
<PASSWORD>"
"<PASSWORD>")
},
{'uid': "user",
'totp_secret': "<KEY>",
'password_hash': (
<PASSWORD>"
"<PASSWORD>")
}]
connection = mongomock.Connection()
db = connection['tfa'].users
self.db = db
for user in users:
db.insert(user)
test_config = {'secret_key': 'testing',
'twilio_from_number': '+14155551212'}
flask_app.konf.use_dict(test_config)
flask_app.connection = connection
flask_app.twilio = MagicMock(name='mock_twilio')
create_sms_mock = MagicMock(name='mock_twilio.sms.messages.create')
def side_effect(*args, **kwargs):
"""Simulate errors on bad inputs"""
for num in ['Fake', '+14155551212']:
if kwargs['to'] == num:
raise TwilioRestException
create_sms_mock.side_effect = side_effect
flask_app.twilio.sms.messages.create = create_sms_mock
self.create_sms_mock = create_sms_mock
self.app = flask_app.app.test_client()
    def tearDown(self):
        # No per-test cleanup needed: the mongomock connection and mocks are
        # rebuilt from scratch in setUp for every test.
        pass
def test_has_default_route(self):
path = "/"
rv = self.app.get(path)
self.assertEquals("200 OK", rv.status)
self.assertIn("Don't have an account?", rv.data)
def test_main_page(self):
path = "/"
rv = self.app.get(path)
self.assertEquals("200 OK", rv.status)
# has "log in" link
self.assertIn("Log in", rv.data)
# has "sign up " link
self.assertIn("Sign up", rv.data)
# has text explaining example
text = ("This is a demonstration of how "
"to add TOTP based Two-Factor Authentication "
"to an existing application.")
self.assertIn(text, rv.data)
# has link to GitHub repo
# self.assertEquals('href="https://github.com/"', rv.data)
def login(self, username, password):
return self.app.post('/', data=dict(
username=username,
password=password
), follow_redirects=True)
def sign_up(self, username, password1, <PASSWORD>):
return self.app.post('/sign-up', data=dict(
username=username,
password1=<PASSWORD>,
password2=<PASSWORD>
), follow_redirects=True)
def logout(self):
return self.app.get('/logout', follow_redirects=True)
def test_sign_in(self):
# Form gives error if username or password is bad
rv = self.login('user', '<PASSWORD>')
self.assertIn("Incorrect Username or Password.", rv.data)
rv = self.login('baduser', 'password')
self.assertIn("Incorrect Username or Password.", rv.data)
rv = self.login('baduser', '<PASSWORD>')
self.assertIn("Incorrect Username or Password.", rv.data)
rv = self.login('user', 'password')
self.assertIn("You are logged in", rv.data)
self.logout()
def test_one_sign_in_with_tfa(self):
"""If TFA enabled, user redirected to a "Verify TFA" page"""
rv = self.login('user.app_no.sms_yes', 'password')
self.assertIn("Account Verification", rv.data)
self.logout()
def test_sign_up(self):
"""Person enters username and password on signup page"""
# Error prompt if passwords don't match
rv = self.sign_up('newuser', 'password', '<PASSWORD>')
self.assertIn("Passwords do not match", rv.data)
# Success
rv = self.sign_up('newuser', 'password', 'password')
self.assertIn("You are logged in", rv.data)
# Error prompt if username already exists
rv = self.sign_up('user', 'password', 'password')
self.assertIn("That username is already in use", rv.data)
def test_sign_up_case_insensitive(self):
rv = self.sign_up('CaseInsensitive', 'password', 'password')
self.assertIn("You are logged in", rv.data)
# Error prompt if username already exists
self.logout()
rv = self.login('caseinsensitive', 'password')
self.assertIn("You are logged in", rv.data)
def test_logged_in_no_tfa(self):
"""User presented with page 'you are logged in!'"""
# Page has "You are logged in!"
rv = self.login('user', 'password')
self.assertIn("You are logged in", rv.data)
# Page has "Enable Two-Factor Authentication" if TFA isn't enabled
self.assertIn("Enable app based authentication", rv.data)
# Page has "Enable SMS Authentication" if SMS auth isn't enabled
self.assertIn("Enable SMS based authentication", rv.data)
# Page has "log out" link
self.assertIn("Log out", rv.data)
def test_sign_in_tfa_permutations(self):
# app: no
# sms: no
rv = self.login('user.app_no.sms_no', 'password')
self.assertIn("You are logged in", rv.data)
self.logout()
# app: no
# sms: yes
rv = self.login('user.app_no.sms_yes', 'password')
self.assertNotIn("You are logged in", rv.data)
self.assertIn("Account Verification", rv.data)
self.assertNotIn("Google Authenticator", rv.data)
self.assertIn("SMS that was just sent to you", rv.data)
self.assertIn("Enter your verification code here", rv.data)
self.logout()
# app: yes
# sms: no
rv = self.login('user.app_yes.sms_no', 'password')
self.assertNotIn("You are logged in", rv.data)
self.assertIn("Account Verification", rv.data)
self.assertIn("Google Authenticator", rv.data)
self.assertNotIn("SMS that was just sent to you", rv.data)
self.assertIn("Enter your verification code here", rv.data)
self.logout()
# app: yes
# sms: yes
rv = self.login('user.app_yes.sms_yes', 'password')
self.assertNotIn("You are logged in", rv.data)
self.assertIn("Account Verification", rv.data)
self.assertIn("Google Authenticator", rv.data)
self.assertIn("SMS that was just sent to you", rv.data)
self.assertIn("Enter your verification code here", rv.data)
self.logout()
# def test_logged_in_permutations(self):
# # app: no
# # SMS: no
# rv = self.login('user.tfa_no.sms_no', 'password')
# self.assertIn("You are logged in", rv.data)
# self.assertIn("Enable Two-Factor Authentication", rv.data)
# self.assertIn("Enable SMS Authentication", rv.data)
# self.logout()
#
# # app: no
# # SMS: yes
# rv = self.login('user.tfa_no.sms_yes', 'password')
# self.assertIn("You are logged in", rv.data)
# self.assertIn("Enable Two-Factor Authentication", rv.data)
# self.assertIn("Disable SMS Authentication", rv.data)
# self.logout()
#
# # app: yes
# # SMS: no
# rv = self.login('user.tfa_yes.sms_no', 'password')
# self.assertIn("You are logged in", rv.data)
# self.assertIn("Disable Two-Factor Authentication", rv.data)
# self.assertIn("Enable SMS Authentication", rv.data)
# self.logout()
#
# # app: yes
# # SMS: yes
# rv = self.login('user.tfa_yes.sms_yes', 'password')
# self.assertIn("You are logged in", rv.data)
# self.assertIn("Disable Two-Factor Authentication", rv.data)
# self.assertIn("Disable SMS Authentication", rv.data)
# self.logout()
def test_enable_tfa_via_app(self):
self.login('user', 'password')
path = "/enable-tfa-via-app"
rv = self.app.get(path)
self.assertIn("200 OK", rv.status)
self.assertIn("Install Google Authenticator", rv.data)
self.assertIn("Open the Google Authenticator app", rv.data)
self.assertIn('Tap menu, then tap "Set up account"', rv.data)
self.assertIn('then tap "Scan a barcode"', rv.data)
self.assertIn("scan the barcode below", rv.data)
text = ("Once you have scanned the barcode, "
"enter the 6-digit code below")
self.assertIn(text, rv.data)
self.assertIn("Submit", rv.data)
self.assertIn("Cancel", rv.data)
    def make_token(self, username):
        # Generate a currently-valid TOTP token from the user's stored secret,
        # mirroring what an authenticator app would produce for this account.
        user = self.db.find_one({'uid': username})
        auth = TotpAuth(user['totp_secret'])
        return auth.generate_token()
def test_enable_tfa_via_app_setup(self):
self.login('user', 'password')
token = self.make_token('user')
rv = self.app.post('/enable-tfa-via-app', data=dict(
token=token
), follow_redirects=True)
self.assertIn('You are set up', rv.data)
self.assertIn('via Google Authenticator', rv.data)
self.login('user2', 'password')
bad_token = str(int(token) + 1)
rv = self.app.post('/enable-tfa-via-app', data=dict(
token=bad_token
), follow_redirects=True)
self.assertIn('There was an error verifying your token', rv.data)
def enable_sms_auth(self, phone_number):
return self.app.post('/enable-tfa-via-sms', data=dict(
phone_number=phone_number
), follow_redirects=True)
def test_enable_sms_auth(self):
self.login('user', 'password')
rv = self.enable_sms_auth('+14155551212')
self.assertIn("200 OK", rv.status)
self.assertIn("Enter your mobile phone number", rv.data)
self.assertIn("A 6-digit verification code will be sent", rv.data)
self.assertIn("Enter your verification code", rv.data)
self.assertIn("Submit and verify", rv.data)
self.assertIn("Cancel", rv.data)
def test_enable_sms_auth_validation(self):
self.login('user', 'password')
# submit a bad phone number to the form
for num in ['Fake', '+14155551212']:
# make sure we get an error
rv = self.enable_sms_auth(num)
self.assertIn('There was an error sending', rv.data)
# submit a good phone number to the form
num = '+14158675309'
self.enable_sms_auth(num)
# make sure the SMS method mock was called
self.create_sms_mock.assert_called_with(to=num,
from_='+14155551212',
body=ANY)
# take the contents of the call to the SMS mock
called_with = self.create_sms_mock.call_args
body = called_with[1]['body']
# 'Use this code to log in: 123456'
token = body.split(': ')[1]
# submit the contents in the form
rv = self.app.post('/enable-tfa-via-sms', data=dict(
token=token
), follow_redirects=True)
# test for success
self.assertIn('You are set up', rv.data)
self.assertIn('via Twilio SMS', rv.data)
self.logout()
self.login('user2', 'password')
self.enable_sms_auth(num)
# send back a bad number (old token + 1)
bad_token = str(int(token) + 1)
rv = self.app.post('/enable-tfa-via-sms', data=dict(
token=bad_token
), follow_redirects=True)
self.assertIn('There was an error verifying your token', rv.data)
| StarcoderdataPython |
104661 | <filename>plugins/dns/client.py
"""
Client
Run by the evaluator, tries to make a GET request to a given server
"""
import argparse
import logging
import os
import random
import socket
import sys
import time
import traceback
import urllib.request
import dns.resolver
import requests
import actions.utils
from plugins.plugin_client import ClientPlugin
class DNSClient(ClientPlugin):
"""
Defines the DNS client.
"""
name = "dns"
    def __init__(self, args):
        """
        Initializes the DNS client.

        Args:
            args: dict of plugin arguments (see get_args), stored for later use.
        """
        ClientPlugin.__init__(self)
        self.args = args
@staticmethod
def get_args(command):
"""
Defines required args for this plugin
"""
super_args = ClientPlugin.get_args(command)
parser = argparse.ArgumentParser(description='DNS Client')
parser.add_argument('--use-tcp', action='store_true', help='leverage TCP for this plugin')
parser.add_argument('--dns-server', action='store', default="8.8.8.8", help='domain server to connect to')
parser.add_argument('--query', action='store', default="facebook.com", help='censored domain to query')
parser.add_argument('--timeout', action='store', default="3", type=int, help='how long in seconds the client should wait for a response')
parser.add_argument('--port', action='store', default="53", type=int, help='port the DNS server is running on (must be 53)')
args, _ = parser.parse_known_args(command)
args = vars(args)
super_args.update(args)
return super_args
def run(self, args, logger, engine=None):
"""
Try to make a forbidden DNS query.
"""
fitness = 0
to_lookup = args.get("query", "facebook.com")
dns_server = args.get("dns_server", "8.8.8.8")
use_tcp = args.get("use_tcp", False)
assert dns_server, "Cannot launch DNS test with no DNS server"
assert to_lookup, "Cannot launch DNS test with no server to query"
fitness = -1000
try:
fitness = self.dns_test(to_lookup, dns_server, args["output_directory"], args["environment_id"], logger, timeout=args.get("timeout", 3), use_tcp=use_tcp)
except Exception:
logger.exception("Exception caught in DNS test to resolver %s.", dns_server)
fitness += -100
# When performing a DNS test, a timeout is indistinguishable from
# a reset, which means we can't tell if the strategy broke the packet
# stream, or if the censor caught us. Strategies that break the stream
# should be punished more harshly, so raise the fitness slightly
# if the engine detected censorship for failed DNS tests.
if use_tcp and engine and engine.censorship_detected and fitness < 0:
fitness += 10
return fitness * 4
    def dns_test(self, to_lookup, dns_server, output_dir, environment_id, logger, timeout=3, use_tcp=False):
        """
        Makes a DNS query to a given censored domain.

        Writes the resolved IP (if any) to <output_dir>/flags/<environment_id>.dnsresult
        for the evaluator to verify, since the IP may be a bogus injected
        response. Returns 100 on a response, -100 on timeout or no nameservers.
        """
        # Make the path an absolute path
        if not output_dir.startswith("/"):
            output_dir = os.path.join(actions.utils.PROJECT_ROOT, output_dir)
        resolver = dns.resolver.Resolver()
        protocol = "UDP"
        if use_tcp:
            protocol = "TCP"
        logger.debug("Querying %s to DNS server %s over %s" % (to_lookup, dns_server, protocol))
        resolver.nameservers = [dns_server]
        # Setup the timeout and lifetime for this resolver
        # NOTE(review): lifetime is hard-coded to 3 and does not track the
        # 'timeout' parameter -- confirm whether that is intentional.
        resolver.timeout = timeout
        resolver.lifetime = 3
        try:
            # Query for an A record; take the first answer only.
            answer = resolver.query(to_lookup, "A", tcp=use_tcp)[0]
            logger.debug("Got IP address: %s" % answer)
            # At this point, we've been given an IP address by the DNS resolver, but we don't
            # yet know if this IP address is a bogus injected response, or legitimate. Further,
            # because we are likely running this code from within a censored regime which might
            # employ secondary censorship at the IP level, we cannot check if this IP is legit
            # here. Instead, we write it out to a file for the evaluator to extract and check for us.
            with open(os.path.join(output_dir, "flags", environment_id)+".dnsresult", "w") as dnsfile:
                dnsfile.write(str(answer))
            # For now, set fitness to a positive metric, though the evaluator will lower it if
            # the IP address we were given was bogus.
            fitness = 100
        except dns.exception.Timeout:
            logger.error("DNS query timed out.")
            fitness = -100
        except dns.resolver.NoNameservers:
            logger.error("DNS server failed to respond")
            fitness = -100
        return fitness
| StarcoderdataPython |
8098105 | #cluster tica data into clusters
import pyemma.coordinates as coor
import numpy as np
# System label used in input/output file names.
# NOTE(review): 'sys' shadows the stdlib module name -- confirm nothing in
# this script needs the sys module before renaming.
sys = 'fdis'
tica_data = coor.load('tica_data_05/fdis_tica_data.h5')
# Number of k-means cluster centers for discretising the TICA space.
n_clusters = 100
cl = coor.cluster_kmeans(tica_data, k=n_clusters, max_iter=50)
#cl.save(f'cluster_data/{sys}_{n_clusters}_mini_cluster_object.h5', overwrite=True)
# Write the discretised trajectories (cluster assignments) to HDF5.
cl.write_to_hdf5(f'cluster_data_11/{sys}_{n_clusters}_cluster_dtrajs22.h5')
| StarcoderdataPython |
6414120 | from typing import Text
from .base import PackageManager
from .requirements import SimpleSubstitution
class PriorityPackageRequirement(SimpleSubstitution):
    """Substitution that installs priority packages (e.g. build prerequisites
    like cython/numpy/setuptools) out of the normal requirement flow, so they
    are available before the rest of the requirements are processed."""
    # Packages always treated as priority (install failure propagates).
    name = ("cython", "numpy", "setuptools", )
    # Packages installed best-effort: failure removes them from requirements.
    optional_package_names = tuple()
    def __init__(self, *args, **kwargs):
        super(PriorityPackageRequirement, self).__init__(*args, **kwargs)
        # check if we need to replace the packages:
        # NOTE(review): configuration overrides mutate the *class* attributes,
        # affecting all instances -- confirm this sharing is intended.
        priority_packages = self.config.get('agent.package_manager.priority_packages', None)
        if priority_packages:
            self.__class__.name = priority_packages
        priority_optional_packages = self.config.get('agent.package_manager.priority_optional_packages', None)
        if priority_optional_packages:
            self.__class__.optional_package_names = priority_optional_packages
    def match(self, req):
        """Return True if the requirement's (lower-cased) name is a priority
        or optional-priority package."""
        # match both Cython & cython
        return req.name and (req.name.lower() in self.name or req.name.lower() in self.optional_package_names)
    def replace(self, req):
        """
        Replace a requirement

        Installs the package out of scope. Optional packages are best-effort:
        on any failure the requirement is replaced with an empty string.
        :raises: ValueError if version is pre-release
        """
        if req.name in self.optional_package_names:
            # noinspection PyBroadException
            try:
                if PackageManager.out_of_scope_install_package(str(req)):
                    return Text(req)
            except Exception:
                # Deliberate best-effort: drop the optional package on failure.
                pass
            return Text('')
        PackageManager.out_of_scope_install_package(str(req))
        return Text(req)
| StarcoderdataPython |
6564333 | <gh_stars>10-100
#!/usr/bin/env python
# Copyright 2017 <NAME> and <NAME>
'''Functions to run and interface with bwa'''
import subprocess
import os
import sys
from collections import namedtuple
BWA = namedtuple('BWA', ['mapped', 'positions'])
MAX_FASTMAP_HITS = 100
# Creates a bwa index, if it does not already exist
def bwa_index(fasta_file):
    """Create a bwa index for fasta_file unless all index files already exist.

    Runs ``bwa index`` only when at least one of the five bwa index files
    (.amb/.ann/.bwt/.pac/.sa) is missing next to the FASTA file.
    """
    suffix_list = [".amb", ".ann", ".bwt", ".pac", ".sa"]
    create_index = any(not os.path.isfile(fasta_file + suf)
                       for suf in suffix_list)
    if create_index:
        # NOTE: shell=True interpolates fasta_file into a shell command line;
        # paths containing spaces or shell metacharacters will break/misbehave.
        command = "bwa index " + fasta_file
        try:
            subprocess.run(command, shell=True, check=True, stderr=subprocess.DEVNULL)
        except AttributeError:
            # python prior to 3.5 has no subprocess.run; ensure the devnull
            # handle is closed after use.
            with open(os.devnull, 'w') as devnull:
                subprocess.check_call(command, shell=True, stderr=devnull)
# Runs bwa, iterates over results and parses them
def bwa_iter(reference, fasta, algorithm):
    """Run bwa ('mem' or 'fastmap') mapping `fasta` against `reference`.

    Yields one BWA(mapped, positions) record per query sequence, where
    positions is a list of (contig, start, end, strand) tuples.
    Raises ValueError for an unknown `algorithm`.
    """
    if algorithm == "mem":
        command = "bwa mem -v 1 -k 8 '" + reference + "' '" + fasta + "'"
    elif algorithm == "fastmap":
        command = "bwa fastmap -w %d -l 9 '" % (MAX_FASTMAP_HITS) + reference + "' '" + fasta + "'"
    else:
        sys.stderr.write("Unknown algorithm type for bwa\n")
        raise ValueError(algorithm)
    bwa_p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, universal_newlines=True)
    prev_record = None
    # read sam file from bwa mem
    if algorithm == "mem":
        for sam_line in bwa_p.stdout:
            sam_fields = sam_line.rstrip().split("\t")
            # discard header
            if sam_fields[0][0] == "@":
                continue
            # ignore supplementary alignments (SAM flag 2048)
            if int(sam_fields[1]) & 2048 == 2048:
                continue
            # only the first SAM line per query name is used
            if sam_fields[0] == prev_record:
                sys.stderr.write("WARNING: Found same k-mer line multiple times in SAM file\n")
                continue
            else:
                prev_record = sam_fields[0]
            positions = []
            # SAM flag 4 marks an unmapped read
            if int(sam_fields[1]) & 4 == 4:
                mapped = False
            else:
                mapped = True
            # primary mapping strand (SAM flag 16 = reverse complement)
            if int(sam_fields[1]) & 16 == 16:
                strand = "-"
            else:
                strand = "+"
            if len(sam_fields) < 10:
                mapped = False
                # NOTE(review): `positions` is set to True (not a list) here, so
                # consumers receive BWA(False, True) for truncated SAM lines.
                # This looks like a bug (an empty list seems intended) -- confirm
                # against callers before changing.
                positions = True
            else:
                positions.append((sam_fields[2], sam_fields[3], int(sam_fields[3]) + len(sam_fields[9]) - 1, strand))
            # secondary mappings (as good as primary - same CIGAR string)
            if len(sam_fields) > 15:
                try:
                    secondary = sam_fields[15].split(":")
                    if secondary[0] == "XA" and secondary[1] == "Z":
                        for secondary_mapping in secondary[2].split(";"):
                            if secondary_mapping != '':
                                (contig, pos, cigar, edit_distance) = secondary_mapping.split(",")
                                if cigar == sam_fields[5]:
                                    strand = pos[0]
                                    positions.append((contig, pos[1:], int(pos[1:]) + len(sam_fields[9]) - 1, strand))
                # Ignore secondary mappings which don't match the expected format
                except ValueError:
                    pass
            yield(BWA(mapped, positions))
    # read bwa fastmap output
    else:
        mapped = False
        positions = []
        first_line = bwa_p.stdout.readline().rstrip().split("\t")
        if first_line == ['']:
            return
        # fastmap record header: SQ line with query name and length
        (sq, idx, length) = first_line
        while True:
            fastmap_line = bwa_p.stdout.readline()
            fastmap_line = fastmap_line.rstrip()
            # "//" terminates one query's record; emit it and start the next
            if fastmap_line == "//":
                next_line = bwa_p.stdout.readline().rstrip().split("\t")
                fastmap_hit = BWA(mapped, positions)
                if len(next_line) < 3: # EOF reached
                    yield(fastmap_hit)
                    return
                else:
                    (sq, idx, length) = next_line
                    mapped = False
                    positions = []
                    yield(fastmap_hit)
            else:
                # NOTE(review): `hits` is assigned but never used.
                hits = []
                fastmap_fields = fastmap_line.split("\t")
                # in case a line is missing a few fields
                if len(fastmap_fields) < 5:
                    continue
                #
                if fastmap_fields[1] == '0' and fastmap_fields[2] == length: # full hits only
                    mapped = True
                    for hit in fastmap_fields[4:]:
                        # too many hits, skip this entry
                        # (see: https://bioinformatics.stackexchange.com/a/13052/123)
                        if hit == '*':
                            sys.stderr.write("Skipping fastmap entry with more than %d hits\n" % MAX_FASTMAP_HITS)
                            # corner case: if still not mapped, flag this k-mer as unmapped
                            # as we don't have a small enough set of mapping positions
                            # NOTE(review): `mapped = False` under `if not mapped` is a
                            # no-op; possibly `mapped = False` unconditionally was meant.
                            if not mapped:
                                mapped = False
                            #
                            continue
                        (contig, pos) = hit.split(":")
                        strand = pos[0]
                        positions.append((contig, int(pos[1:]), int(pos[1:]) + int(length) - 1, strand))
| StarcoderdataPython |
241419 | import dj_database_url
# Django settings used for running the pgpubsub test suite.
# Hard-coded secret key: acceptable only because these settings are for tests.
SECRET_KEY = 'django-pgpubsub'
# Install the tests as an app so that we can make test models
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'pgpubsub',
    'pgpubsub.tests',
    'pgtrigger',
]
# Database url comes from the DATABASE_URL env var
DATABASES = {'default': dj_database_url.config()}
# Keep the pre-Django-3.2 default primary key type.
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# NOTE(review): Django compares ALLOWED_HOSTS entries against the host
# without the port, so '0.0.0.0:8000' will never match -- presumably
# '0.0.0.0' was meant; confirm.
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '0.0.0.0:8000']
| StarcoderdataPython |
3465059 | <gh_stars>10-100
# validated.py
from mongoframes import *
__all__ = [
'InvalidDocument',
'ValidatedFrame'
]
class FormData:
    """
    A class that wraps a dictionary providing a request like object that can be
    used as the `formdata` argument when initializing a `Form`.

    Every value is stored as a list internally; scalar values become
    single-element lists, list values are kept as multi-value entries.
    """

    def __init__(self, data):
        """Build the internal multi-value map from `data` (a plain dict)."""
        self._data = {}
        for key, value in data.items():
            bucket = self._data.setdefault(key, [])
            if isinstance(value, list):
                bucket += value
            else:
                bucket.append(value)

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def __contains__(self, name):
        return (name in self._data)

    def get(self, key, default=None):
        """Return the first value stored under `key`, or `default`.

        Also returns `default` when the key maps to an empty list (the
        original indexed `[0]` unconditionally and raised IndexError for
        keys whose input value was an empty list).
        """
        values = self._data.get(key)
        if values:
            return values[0]
        return default

    def getlist(self, key):
        """Return all values stored under `key` (empty list if absent)."""
        return self._data.get(key, [])
class InvalidDocument(Exception):
    """
    An exception raised when `save` is called and the document fails validation.
    """

    def __init__(self, errors):
        # Keep the structured errors accessible while presenting their string
        # form as the exception message.
        super().__init__(str(errors))
        self.errors = errors
class ValidatedFrame(Frame):
    """Frame subclass that validates documents with a WTForm before saving."""

    # Assign a WTForm class here to enable validation; `None` skips it.
    _form = None

    def save(self, *fields):
        """Validate the document before inserting/updating it"""
        # No form class configured: persist without validation.
        if not self._form:
            return self.upsert(*fields)

        # Default to every field when no explicit subset was requested.
        field_names = fields or self._fields
        payload = {name: self[name] for name in field_names if name in self}

        # Wrap the data so the WTForm can consume it like request form data.
        form = self._form(FormData(payload))

        # Reduce the form fields to match the data being saved.
        for form_field in form:
            if form_field.name not in payload:
                delattr(form, form_field.name)

        if not form.validate():
            raise InvalidDocument(form.errors)

        # Document is valid, save the changes :)
        self.upsert(*fields)
9619720 | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
"""
This subpackage is used for reading and writing an :class:`AtomArray` or
:class:`AtomArrayStack` using the internal NPZ file format. This binary
format is used to store `NumPy` arrays. Since atom arrays and stacks are
completely built on `NumPy` arrays, this format is preferable for
Biotite internal usage due to fast I/O operations and preservation
of all atom annotation arrays.
"""
__name__ = "biotite.structure.io.npz"
__author__ = "<NAME>"
from .file import * | StarcoderdataPython |
6425477 | <reponame>mjsiers/practice-data-cpcapaper
import logging
import numpy as np
from scipy.stats import norm
# NOTE(review): calling basicConfig at import time configures the root logger
# as a module side effect -- fine for a script, surprising when imported as a
# library; confirm intended usage.
logging.basicConfig(level="INFO", format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# Module-wide logger used by all generator functions below.
logger = logging.getLogger("data")
def baseline_generator(num, x, noise=0.00100):
    """Generate `num` power-law baseline curves over the x-axis values `x`.

    Returns a tuple (exponents, curves): exponents has shape (num,) and
    curves has shape (num, len(x)). Gaussian noise scaled by `noise` is
    overlaid whenever `noise` exceeds the 0.0001 threshold.
    """
    npoints = x.shape[0]
    bexponents = np.zeros(num)
    baselines = np.zeros((num, npoints))
    logger.debug('BCurves shape: [%s]', baselines.shape)

    for idx in range(num):
        # Random exponent in [2.1, 2.2) defines the curve's power-law shape.
        bexponents[idx] = np.random.uniform(2.1, 2.2)
        logger.debug('Baseline Exponent: [%.4f]', bexponents[idx])

        # Decreasing power-law curve, shifted so its minimum sits at zero.
        raw_curve = -1e-7 * x ** bexponents[idx]
        curve = raw_curve + np.min(raw_curve) * -1.0

        # Overlay Gaussian noise when a meaningful noise level was requested.
        if noise > 0.0001:
            curve = curve + noise * np.random.normal(size=npoints)

        baselines[idx] = curve

    return bexponents, baselines
def signal_generator(x, cpeaks, noise=0.00075):
    """Generate signal curves as weighted sums of two fixed Gaussian peaks.

    `cpeaks` has shape (n, 2): per-curve weights for the peaks centred at
    x=310 (scale 40) and x=390 (scale 20). Gaussian noise scaled by `noise`
    is overlaid whenever `noise` exceeds the 0.0001 threshold.
    """
    # Two fixed Gaussian component peaks stacked into a (2, len(x)) matrix.
    peak_a = norm.pdf(x, loc=310.0, scale=40.0)
    peak_b = norm.pdf(x, loc=390.0, scale=20.0)
    components = np.vstack((peak_a, peak_b))

    num_curves = cpeaks.shape[0]
    signals = np.zeros((num_curves, x.shape[0]))
    logger.debug('Signals shape: [%s]', signals.shape)

    for idx in range(num_curves):
        # Weighted mixture of the two component peaks.
        curve = np.dot(cpeaks[idx], components)
        if noise > 0.0001:
            curve = curve + noise * np.random.normal(size=x.shape[0])
        signals[idx] = curve

    return signals
def data_generator_signals(c, xnum=600, noise=0.0):
    """Generate pure (baseline-free) signals for concentration levels `c`."""
    x_axis = np.arange(0, xnum, 1.0)
    # Each concentration c maps to the two peak weights (c, 1 - c).
    peak_weights = np.vstack((c, (1.0 - c))).T
    return signal_generator(x_axis, peak_weights, noise=noise)
def data_generator_levels(c, xnum=600, baselineonly=False):
    """Generate baseline curves (optionally plus signals) for levels `c`.

    Returns (x, c, baseline_exponents, curves) where curves has shape
    (len(c), xnum). When `baselineonly` is True the signals are omitted.
    """
    x_axis = np.arange(0, xnum, 1.0)
    # Each concentration c maps to the two peak weights (c, 1 - c).
    peak_weights = np.vstack((c, (1.0 - c))).T
    logger.debug('CLevels shape: [%s]', peak_weights.shape)

    exponents, baselines = baseline_generator(c.shape[0], x_axis)
    if baselineonly:
        curves = baselines
    else:
        curves = baselines + signal_generator(x_axis, peak_weights)
    logger.debug('Results shape: [%s]', curves.shape)
    return x_axis, c, exponents, curves
def data_generator(cnum, xnum=600, baselineonly=False):
    """Generate data for `cnum` randomly drawn concentration levels."""
    levels = np.random.random(cnum)
    return data_generator_levels(levels, xnum, baselineonly=baselineonly)
if __name__ == "__main__":
    # Smoke test: exercise the generators with explicit concentration levels
    # and with randomly drawn level sets of increasing size. Results are
    # discarded; this only checks the calls run without error.
    cvals = np.array([0.25, 0.50, 0.75])
    xvals, targets, blexps, ydata = data_generator_levels(cvals)
    xvals, targets, blexps, ydata = data_generator(5)
    xvals, targets, blexps, ydata = data_generator(10)
| StarcoderdataPython |
6469941 | from django import forms
from django.utils.translation import ugettext as _
class WriteInForm(forms.Form):
    # Optional free-text input letting a voter add their own option to a poll.
    write_in_form = forms.CharField(widget=forms.TextInput, required=False, max_length=200,label="Add a write in option")
class MultiVoteForm(forms.Form):
    """
    A voting form whose fields are built dynamically from the `extra` kwarg:

        {'type': 'ranked' | 'checkbox' | 'radio',
         'max_choices': int or None,
         'option_list': [{'type': 'normal' | 'write_in', 'obj': <option>}, ...]}

    Leftover debug `print()` calls from the original have been removed.
    """

    def __init__(self, *args, **kwargs):
        dynamic_form = kwargs.pop('extra')
        self.form_type = dynamic_form['type']
        self.max_choices = dynamic_form['max_choices']
        super(MultiVoteForm, self).__init__(*args, **kwargs)
        options = dynamic_form['option_list']
        if self.form_type == 'ranked':
            # One rank drop-down per option: ranks 1..N plus a blank (unranked).
            rank_choices = [(rank, rank) for rank in range(1, len(options) + 1)]
            rank_choices.append(('', ''))
            for option in options:
                field_name = 'choice_{}-{}'.format(option['type'], option['obj'].id)
                self.fields[field_name] = forms.ChoiceField(
                    label=option['obj'].option_text, choices=rank_choices, required=False)
        if self.form_type == 'checkbox':
            # One checkbox per option.
            for option in options:
                field_name = 'choice_{}-{}'.format(option['type'], option['obj'].id)
                self.fields[field_name] = forms.BooleanField(
                    label=option['obj'].option_text, required=False)
        if self.form_type == 'radio':
            # A single radio group with one entry per option.
            # Option types are 'normal' or 'write_in'.
            radio_choices = [
                ('{}-{}'.format(option['type'], option['obj'].id), option['obj'].option_text,)
                for option in options
            ]
            self.fields['choice'] = forms.ChoiceField(
                widget=forms.RadioSelect(), choices=radio_choices, required=False)

    def dynamic_answers(self):
        """Yield (label, value) pairs for the dynamically created choice fields."""
        # NOTE(review): radio forms use the field name 'choice' (no underscore
        # suffix), so they are never yielded here -- confirm this is intentional.
        for name, value in self.cleaned_data.items():
            if name.startswith('choice_'):
                yield (self.fields[name].label, value)

    def clean(self):
        cleaned_data = super(MultiVoteForm, self).clean()
        if self.form_type == "ranked":
            # Each rank may be used at most once across all options.
            fields_by_rank = {}
            for field_name, rank in cleaned_data.items():
                if rank:
                    fields_by_rank.setdefault(rank, []).append(field_name)
            for rank, field_names in fields_by_rank.items():
                if len(field_names) > 1:
                    for field_name in field_names:
                        self.add_error(field_name, "Two options cannot have the same rank.")
        if self.form_type == "checkbox":
            # Enforce the configured maximum number of selected checkboxes.
            count_selected = sum(1 for value in cleaned_data.values() if value)
            if self.max_choices and count_selected > self.max_choices:
                raise forms.ValidationError(
                    _("The maximum number of choices is {}.".format(self.max_choices)),
                    code="MORE_THAN")
| StarcoderdataPython |
4831826 | from sanic.response import json
def invoke():
    """Health-check endpoint handler: report the service as online."""
    status_payload = {"status": "online"}
    return json(status_payload)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.