Dataset schema (column: type, observed min–max; ⌀ marks nullable columns):

- hexsha: stringlengths 40–40
- size: int64 4–996k
- ext: stringclasses 8 values
- lang: stringclasses 1 value
- max_stars_repo_path: stringlengths 4–245
- max_stars_repo_name: stringlengths 6–130
- max_stars_repo_head_hexsha: stringlengths 40–40
- max_stars_repo_licenses: listlengths 1–10
- max_stars_count: int64 1–191k ⌀
- max_stars_repo_stars_event_min_datetime: stringlengths 24–24 ⌀
- max_stars_repo_stars_event_max_datetime: stringlengths 24–24 ⌀
- max_issues_repo_path: stringlengths 4–245
- max_issues_repo_name: stringlengths 6–130
- max_issues_repo_head_hexsha: stringlengths 40–40
- max_issues_repo_licenses: listlengths 1–10
- max_issues_count: int64 1–67k ⌀
- max_issues_repo_issues_event_min_datetime: stringlengths 24–24 ⌀
- max_issues_repo_issues_event_max_datetime: stringlengths 24–24 ⌀
- max_forks_repo_path: stringlengths 4–245
- max_forks_repo_name: stringlengths 6–130
- max_forks_repo_head_hexsha: stringlengths 40–40
- max_forks_repo_licenses: listlengths 1–10
- max_forks_count: int64 1–105k ⌀
- max_forks_repo_forks_event_min_datetime: stringlengths 24–24 ⌀
- max_forks_repo_forks_event_max_datetime: stringlengths 24–24 ⌀
- content: stringlengths 4–996k
- avg_line_length: float64 1.33–58.2k
- max_line_length: int64 2–323k
- alphanum_fraction: float64 0–0.97
- content_no_comment: stringlengths 0–946k
- is_comment_constant_removed: bool 2 classes
- is_sharp_comment_removed: bool 1 class
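
The schema above is per-file metadata for a source-code corpus. As a minimal sketch of how rows with this schema could be queried, assuming they have been exported locally as JSON Lines (the dataset's name and export path are not given here, so the file name below is a placeholder):

```python
# Hedged sketch: load a local JSON Lines export of rows shaped like the
# schema above, then filter on two of the numeric columns.
from datasets import load_dataset

ds = load_dataset("json", data_files="rows.jsonl", split="train")
small_dense = ds.filter(lambda r: r["size"] < 2_000 and r["alphanum_fraction"] > 0.6)
for row in small_dense.select(range(min(3, len(small_dense)))):
    print(row["hexsha"][:12], row["max_stars_repo_path"])
```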

---

- hexsha: 790843272d4c56a5b15a6c0691204d0cfaecea76 · size: 1,459 · ext: py · lang: Python
- path: Configuration/GenProduction/python/ThirteenTeV/GMSB_noSLHA/GMSB_L350TeV_Ctau400cm_Pythia8_13TeV_cff.py
- repo: zhangzc11/cms-gmsb-sps8-configs @ 838e6aac1d13251e050c0ee8c4ed26ca0c6cef7e · licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
- max_stars_count: null · max_issues_count: null · max_forks_count: null · event datetimes: null
- content:

```python
with open("/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/GMSB_SHLA/GMSB_Lambda350TeV_CTau400cm.slha") as f:
SLHA_TABLE = f.read()
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
pythia8PSweightsSettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'SUSY:all on',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'pythia8PSweightsSettings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
```

- avg_line_length: 42.911765 · max_line_length: 138 · alphanum_fraction: 0.632625
- content_no_comment: duplicate of content with comment text stripped · is_comment_constant_removed: true · is_sharp_comment_removed: true
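
A hedged sketch of how a cmsRun driver configuration might consume this fragment; the dotted load path is inferred from the repository layout above (CMSSW omits the `python/` path segment), and production workflows would normally generate the full driver config with `cmsDriver.py` rather than write this by hand:

```python
# Sketch only: attach the fragment's ProductionFilterSequence to a GEN path.
import FWCore.ParameterSet.Config as cms

process = cms.Process("GEN")
process.load(
    "Configuration.GenProduction.ThirteenTeV.GMSB_noSLHA.GMSB_L350TeV_Ctau400cm_Pythia8_13TeV_cff"
)
process.generation_step = cms.Path(process.ProductionFilterSequence)
```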

---

- hexsha: 7908440e92b1d4474e49dd13dab36db956ff6086 · size: 16,282 · ext: py · lang: Python
- path: homeassistant/helpers/entity_platform.py
- repo: leloberg/Home-Assistant @ 5800b57791ac7767d19a50dcd07ea40353d925fc · licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
- max_stars_count: 1 (2019-05-29T15:43:01.000Z to 2019-05-29T15:43:01.000Z) · max_issues_count: null · max_forks_count: null
- content:

```python
"""Class to manage the entities for a single platform."""
import asyncio
from homeassistant.const import DEVICE_DEFAULT_NAME
from homeassistant.core import callback, valid_entity_id, split_entity_id
from homeassistant.exceptions import HomeAssistantError, PlatformNotReady
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
from .event import async_track_time_interval, async_call_later
SLOW_SETUP_WARNING = 10
SLOW_SETUP_MAX_WAIT = 60
PLATFORM_NOT_READY_RETRIES = 10
class EntityPlatform:
"""Manage the entities for a single platform."""
def __init__(self, *, hass, logger, domain, platform_name, platform,
scan_interval, entity_namespace,
async_entities_added_callback):
"""Initialize the entity platform.
hass: HomeAssistant
logger: Logger
domain: str
platform_name: str
scan_interval: timedelta
entity_namespace: str
async_entities_added_callback: @callback method
"""
self.hass = hass
self.logger = logger
self.domain = domain
self.platform_name = platform_name
self.platform = platform
self.scan_interval = scan_interval
self.entity_namespace = entity_namespace
self.async_entities_added_callback = async_entities_added_callback
self.config_entry = None
self.entities = {}
self._tasks = []
# Method to cancel the state change listener
self._async_unsub_polling = None
# Method to cancel the retry of setup
self._async_cancel_retry_setup = None
self._process_updates = asyncio.Lock()
# Platform is None for the EntityComponent "catch-all" EntityPlatform
# which powers entity_component.add_entities
if platform is None:
self.parallel_updates = None
self.parallel_updates_semaphore = None
return
self.parallel_updates = getattr(platform, 'PARALLEL_UPDATES', None)
# semaphore will be created on demand
self.parallel_updates_semaphore = None
def _get_parallel_updates_semaphore(self):
"""Get or create a semaphore for parallel updates."""
if self.parallel_updates_semaphore is None:
self.parallel_updates_semaphore = asyncio.Semaphore(
                # Note: the loop= kwarg was removed from asyncio primitives in
                # Python 3.10; there the semaphore binds to the running loop.
                self.parallel_updates if self.parallel_updates else 1
)
return self.parallel_updates_semaphore
async def async_setup(self, platform_config, discovery_info=None):
"""Set up the platform from a config file."""
platform = self.platform
hass = self.hass
@callback
def async_create_setup_task():
"""Get task to set up platform."""
if getattr(platform, 'async_setup_platform', None):
return platform.async_setup_platform(
hass, platform_config,
self._async_schedule_add_entities, discovery_info
)
# This should not be replaced with hass.async_add_job because
# we don't want to track this task in case it blocks startup.
return hass.loop.run_in_executor(
None, platform.setup_platform, hass, platform_config,
self._schedule_add_entities, discovery_info
)
await self._async_setup_platform(async_create_setup_task)
async def async_setup_entry(self, config_entry):
"""Set up the platform from a config entry."""
# Store it so that we can save config entry ID in entity registry
self.config_entry = config_entry
platform = self.platform
@callback
def async_create_setup_task():
"""Get task to set up platform."""
return platform.async_setup_entry(
self.hass, config_entry, self._async_schedule_add_entities)
return await self._async_setup_platform(async_create_setup_task)
async def _async_setup_platform(self, async_create_setup_task, tries=0):
"""Set up a platform via config file or config entry.
async_create_setup_task creates a coroutine that sets up platform.
"""
logger = self.logger
hass = self.hass
full_name = '{}.{}'.format(self.domain, self.platform_name)
logger.info("Setting up %s", full_name)
warn_task = hass.loop.call_later(
SLOW_SETUP_WARNING, logger.warning,
"Setup of platform %s is taking over %s seconds.",
self.platform_name, SLOW_SETUP_WARNING)
try:
task = async_create_setup_task()
await asyncio.wait_for(
asyncio.shield(task),
SLOW_SETUP_MAX_WAIT)
# Block till all entities are done
if self._tasks:
pending = [task for task in self._tasks if not task.done()]
self._tasks.clear()
if pending:
await asyncio.wait(
pending)
hass.config.components.add(full_name)
return True
except PlatformNotReady:
tries += 1
wait_time = min(tries, 6) * 30
logger.warning(
'Platform %s not ready yet. Retrying in %d seconds.',
self.platform_name, wait_time)
async def setup_again(now):
"""Run setup again."""
self._async_cancel_retry_setup = None
await self._async_setup_platform(
async_create_setup_task, tries)
self._async_cancel_retry_setup = \
async_call_later(hass, wait_time, setup_again)
return False
except asyncio.TimeoutError:
logger.error(
"Setup of platform %s is taking longer than %s seconds."
" Startup will proceed without waiting any longer.",
self.platform_name, SLOW_SETUP_MAX_WAIT)
return False
except Exception: # pylint: disable=broad-except
logger.exception(
"Error while setting up platform %s", self.platform_name)
return False
finally:
warn_task.cancel()
def _schedule_add_entities(self, new_entities, update_before_add=False):
"""Schedule adding entities for a single platform, synchronously."""
run_callback_threadsafe(
self.hass.loop,
self._async_schedule_add_entities, list(new_entities),
update_before_add
).result()
@callback
def _async_schedule_add_entities(self, new_entities,
update_before_add=False):
"""Schedule adding entities for a single platform async."""
self._tasks.append(self.hass.async_add_job(
self.async_add_entities(
new_entities, update_before_add=update_before_add)
))
def add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform."""
        # update_before_add=True from a worker thread risks a deadlock; warn.
if update_before_add:
self.logger.warning(
"Call 'add_entities' with update_before_add=True "
"only inside tests or you can run into a deadlock!")
run_coroutine_threadsafe(
self.async_add_entities(list(new_entities), update_before_add),
self.hass.loop).result()
async def async_add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform async.
This method must be run in the event loop.
"""
# handle empty list from component/platform
if not new_entities:
return
hass = self.hass
device_registry = await \
hass.helpers.device_registry.async_get_registry()
entity_registry = await \
hass.helpers.entity_registry.async_get_registry()
tasks = [
self._async_add_entity(entity, update_before_add,
entity_registry, device_registry)
for entity in new_entities]
# No entities for processing
if not tasks:
return
await asyncio.wait(tasks)
self.async_entities_added_callback()
if self._async_unsub_polling is not None or \
not any(entity.should_poll for entity
in self.entities.values()):
return
self._async_unsub_polling = async_track_time_interval(
self.hass, self._update_entity_states, self.scan_interval
)
async def _async_add_entity(self, entity, update_before_add,
entity_registry, device_registry):
"""Add an entity to the platform."""
if entity is None:
raise ValueError('Entity cannot be None')
entity.hass = self.hass
entity.platform = self
        # Async entity:
        #   PARALLEL_UPDATES == None: entity.parallel_updates = None
        #   PARALLEL_UPDATES == 0:    entity.parallel_updates = None
        #   PARALLEL_UPDATES > 0:     entity.parallel_updates = Semaphore(p)
        # Sync entity:
        #   PARALLEL_UPDATES == None: entity.parallel_updates = Semaphore(1)
        #   PARALLEL_UPDATES == 0:    entity.parallel_updates = None
        #   PARALLEL_UPDATES > 0:     entity.parallel_updates = Semaphore(p)
if hasattr(entity, 'async_update') and not self.parallel_updates:
entity.parallel_updates = None
elif (not hasattr(entity, 'async_update')
and self.parallel_updates == 0):
entity.parallel_updates = None
else:
entity.parallel_updates = self._get_parallel_updates_semaphore()
# Update properties before we generate the entity_id
if update_before_add:
try:
await entity.async_device_update(warning=False)
except Exception: # pylint: disable=broad-except
self.logger.exception(
"%s: Error on device update!", self.platform_name)
return
suggested_object_id = None
# Get entity_id from unique ID registration
if entity.unique_id is not None:
if entity.entity_id is not None:
suggested_object_id = split_entity_id(entity.entity_id)[1]
else:
suggested_object_id = entity.name
if self.entity_namespace is not None:
suggested_object_id = '{} {}'.format(
self.entity_namespace, suggested_object_id)
if self.config_entry is not None:
config_entry_id = self.config_entry.entry_id
else:
config_entry_id = None
device_info = entity.device_info
device_id = None
if config_entry_id is not None and device_info is not None:
processed_dev_info = {
'config_entry_id': config_entry_id
}
for key in (
'connections',
'identifiers',
'manufacturer',
'model',
'name',
'sw_version',
'via_hub',
):
if key in device_info:
processed_dev_info[key] = device_info[key]
device = device_registry.async_get_or_create(
**processed_dev_info)
if device:
device_id = device.id
entry = entity_registry.async_get_or_create(
self.domain, self.platform_name, entity.unique_id,
suggested_object_id=suggested_object_id,
config_entry_id=config_entry_id,
device_id=device_id,
known_object_ids=self.entities.keys())
if entry.disabled:
self.logger.info(
"Not adding entity %s because it's disabled",
entry.name or entity.name or
'"{} {}"'.format(self.platform_name, entity.unique_id))
return
entity.entity_id = entry.entity_id
entity.registry_name = entry.name
entity.async_on_remove(entry.add_update_listener(entity))
# We won't generate an entity ID if the platform has already set one
# We will however make sure that platform cannot pick a registered ID
elif (entity.entity_id is not None and
entity_registry.async_is_registered(entity.entity_id)):
# If entity already registered, convert entity id to suggestion
suggested_object_id = split_entity_id(entity.entity_id)[1]
entity.entity_id = None
# Generate entity ID
if entity.entity_id is None:
suggested_object_id = \
suggested_object_id or entity.name or DEVICE_DEFAULT_NAME
if self.entity_namespace is not None:
suggested_object_id = '{} {}'.format(self.entity_namespace,
suggested_object_id)
entity.entity_id = entity_registry.async_generate_entity_id(
self.domain, suggested_object_id, self.entities.keys())
        # Make sure it is valid in case an entity set the value itself
if not valid_entity_id(entity.entity_id):
raise HomeAssistantError(
'Invalid entity id: {}'.format(entity.entity_id))
if (entity.entity_id in self.entities or
entity.entity_id in self.hass.states.async_entity_ids(
self.domain)):
msg = 'Entity id already exists: {}'.format(entity.entity_id)
if entity.unique_id is not None:
msg += '. Platform {} does not generate unique IDs'.format(
self.platform_name)
raise HomeAssistantError(msg)
entity_id = entity.entity_id
self.entities[entity_id] = entity
entity.async_on_remove(lambda: self.entities.pop(entity_id))
await entity.async_added_to_hass()
await entity.async_update_ha_state()
async def async_reset(self):
"""Remove all entities and reset data.
This method must be run in the event loop.
"""
if self._async_cancel_retry_setup is not None:
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
if not self.entities:
return
tasks = [self.async_remove_entity(entity_id)
for entity_id in self.entities]
await asyncio.wait(tasks)
if self._async_unsub_polling is not None:
self._async_unsub_polling()
self._async_unsub_polling = None
async def async_remove_entity(self, entity_id):
"""Remove entity id from platform."""
await self.entities[entity_id].async_remove()
# Clean up polling job if no longer needed
if (self._async_unsub_polling is not None and
not any(entity.should_poll for entity
in self.entities.values())):
self._async_unsub_polling()
self._async_unsub_polling = None
async def _update_entity_states(self, now):
"""Update the states of all the polling entities.
To protect from flooding the executor, we will update async entities
in parallel and other entities sequential.
This method must be run in the event loop.
"""
if self._process_updates.locked():
self.logger.warning(
"Updating %s %s took longer than the scheduled update "
"interval %s", self.platform_name, self.domain,
self.scan_interval)
return
async with self._process_updates:
tasks = []
for entity in self.entities.values():
if not entity.should_poll:
continue
tasks.append(entity.async_update_ha_state(True))
if tasks:
await asyncio.wait(tasks)
```

- avg_line_length: 38.491726 · max_line_length: 78 · alphanum_fraction: 0.605577
- content_no_comment: duplicate of content with comment text stripped · is_comment_constant_removed: true · is_sharp_comment_removed: true
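
The semaphore selection in `_async_add_entity` above is easy to misread; this standalone sketch restates the same PARALLEL_UPDATES decision table (the helper name is hypothetical, not part of the original file):

```python
import asyncio

def pick_parallel_updates(has_async_update: bool, parallel_updates):
    """Mirror EntityPlatform's semaphore choice for one entity."""
    # Async entities run unbounded unless the platform sets a positive cap.
    if has_async_update and not parallel_updates:
        return None
    # Sync entities can opt out of throttling with PARALLEL_UPDATES = 0.
    if not has_async_update and parallel_updates == 0:
        return None
    # Otherwise share one semaphore: the configured cap, or 1 for sync code.
    return asyncio.Semaphore(parallel_updates if parallel_updates else 1)
```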

---

- hexsha: 79084464af6581fe0ffd17abdc5d4d350d5b94cb · size: 889 · ext: py · lang: Python
- path: web/src/additional_settings/celery_settings.py
- repo: SilinAlexander/django-chat @ 11e59209d478469bd3ea91a0f3c7a504ff34758f · licenses: ["MIT"] (identical across the stars/issues/forks column groups)
- max_stars_count: 4 (2021-03-11T07:37:36.000Z to 2021-09-19T07:58:41.000Z) · max_issues_count: 1 (2021-11-21T07:48:41.000Z) · max_forks_count: 6 (2021-03-24T09:01:23.000Z to 2021-09-18T13:16:34.000Z)
- content:

```python
from os import environ
from kombu import Queue, Exchange
CELERY_BROKER_URL = environ.get('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = environ.get('CELERY_RESULT_BACKEND')
CELERY_TIMEZONE = environ.get('TZ', 'UTC')
CELERY_RESULT_PERSISTENT = True
CELERY_TASK_TRACK_STARTED = True
CELERY_TASK_TIME_LIMIT = 30 * 60
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_BROKER_HEARTBEAT_CHECKRATE = 10
CELERY_EVENT_QUEUE_EXPIRES = 10
CELERY_EVENT_QUEUE_TTL = 10
CELERY_TASK_SOFT_TIME_LIMIT = 60
CELERY_BROKER_TRANSPORT_OPTIONS = {
'max_retries': 4,
'interval_start': 0,
'interval_step': 0.5,
'interval_max': 3,
}
celery_exchange = Exchange('celery', type='direct')  # other exchange types: 'topic', 'fanout'
CELERY_TASK_ROUTES = {
'*': {'queue': 'celery'},
}
CELERY_TASK_QUEUES = (
Queue('celery', exchange=celery_exchange, queue_arguments={'x-queue-mode': 'lazy'}),
)
```

- avg_line_length: 24.694444 · max_line_length: 88 · alphanum_fraction: 0.752531
- content_no_comment: duplicate of content with comment text stripped · is_comment_constant_removed: true · is_sharp_comment_removed: true
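
These names follow Celery's `CELERY_`-prefixed settings convention; a hedged sketch of one common way such a module is consumed in a Django project (the settings module path is assumed, and this wiring is not shown in the original file):

```python
# Sketch only: a Celery app that picks up the CELERY_* settings above
# through Django settings; 'src.settings' is an assumed module path.
import os

from celery import Celery

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "src.settings")

app = Celery("web")
# Strips the CELERY_ prefix: CELERY_TASK_ROUTES becomes task_routes, etc.
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
```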

---

- hexsha: 790844811447283f2df1dd50ec47b51a56591b0b · size: 1,110 · ext: py · lang: Python
- path: web/src/views/home.py
- repo: TexAgg/MarkovTextGenerator @ ed1b260da8cd6a015a422a332102c34226de52ba · licenses: ["MIT"] (identical across the stars/issues/forks column groups)
- max_stars_count: 3 (2016-08-20T02:10:57.000Z to 2019-02-04T21:12:47.000Z) · max_issues_count: null · max_forks_count: null
- content:

```python
from markovp import Markov
from src.forms import Markov_Form
from flask import Flask, render_template, request, redirect, url_for, Blueprint, make_response
home = Blueprint("home", __name__)
@home.route("/")
def index():
#{
form = Markov_Form()
return render_template('form.html', form = form)
#}
# The submission page.
@home.route("/submit", methods=["GET"])
def submit():
#{
if request.method == "GET":
#{
if request.args.get('submit_button'):
#{
# Get form values.
# http://stackoverflow.com/a/20341272/5415895
text = request.args.get("input_text")
# We have to cast text as a string, otherwise C++ complains.
mark = Markov(str(text), 1)
output = mark.generate()
return render_template("output.html", input = str(text), output = output)
#}
else:
#{
# Make sure nobody can access the submit path without submitting.
            return redirect(url_for('home.index'))  # endpoint lives on the 'home' blueprint
#}
#}
else:
#{
        return redirect(url_for('home.index'))
#}
#}
```

- avg_line_length: 27.073171 · max_line_length: 94 · alphanum_fraction: 0.583784
- content_no_comment: duplicate of content with comment text stripped · is_comment_constant_removed: true · is_sharp_comment_removed: true
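
The blueprint is self-contained; a hedged sketch of mounting it in an application (the import path is assumed from the repository layout above):

```python
# Sketch only: register the 'home' blueprint on a Flask app.
from flask import Flask

from src.views.home import home  # import path assumed from web/src/views/home.py

app = Flask(__name__)
app.register_blueprint(home)

if __name__ == "__main__":
    app.run(debug=True)
```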

---

- hexsha: 7908450aafd9c882d841fbba45ec21f1cc39ded1 · size: 2,429 · ext: py · lang: Python
- path: neuwon/database/examples/life/model.py
- repo: ctrl-z-9000-times/NEUWON @ ed5c13f8cecfd638dd0952d231c36f48073a64a6 · licenses: ["MIT"] (identical across the stars/issues/forks column groups)
- max_stars_count: 3 (2020-12-26T02:26:28.000Z to 2022-03-30T05:54:13.000Z) · max_issues_count: null · max_forks_count: null
- content:

```python
from neuwon.database import Database
import numpy as np
import numba
class GameOfLife:
class _CellBaseClass:
__slots__ = ()
@classmethod
def _add_to_database(cls, database):
cell_data = database.add_class("Cell", cls)
cell_data.add_attribute("coordinates", shape=(2,), dtype=np.int32)
cell_data.add_attribute("alive", False, dtype=np.bool)
cell_data.add_connectivity_matrix("neighbors", "Cell")
return cell_data.get_instance_type()
def __init__(self, shape):
self.db = Database()
self.Cell = self._CellBaseClass._add_to_database(self.db)
self.shape = shape
self.grid = np.empty(self.shape, dtype=object)
for x in range(self.shape[0]):
for y in range(self.shape[1]):
self.grid[x,y] = self.Cell(coordinates=(x,y))
for x in range(self.shape[0]):
for y in range(self.shape[1]):
cell = self.grid[x,y]
neighbors = []
for x_offset in [-1, 0, 1]:
for y_offset in [-1, 0, 1]:
nx = x - x_offset
ny = y - y_offset
if nx < 0: nx = 0
if ny < 0: ny = 0
if nx >= self.shape[0]: nx = self.shape[0] - 1
if ny >= self.shape[1]: ny = self.shape[1] - 1
neighbor = self.grid[nx, ny]
if cell != neighbor:
neighbors.append(neighbor)
cell.neighbors = neighbors
self.db.get("Cell.neighbors").to_csr()
def randomize(self, alive_fraction):
a = self.db.get_data("Cell.alive")
a.fill(False)
a[np.random.uniform(size=a.shape) < alive_fraction] = True
def get_num_alive(self):
return sum(self.db.get_data("Cell.alive"))
def advance(self):
a = self.db.get_data("Cell.alive")
n = self.db.get_data("Cell.neighbors")
# C is the number of living neighbors for each cell.
c = n * np.array(a, dtype=np.int32)
_advance(a, c)
@numba.njit(parallel=True)
def _advance(a, c):
for idx in numba.prange(len(a)):
ci = c[idx]
if a[idx]:
if ci not in range(2, 4):
a[idx] = False
else:
if ci == 3:
a[idx] = True
```

- avg_line_length: 35.720588 · max_line_length: 78 · alphanum_fraction: 0.516674
- content_no_comment: duplicate of content with comment text stripped · is_comment_constant_removed: true · is_sharp_comment_removed: true
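
A short usage sketch of the class above; the grid size, seed density, and step count are arbitrary:

```python
# Run a small board for a few generations using only the API defined above.
model = GameOfLife((32, 32))
model.randomize(alive_fraction=0.33)
for _ in range(10):
    model.advance()
print("alive cells after 10 steps:", model.get_num_alive())
```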

---

- hexsha: 790846053db966a21b7b6b346852f1e665469c3b · size: 2,730 · ext: py · lang: Python
- path: src/openprocurement/planning/api/views/plan_milestone_document.py
- stars/issues repo: pontostroy/api @ 5afdd3a62a8e562cf77e2d963d88f1a26613d16a · licenses: ["Apache-2.0"]
- forks repo: scrubele/prozorro-testing @ 42b93ea2f25d8cc40e66c596f582c7c05e2a9d76 · licenses: ["Apache-2.0"]
- max_stars_count: 3 (2020-03-13T06:44:23.000Z to 2020-11-05T18:25:29.000Z) · max_issues_count: 2 (2021-03-25T23:29:58.000Z to 2022-03-21T22:18:37.000Z) · max_forks_count: 3 (2020-10-16T16:25:14.000Z to 2021-05-22T12:26:20.000Z)
- content:

```python
# -*- coding: utf-8 -*-
from openprocurement.planning.api.utils import opresource
from openprocurement.api.utils import get_now, context_unpack, json_view
from openprocurement.planning.api.validation import validate_plan_not_terminated
from openprocurement.api.validation import validate_file_update, validate_file_upload, validate_patch_document_data
from openprocurement.planning.api.views.plan_document import PlansDocumentResource
@opresource(
name="Plan Milestone Documents",
collection_path="/plans/{plan_id}/milestones/{milestone_id}/documents",
path="/plans/{plan_id}/milestones/{milestone_id}/documents/{document_id}",
description="Plan milestone related files",
)
class PlanMilestoneDocumentResource(PlansDocumentResource):
def update_modified_dates(self):
plan = self.request.validated["plan"]
milestone = self.request.validated["milestone"]
plan.dateModified = milestone.dateModified = get_now()
plan.modified = False
@json_view(
permission="update_milestone",
validators=(validate_file_upload, validate_plan_not_terminated)
)
def collection_post(self):
self.update_modified_dates()
return super(PlanMilestoneDocumentResource, self).collection_post()
@json_view(
permission="update_milestone",
validators=(validate_file_update, validate_plan_not_terminated)
)
def put(self):
self.update_modified_dates()
return super(PlanMilestoneDocumentResource, self).put()
@json_view(
content_type="application/json",
permission="update_milestone",
validators=(validate_patch_document_data, validate_plan_not_terminated),
)
def patch(self):
self.update_modified_dates()
return super(PlanMilestoneDocumentResource, self).patch()
def _post_document_log(self, document):
self.LOGGER.info(
"Created plan milestone document {}".format(document.id),
extra=context_unpack(
self.request,
{"MESSAGE_ID": "plan_milestone_document_create"},
{"document_id": document.id}
),
)
def _put_document_log(self):
self.LOGGER.info(
"Updated plan milestone document {}".format(self.request.context.id),
extra=context_unpack(self.request,
{"MESSAGE_ID": "plan_milestone_document_put"}),
)
def _patch_document_log(self):
self.LOGGER.info(
"Updated plan milestone document {}".format(self.request.context.id),
extra=context_unpack(self.request,
{"MESSAGE_ID": "plan_milestone_document_patch"}),
)
```

- avg_line_length: 38.450704 · max_line_length: 115 · alphanum_fraction: 0.684615
- content_no_comment: duplicate of content with comment text stripped · is_comment_constant_removed: true · is_sharp_comment_removed: true

---

- hexsha: 790846295bb8eb919ad9b04bfde59a7401e0f7d3 · size: 9,265 · ext: py · lang: Python
- path: pythia/utils/checkpoint.py
- repo: zean-wen/mmgnn_textvqa @ 2cfe82ed54610975a1d4937f2032e5f4565ecbe7 · licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups)
- max_stars_count: 35 (2020-03-06T13:05:17.000Z to 2021-07-30T15:12:00.000Z) · max_issues_count: 1 (2021-01-12T01:45:54.000Z to 2021-03-17T00:44:47.000Z) · max_forks_count: 11 (2020-03-07T08:10:15.000Z to 2021-06-24T05:39:36.000Z)
- content:

```python
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import warnings
# import git
import torch
import yaml
from pythia.common.registry import registry
from pythia.utils.distributed_utils import is_main_process, synchronize
from pythia.utils.general import (ckpt_name_from_core_args,
foldername_from_config_override, updir)
class Checkpoint:
def __init__(self, trainer):
"""
Generates a path for saving model which can also be used for resuming
from a checkpoint.
"""
self.trainer = trainer
self.config = self.trainer.config
self.save_dir = self.config.training_parameters.save_dir
self.model_name = self.config.model
self.ckpt_foldername = ckpt_name_from_core_args(self.config)
self.ckpt_foldername += foldername_from_config_override(self.trainer.args)
self.device = registry.get("current_device")
self.ckpt_prefix = ""
if hasattr(self.trainer.model, "get_ckpt_name"):
self.ckpt_prefix = self.trainer.model.get_ckpt_name() + "_"
self.config["log_foldername"] = self.ckpt_foldername
self.ckpt_foldername = os.path.join(self.save_dir, self.ckpt_foldername)
self.pth_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + self.model_name + getattr(self.config.model_attributes,
self.model_name).code_name + "_final.pth"
)
self.models_foldername = os.path.join(self.ckpt_foldername, "models")
if not os.path.exists(self.models_foldername):
os.makedirs(self.models_foldername)
self.save_config()
self.repo_path = updir(os.path.abspath(__file__), n=3)
# self.repo = git.Repo(self.repo_path)
def save_config(self):
cfg_file = os.path.join(self.ckpt_foldername, "config.yaml")
with open(cfg_file, "w") as f:
# Pop out config_override if present to remove clutter in
# saved configuration yaml file
self.config.pop("config_override", None)
f.write(str(self.config))
def load_state_dict(self):
tp = self.config.training_parameters
if tp.resume_file is not None:
if os.path.exists(tp.resume_file):
self._load(tp.resume_file)
return
else:
raise RuntimeError("{} doesn't exist".format(tp.resume_file))
ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "best.ckpt"
)
if tp.resume is True:
if os.path.exists(ckpt_filepath):
self._load(ckpt_filepath)
else:
warnings.warn(
"Tried to resume but checkpoint filepath {} "
"is not present. Skipping.".format(ckpt_filepath)
)
def _load(self, file):
self.trainer.writer.write("Loading checkpoint")
ckpt = self._torch_load(file)
data_parallel = registry.get("data_parallel")
if "model" in ckpt:
ckpt_model = ckpt["model"]
else:
ckpt_model = ckpt
ckpt = {"model": ckpt}
pretrained_mapping = self.config.training_parameters.pretrained_mapping
if not self.config.training_parameters.load_pretrained:
pretrained_mapping = {}
new_dict = {}
# TODO: Move to separate function
for attr in ckpt_model:
if "fa_history" in attr:
new_dict[attr.replace("fa_history", "fa_context")] = ckpt_model[attr]
elif data_parallel is False and attr.startswith("module."):
# In case the ckpt was actually a data parallel model
# replace first module. from dataparallel with empty string
new_dict[attr.replace("module.", "", 1)] = ckpt_model[attr]
else:
new_dict[attr] = ckpt_model[attr]
if len(pretrained_mapping.items()) == 0:
final_dict = new_dict
self.trainer.model.load_state_dict(final_dict)
if "optimizer" in ckpt:
self.trainer.optimizer.load_state_dict(ckpt["optimizer"])
else:
warnings.warn(
"'optimizer' key is not present in the "
"checkpoint asked to be loaded. Skipping."
)
self.trainer.early_stopping.init_from_checkpoint(ckpt)
self.trainer.writer.write("Checkpoint loaded")
if "best_iteration" in ckpt:
self.trainer.current_iteration = ckpt["best_iteration"]
registry.register("current_iteration", self.trainer.current_iteration)
if "best_epoch" in ckpt:
self.trainer.current_epoch = ckpt["best_epoch"]
registry.register("current_epoch", self.trainer.current_epoch)
else:
final_dict = {}
model = self.trainer.model
own_state = model.state_dict()
for key, value in pretrained_mapping.items():
key += "."
value += "."
for attr in new_dict:
for own_attr in own_state:
if (
key in attr
and value in own_attr
and attr.replace(key, "") == own_attr.replace(value, "")
):
self.trainer.writer.write(
"Copying " + attr + " " + own_attr
)
own_state[own_attr].copy_(new_dict[attr])
self.trainer.writer.write("Pretrained model loaded")
def _load_state_dict_mapping(self, ckpt_model):
model = self.trainer.model
attr_mapping = {
"image_feature_encoders": "img_feat_encoders",
"image_feature_embeddings_list": "img_embeddings_list",
"image_text_multi_modal_combine_layer": "multi_modal_combine_layer",
"text_embeddings": "text_embeddings",
"classifier": "classifier",
}
data_parallel = registry.get("data_parallel")
if not data_parallel:
for key in attr_mapping:
attr_mapping[key.replace("module.", "")] = attr_mapping[key]
attr_mapping.pop(key)
for key in attr_mapping:
getattr(model, key).load_state_dict(ckpt_model[attr_mapping[key]])
def _torch_load(self, file):
if "cuda" in str(self.device):
return torch.load(file)
else:
return torch.load(file, map_location=lambda storage, loc: storage)
# def _get_vcs_fields(self):
# """Returns a dict with git fields of the current repository
#
# To reproduce an experiment directly from a checkpoint
#
# 1) Export `config` key as a yaml
# 2) Clone repository and checkout at given commit on given branch
# 3) Any local change (diff) while running the experiment is stored
# in the value with key `git/diff`, output the diff to a `path.diff`
# file and apply the patch to the current state by simply
#
# `patch -p0 < path.diff`
# """
#
# return {
# "git/branch": self.repo.active_branch.name,
# "git/commit_hash": self.repo.head.commit.name_rev,
# "git/commit_author": self.repo.head.commit.author.name,
# "git/commit_message": self.repo.head.commit.message,
# "git/diff": self.repo.git.diff("--no-prefix"),
# }
def save(self, iteration, update_best=False):
# Only save in main process
if not is_main_process():
return
ckpt_filepath = os.path.join(
self.models_foldername, "model_%d.ckpt" % iteration
)
best_ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "best.ckpt"
)
best_iteration = self.trainer.early_stopping.best_monitored_iteration
best_metric = self.trainer.early_stopping.best_monitored_value
ckpt = {
"model": self.trainer.model.state_dict(),
"optimizer": self.trainer.optimizer.state_dict(),
"best_iteration": best_iteration,
"best_metric_value": best_metric,
"config": self.config,
}
# git_metadata_dict = self._get_vcs_fields()
# ckpt.update(git_metadata_dict)
torch.save(ckpt, ckpt_filepath)
if update_best:
torch.save(ckpt, best_ckpt_filepath)
def restore(self):
self.trainer.writer.write("Restoring checkpoint")
best_path = os.path.join(self.ckpt_foldername, self.ckpt_prefix + "best.ckpt")
if os.path.exists(best_path):
ckpt = self._torch_load(best_path)
self.trainer.model.load_state_dict(ckpt["model"])
self.trainer.optimizer.load_state_dict(ckpt["optimizer"])
def finalize(self):
torch.save(self.trainer.model.state_dict(), self.pth_filepath)
```

- avg_line_length: 37.510121 · max_line_length: 120 · alphanum_fraction: 0.586508
- content_no_comment: duplicate of content with comment text stripped · is_comment_constant_removed: true · is_sharp_comment_removed: true
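
The reusable pieces here are the checkpoint dictionary layout written by `save()` and the CPU-safe `map_location` trick in `_torch_load`; a minimal round-trip sketch with a stand-in model and optimizer (both hypothetical):

```python
# Sketch only: save and reload a checkpoint shaped like Checkpoint.save() writes.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

ckpt = {
    "model": model.state_dict(),
    "optimizer": optimizer.state_dict(),
    "best_iteration": 0,
    "best_metric_value": 0.0,
}
torch.save(ckpt, "model_0.ckpt")

# Equivalent of _torch_load on a machine without CUDA.
restored = torch.load("model_0.ckpt", map_location=lambda storage, loc: storage)
model.load_state_dict(restored["model"])
optimizer.load_state_dict(restored["optimizer"])
```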

---

- hexsha: 7908471290734f2e18358242cb73a530aa165830 · size: 1,709 · ext: py · lang: Python
- path: examples/house_example.py
- repo: ntezak/plotille @ 7baa4fa176ced99f7a9ab9688e99b525ef3edf4d · licenses: ["MIT"] (identical across the stars/issues/forks column groups)
- max_stars_count: 157 (2017-09-28T12:16:52.000Z to 2022-03-31T08:13:23.000Z) · max_issues_count: 43 (2017-11-01T19:21:21.000Z to 2022-03-27T08:36:56.000Z) · max_forks_count: 12 (2018-01-14T08:05:07.000Z to 2021-07-31T05:15:38.000Z)
- content:

```python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# The MIT License
# Copyright (c) 2017 - 2021 Tammo Ippen, tammo.ippen@posteo.de
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from plotille import Canvas
# The underlying canvas-implementation can be used on its own.
def main():
c = Canvas(width=40, height=20)
c.rect(0.1, 0.1, 0.6, 0.6)
c.line(0.1, 0.1, 0.6, 0.6)
c.line(0.1, 0.6, 0.6, 0.1)
c.line(0.1, 0.6, 0.35, 0.8)
c.line(0.35, 0.8, 0.6, 0.6)
c.text(0.3, 0.5, 'hi', color='red')
c.point(0.35, 0.35, color='blue')
c.fill_char(0.35, 0.1)
print(c.plot())
if __name__ == '__main__':
main()
| 37.152174
| 82
| 0.716793
|
from __future__ import absolute_import, division, print_function, unicode_literals
from plotille import Canvas
def main():
c = Canvas(width=40, height=20)
c.rect(0.1, 0.1, 0.6, 0.6)
c.line(0.1, 0.1, 0.6, 0.6)
c.line(0.1, 0.6, 0.6, 0.1)
c.line(0.1, 0.6, 0.35, 0.8)
c.line(0.35, 0.8, 0.6, 0.6)
c.text(0.3, 0.5, 'hi', color='red')
c.point(0.35, 0.35, color='blue')
c.fill_char(0.35, 0.1)
print(c.plot())
if __name__ == '__main__':
main()
| true
| true
|
790847497be475df0c275744fe8ab1fd60ba1827
| 107
|
py
|
Python
|
setup.py
|
NGoetz/NF
|
935886db48f4675db1a2c42f7c264b12d5014ed8
|
[
"MIT"
] | 1
|
2020-11-26T11:03:50.000Z
|
2020-11-26T11:03:50.000Z
|
setup.py
|
NGoetz/NF
|
935886db48f4675db1a2c42f7c264b12d5014ed8
|
[
"MIT"
] | 1
|
2021-02-27T10:18:00.000Z
|
2021-02-28T16:18:12.000Z
|
setup.py
|
NGoetz/NF
|
935886db48f4675db1a2c42f7c264b12d5014ed8
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(name='nisrep', version='1.0', packages=find_packages())
| 26.75
| 61
| 0.775701
|
from setuptools import setup, find_packages
setup(name='nisrep', version='1.0', packages=find_packages())
| true
| true
|
790848ea6d0eaac765686b016e7f84e414b14546
| 863
|
py
|
Python
|
Difficulty/Medium/102.binary-tree-level-order-traversal.py
|
ryderfang/LeetCode
|
be9470dee6175bab321f0549f173c0c682dddd77
|
[
"MIT"
] | null | null | null |
Difficulty/Medium/102.binary-tree-level-order-traversal.py
|
ryderfang/LeetCode
|
be9470dee6175bab321f0549f173c0c682dddd77
|
[
"MIT"
] | null | null | null |
Difficulty/Medium/102.binary-tree-level-order-traversal.py
|
ryderfang/LeetCode
|
be9470dee6175bab321f0549f173c0c682dddd77
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=102 lang=python3
#
# [102] Binary Tree Level Order Traversal
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
from typing import List, Optional
class Solution:
def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:
ans = []
def _larger(n):
for i in range(len(ans), n + 1):
ans.append([])
def _traversal(node, depth):
if node != None:
_larger(depth)
tmp = ans[depth]
tmp.append(node.val)
_traversal(node.left, depth + 1)
_traversal(node.right, depth + 1)
_traversal(root, 0)
return ans
# @lc code=end
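# --- Editorial sketch (not part of the original solution) --------------------
# Quick check on the classic example tree [3,9,20,null,null,15,7]; levelOrder
# groups values by depth, so the expected result is [[3], [9, 20], [15, 7]].
if __name__ == '__main__':
    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().levelOrder(root))  # [[3], [9, 20], [15, 7]]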
| 26.151515
| 70
| 0.543453
|
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
from typing import List, Optional
class Solution:
def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:
ans = []
def _larger(n):
for i in range(len(ans), n + 1):
ans.append([])
def _traversal(node, depth):
if node != None:
_larger(depth)
tmp = ans[depth]
tmp.append(node.val)
_traversal(node.left, depth + 1)
_traversal(node.right, depth + 1)
_traversal(root, 0)
return ans
| true
| true
|
79084a97db59f896cea5221ece7bd8b33762b60d
| 905
|
py
|
Python
|
ex001 a ex114/ex113.py
|
kesia-barros/exercicios-python
|
12a019e61c4b29fa29803f394b15d0af304c2ff0
|
[
"MIT"
] | null | null | null |
ex001 a ex114/ex113.py
|
kesia-barros/exercicios-python
|
12a019e61c4b29fa29803f394b15d0af304c2ff0
|
[
"MIT"
] | null | null | null |
ex001 a ex114/ex113.py
|
kesia-barros/exercicios-python
|
12a019e61c4b29fa29803f394b15d0af304c2ff0
|
[
"MIT"
] | null | null | null |
def leiaint(msg):
    while True:
        try:
            n = int(input(msg))
        except (ValueError, TypeError):
            print("\033[31mERROR: Please type a valid integer.\033[m")
        except KeyboardInterrupt:
            print("\n\033[31mThe user chose not to type this number.")
            return 0
        else:
            return n
def leiafloat(msg):
    while True:
        try:
            n = float(input(msg))
        except (ValueError, TypeError):
            print("\033[31mERROR: Please type a valid real number.\033[m")
        except KeyboardInterrupt:
            print("\n\033[31mThe user chose not to type this number.")
            return 0
        else:
            return n
n1 = leiaint("Type an integer: ")
n2 = leiafloat("Type a real number: ")
print(f"You just typed the integer {n1} and the real number {n2}!")
| 31.206897
| 84
| 0.583425
|
def leiaint(msg):
    while True:
        try:
            n = int(input(msg))
        except (ValueError, TypeError):
            print("\033[31mERROR: Please type a valid integer.\033[m")
        except KeyboardInterrupt:
            print("\n\033[31mThe user chose not to type this number.")
            return 0
        else:
            return n
def leiafloat(msg):
    while True:
        try:
            n = float(input(msg))
        except (ValueError, TypeError):
            print("\033[31mERROR: Please type a valid real number.\033[m")
        except KeyboardInterrupt:
            print("\n\033[31mThe user chose not to type this number.")
            return 0
        else:
            return n
n1 = leiaint("Type an integer: ")
n2 = leiafloat("Type a real number: ")
print(f"You just typed the integer {n1} and the real number {n2}!")
| true
| true
|
79084c12025bee298f9b7e86bb2f2006083722b9
| 669
|
py
|
Python
|
app/core/management/commands/wait_for_db.py
|
nagarjunand/receipe-app-api
|
705ed99979356644dddf999370e6b618404cef3e
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
nagarjunand/receipe-app-api
|
705ed99979356644dddf999370e6b618404cef3e
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
nagarjunand/receipe-app-api
|
705ed99979356644dddf999370e6b618404cef3e
|
[
"MIT"
] | null | null | null |
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available """
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available'))
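# Usage note (editorial): Django exposes this command under the module name,
# so it runs as `python manage.py wait_for_db`, typically before `migrate` in
# a container entrypoint so the app does not race the database container.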
| 35.210526
| 78
| 0.654709
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available'))
| true
| true
|
79084c67b911633cc1eddb635eec3def07253b4b
| 1,742
|
py
|
Python
|
crawler/lib/RobotsParser.py
|
wilsonsk/Node-React-Python-D3-Crawler-App
|
5f988148e601f20d5ea86b0734d46eb1b5694fb7
|
[
"MIT"
] | null | null | null |
crawler/lib/RobotsParser.py
|
wilsonsk/Node-React-Python-D3-Crawler-App
|
5f988148e601f20d5ea86b0734d46eb1b5694fb7
|
[
"MIT"
] | null | null | null |
crawler/lib/RobotsParser.py
|
wilsonsk/Node-React-Python-D3-Crawler-App
|
5f988148e601f20d5ea86b0734d46eb1b5694fb7
|
[
"MIT"
] | null | null | null |
"""
Filename: RobotsParser.py
Author: Maxwell Goldberg
Last modified: 06.09.17
Description: Helper class for parsing individual robots.txt records.
"""
# CONSTANTS
from constants import RECORD_MAX_LEN
# PYTHON BUILTINS
import re, unicodedata, logging
def test_ctrl_chars(s):
return len(s) != len("".join(ch for ch in s if unicodedata.category(ch)[0]!="C"))
class RobotsParser:
valid_fields = [u'user-agent', u'allow', u'disallow']
def __init__(self, record=None):
if record is None:
raise TypeError('Parameter record must not be NoneType')
if not isinstance(record, unicode):
raise TypeError('Parameter record must be a Unicode string')
if len(record) > RECORD_MAX_LEN:
raise ValueError('Parameter record exceeds maximum record num characters')
self.record = record
def parse_field(self, field):
field = field.strip().lower()
if field not in RobotsParser.valid_fields:
raise ValueError('Record contains invalid field')
return field
def parse_path(self, path):
path = path.strip()
if test_ctrl_chars(path):
raise ValueError('Record path contains control characters')
# Get path length prior to parsing
self.init_path_len = len(path)
path = re.escape(path)
path = path.replace('\\*', '.*').replace('\\$', '$')
return path
def parse(self):
# Attempt to separate a record by a colon delimiter.
record_list = self.record.split('#')[0]
record_list = record_list.split(':', 1)
if len(record_list) <= 1:
raise ValueError('Record must contain a delimiter')
if len(record_list) > 2:
raise ValueError('Record contains too many delimited fields')
# Parse the field
self.field = self.parse_field(record_list[0])
# Parse the path
self.path = self.parse_path(record_list[1])
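# --- Editorial sketch (not part of the original module) ----------------------
# Minimal usage, assuming Python 2 (the class checks isinstance(record, unicode))
# and that RECORD_MAX_LEN imported above is large enough for the record:
if __name__ == '__main__':
    p = RobotsParser(record=u'Disallow: /private/*')
    p.parse()
    print p.field  # u'disallow'
    print p.path   # escaped path regex, with '*' rewritten to '.*'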
| 30.034483
| 82
| 0.721584
|
from constants import RECORD_MAX_LEN
import re, unicodedata, logging
def test_ctrl_chars(s):
return len(s) != len("".join(ch for ch in s if unicodedata.category(ch)[0]!="C"))
class RobotsParser:
valid_fields = [u'user-agent', u'allow', u'disallow']
def __init__(self, record=None):
if record is None:
raise TypeError('Parameter record must not be NoneType')
if not isinstance(record, unicode):
raise TypeError('Parameter record must be a Unicode string')
if len(record) > RECORD_MAX_LEN:
raise ValueError('Parameter record exceeds maximum record num characters')
self.record = record
def parse_field(self, field):
field = field.strip().lower()
if field not in RobotsParser.valid_fields:
raise ValueError('Record contains invalid field')
return field
def parse_path(self, path):
path = path.strip()
if test_ctrl_chars(path):
raise ValueError('Record path contains control characters')
self.init_path_len = len(path)
path = re.escape(path)
path = path.replace('\\*', '.*').replace('\\$', '$')
return path
def parse(self):
record_list = self.record.split('#')[0]
record_list = record_list.split(':', 1)
if len(record_list) <= 1:
raise ValueError('Record must contain a delimiter')
if len(record_list) > 2:
raise ValueError('Record contains too many delimited fields')
self.field = self.parse_field(record_list[0])
self.path = self.parse_path(record_list[1])
| true
| true
|
79084c8d562303d6f89ec7cf7a4b26f457699d5d
| 118
|
py
|
Python
|
boa3_test/test_sc/exception_test/RaiseVariableException.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/exception_test/RaiseVariableException.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/exception_test/RaiseVariableException.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from boa3.builtin import public
@public
def test_raise(arg: int):
x = Exception
if arg < 0:
raise x
| 13.111111
| 31
| 0.627119
|
from boa3.builtin import public
@public
def test_raise(arg: int):
x = Exception
if arg < 0:
raise x
| true
| true
|
79084ccc3679e782cdaddcbe8b3ea07ee03077ff
| 1,296
|
py
|
Python
|
torchio/transforms/interpolation.py
|
jwitos/torchio
|
4d5c4fd75e1169f33f566c86e0bd25056dd5be0d
|
[
"Apache-2.0"
] | null | null | null |
torchio/transforms/interpolation.py
|
jwitos/torchio
|
4d5c4fd75e1169f33f566c86e0bd25056dd5be0d
|
[
"Apache-2.0"
] | null | null | null |
torchio/transforms/interpolation.py
|
jwitos/torchio
|
4d5c4fd75e1169f33f566c86e0bd25056dd5be0d
|
[
"Apache-2.0"
] | null | null | null |
import enum
import SimpleITK as sitk
@enum.unique
class Interpolation(enum.Enum):
"""Interpolation techniques available in ITK.
Example:
>>> import torchio as tio
>>> transform = tio.RandomAffine(image_interpolation='nearest')
"""
#: Interpolates image intensity at a non-integer pixel position by copying the intensity for the nearest neighbor.
NEAREST: str = 'sitkNearestNeighbor'
#: Linearly interpolates image intensity at a non-integer pixel position.
LINEAR: str = 'sitkLinear'
#: Computes the B-spline interpolation weights over the support region of the B-spline.
BSPLINE: str = 'sitkBSpline'
GAUSSIAN: str = 'sitkGaussian'
LABEL_GAUSSIAN: str = 'sitkLabelGaussian'
HAMMING: str = 'sitkHammingWindowedSinc'
COSINE: str = 'sitkCosineWindowedSinc'
WELCH: str = 'sitkWelchWindowedSinc'
LANCZOS: str = 'sitkLanczosWindowedSinc'
BLACKMAN: str = 'sitkBlackmanWindowedSinc'
def get_sitk_interpolator(interpolation: str) -> int:
if not isinstance(interpolation, str):
message = (
f'Interpolation must be a string, not {type(interpolation)}'
)
raise ValueError(message)
string = getattr(Interpolation, interpolation.upper()).value
return getattr(sitk, string)
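# --- Editorial sketch (not part of the original module) ----------------------
# The lookup is case-insensitive on the enum member name and returns the
# SimpleITK integer constant:
if __name__ == '__main__':
    assert get_sitk_interpolator('linear') == sitk.sitkLinear
    assert get_sitk_interpolator('bspline') == sitk.sitkBSpline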
| 32.4
| 118
| 0.703704
|
import enum
import SimpleITK as sitk
@enum.unique
class Interpolation(enum.Enum):
NEAREST: str = 'sitkNearestNeighbor'
LINEAR: str = 'sitkLinear'
BSPLINE: str = 'sitkBSpline'
GAUSSIAN: str = 'sitkGaussian'
LABEL_GAUSSIAN: str = 'sitkLabelGaussian'
HAMMING: str = 'sitkHammingWindowedSinc'
COSINE: str = 'sitkCosineWindowedSinc'
WELCH: str = 'sitkWelchWindowedSinc'
LANCZOS: str = 'sitkLanczosWindowedSinc'
BLACKMAN: str = 'sitkBlackmanWindowedSinc'
def get_sitk_interpolator(interpolation: str) -> int:
if not isinstance(interpolation, str):
message = (
f'Interpolation must be a string, not {type(interpolation)}'
)
raise ValueError(message)
string = getattr(Interpolation, interpolation.upper()).value
return getattr(sitk, string)
| true
| true
|
79084e06eb7c41d7efaca16a12e16305e91d17b5
| 95,422
|
py
|
Python
|
mmtbx/twinning/twin_f_model.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/twinning/twin_f_model.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/twinning/twin_f_model.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
from cctbx import miller
from cctbx import crystal
from cctbx import sgtbx
from cctbx import xray
from cctbx import adptbx
from copy import deepcopy
from mmtbx import masks
import cctbx.xray.structure_factors
from cctbx.eltbx.xray_scattering import wk1995
from libtbx import adopt_init_args
from cctbx.array_family import flex
from libtbx.utils import Sorry, date_and_time
from libtbx.math_utils import iround
import iotbx.phil
from iotbx.pdb import xray_structure
import mmtbx.scaling
from mmtbx.scaling import relative_scaling
from mmtbx.scaling import sigmaa_estimation
from mmtbx import max_lik
import mmtbx.f_model
from libtbx import table_utils
from libtbx.utils import user_plus_sys_time
from cStringIO import StringIO
import sys, os, math
from libtbx.str_utils import format_value
from libtbx import Auto
import mmtbx.bulk_solvent.bulk_solvent_and_scaling as bss
import libtbx.path
import mmtbx.refinement.targets
import mmtbx.f_model.f_model_info
master_params = iotbx.phil.parse("""
twin_law = None
.type=str
.input_size = 80
.style = bold noauto
twin_target=*twin_lsq_f
.type=choice
detwin{
mode = algebraic proportional *auto
.type= choice
.short_caption = Detwin mode
map_types{
twofofc = *two_m_dtfo_d_fc two_dtfo_fc
.type = choice
.short_caption = 2Fo-Fc map type
fofc = *m_dtfo_d_fc gradient m_gradient
.type = choice
.short_caption = Fo-Fc map type
aniso_correct = False
.type=bool
.short_caption = Anisotropy correction
}
}
""")
class twin_fraction_object(object):
"""provides methods for derivatives and
transformastion of twin fraction"""
def __init__(self, twin_fraction = 0):
self.min_frac = 0.001
self.max_frac = 0.999
self.twin_fraction = float(twin_fraction)
if (self.twin_fraction<=self.min_frac):
self.twin_fraction = self.min_frac + self.min_frac/10.0
if (self.twin_fraction>=self.max_frac):
self.twin_fraction = self.max_frac - self.min_frac/10.0
def twin_fraction_to_ref( self ):
tmp = self.twin_fraction - self.min_frac
tmp = (self.max_frac-self.min_frac)/tmp -1.0
if tmp < 1e-70:
tmp = 1e-70
tmp = -math.log( tmp )
return tmp
def ref_to_twin_fraction(self, x):
if (x<-10):
x=-10
tmp = self.min_frac + (self.max_frac-self.min_frac)/(1+math.exp(-x) )
self.twin_fraction = tmp
def d_t_d_twin_fraction_ref(self, dtdp ):
tmp = self.twin_fraction_to_ref()
tmp2 = 1.0+math.exp(-tmp )
tmp = (self.max_frac - self.min_frac)*math.exp( -tmp )/(tmp2*tmp2)
# (d target)/(d twin_fraction)* (d twin_fraction)/(d refinable parameter)
# |--------------------------| |---------------------------------------|
# from outside calculated above
return dtdp*tmp
def show(self,out=None):
if out is None:
out = sys.stdout
print >> out, "twin fraction: %4.3f" %( self.twin_fraction )
class scaling_parameters_object(object):
"""Object holds a set of parameters needed for f model.
provides tranformations for parameter optimisation"""
def __init__(self,
xs=None,
k_overall=1.0,
u_star=(0,0,0,0,0,0),
k_sol=0,
u_sol=0,
k_part=0,
u_part=0,
object=None,
max_u_sol = 5.0,
max_u_part = 5.0 ):
if object is not None:
k_overall = object.k_overall
u_star = object.u_star
k_sol = object.k_sol
u_sol = object.u_sol
k_part = object.k_part
u_part = object.u_part
u_star = object.u_star
    # this is complete paranoia: trying to ensure that one always obtains two unique objects
self.k_overall = float(k_overall)
self.u_part = float(u_part)
self.k_sol = float(k_sol)
self.u_sol = float(u_sol)
self.k_part = float(k_part)
self.u_star = ( float(u_star[0]),
float(u_star[1]),
float(u_star[2]),
float(u_star[3]),
float(u_star[4]),
float(u_star[5])
)
self.max_u_sol = float(max_u_sol)
self.max_u_part= float(max_u_part)
if xs is None:
self.xs=object.xs
else:
self.xs=xs
assert self.xs is not None
self.adp_constraints = self.xs.space_group().adp_constraints()
self.vrwgk = math.pow(self.xs.unit_cell().volume(),-2.0/3.0)
self.n_u_indep = self.xs.space_group().adp_constraints(
).n_independent_params()
# make sure that the supplied adp follows the symmetry constraints
self.u_star = self.xs.space_group().average_u_star( self.u_star )
def ref_to_k_overall(self,x):
if(x > 500.): self.k_overall=math.exp(500.)
else: self.k_overall = math.exp( x )
def ref_to_k_sol(self,x):
if x>10:
self.k_sol = math.exp( 10 )
else:
self.k_sol = math.exp( x )
def ref_to_u_sol(self, x):
if x>10:
self.u_sol = math.exp(10.0)
else:
self.u_sol = math.exp( x )
if self.u_sol > self.max_u_sol:
self.u_sol = self.max_u_sol
def ref_to_k_part(self, x):
if x > 10:
self.k_part = math.exp(10)
else:
self.k_part = math.exp( x )
def ref_to_u_part(self, x):
self.u_part = math.exp( x )
if self.u_part > self.max_u_part:
self.u_part = self.max_u_part
def ref_to_u_star(self, x ):
    # first we need to expand the independent parameters to the full size
tmp = self.adp_constraints.all_params( x )
# now it needs to be scaled back to something
# physical
tmp =list( flex.double(tmp) * self.vrwgk )
# done
self.u_star = tmp
def k_overall_to_ref(self):
if self.k_overall > 0:
return math.log( self.k_overall )
else:
return None
def k_sol_to_ref(self):
if self.k_sol>0:
return math.log( self.k_sol )
else:
return 0
def k_part_to_ref(self):
if self.k_part > 0:
return math.log( self.k_part )
else:
return 0
def u_sol_to_ref(self):
if self.u_sol > 0:
return math.log( self.u_sol )
else:
return -1000.0
def u_part_to_ref(self):
if self.u_part>0:
return math.log( self.u_part )
else:
return -1000.0
def u_star_to_ref(self):
# first we pick the independent parameters
tmp = self.xs.space_group().adp_constraints(
).independent_params(all_params=self.u_star)
# now do the scaling please
tmp = list( flex.double(tmp)/self.vrwgk )
return tmp
# derivatives of refinable parameter wrst to target
def d_t_d_k_overall_ref(self,dtdp):
return self.k_overall*dtdp
def d_t_d_k_sol_ref(self,dtdp):
return self.k_sol*dtdp
def d_t_d_k_part_ref(self,dtdp):
return self.k_part*dtdp
def d_t_d_u_sol_ref(self, dtdp):
return self.u_sol*dtdp
def d_t_d_u_part_ref(self, dtdp):
return self.u_part*dtdp
def d_t_d_u_star_ref(self, dtdp):
# first introduce the scaling
tmp = list( flex.double(dtdp) * self.vrwgk )
#now do the symmetry completion please
tmp = list( self.adp_constraints.independent_gradients(
list( tmp ) ) )
return tmp
def show(self,out=None):
if out is None:
out=sys.stdout
print >> out
print >> out, "F-model scaling parameters"
print >> out, "k_overall : %5.2e"%(self.k_overall)
print >> out, "u_star : %5.2e %5.2e %5.2e %5.2e %5.2e %5.2e"%(
self.u_star[0], self.u_star[1], self.u_star[2],
self.u_star[3], self.u_star[4], self.u_star[5])
print >> out, " (%i independent parameters)"%(self.n_u_indep)
print >> out, "k_sol : %5.2e"%(self.k_sol)
print >> out, "u_sol : %5.2e"%(self.u_sol)
print >> out, " B_sol : %5.2f"%(self.u_sol*79.0)
print >> out, "k_part : %5.2e"%(self.k_part)
print >> out, "u_part : %5.2e"%(self.u_part)
print >> out
def deep_copy(self):
new = scaling_parameters_object(object=self)
return new
def get_initial_scale(miller_obs,
f_atoms):
tmp_calc = f_atoms.deep_copy().map_to_asu()
tmp_obs = miller_obs.deep_copy().map_to_asu()
tmp_calc, tmp_obs = tmp_obs.common_sets(
abs(tmp_calc) )
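  # note (editorial): common_sets returns (self_matched, other_matched), so the
  # tuple assignment above leaves tmp_calc holding the observed amplitudes and
  # tmp_obs holding |f_calc|; the ratio below is then sum(Fo*|Fc|)/sum(Fo*Fo).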
init_scale = flex.sum( tmp_calc.data()*tmp_obs.data() )/ \
flex.sum( tmp_calc.data()*tmp_calc.data() )
return init_scale
class scaling_parameter_mask(object):
def __init__(self,
twin_fraction=True,
k_overall=True,
u_star=True,
k_sol=True,
u_sol=True):
self.twin_fraction = 0.0
self.k_overall = 0.0
self.u_star = 0.0
self.k_sol = 0.0
self.u_sol = 0.0
if twin_fraction:
self.twin_fraction = 1.0
if k_overall:
self.k_overall = 1.0
if u_star:
self.u_star = 1.0
if k_sol:
self.k_sol = 1.0
if u_sol:
self.u_sol = 1.0
class target_attributes(mmtbx.refinement.targets.target_attributes):
def __init__(self):
mmtbx.refinement.targets.target_attributes.__init__(self, family="ls")
self.twin = "amplitudes"
self.pseudo_ml = False
class twin_model_manager(mmtbx.f_model.manager_mixin):
def __init__(self,
f_obs = None,
f_mask = None,
f_calc = None,
r_free_flags = None,
xray_structure = None,
scaling_parameters = None,
sf_and_grads_accuracy_params =
mmtbx.f_model.sf_and_grads_accuracy_master_params.extract(),
mask_params = None,
out = None,
twin_law = None,
twin_law_str = None,
start_fraction = 0.1,
n_refl_bin = 2000,
max_bins = 20,
detwin_mode = None,
map_types = None,
twin_target = master_params.extract().twin_target):
self.fmodel_ts1 = None
self.f_obs_ = f_obs
if(f_calc is not None): raise RuntimeError("Not implemented.")
if(map_types is None):
map_types = master_params.extract().detwin.map_types
if(detwin_mode is None):
detwin_mode = "auto"
self.alpha_beta_params=None
self.twin = True
self.twin_law_str = twin_law_str
self.sfg_params = sf_and_grads_accuracy_params
self.target_name=twin_target
self._target_attributes = target_attributes()
if self.target_name =="pseudo_ml_f":
self.target_name = "twin_lsq_f"
self._target_attributes.pseudo_ml=True
if(out is None): out = sys.stdout
self.out = out
self.did_search = 0
if self.out is None:
self.out = sys.stdout
self.twin_fraction_object = twin_fraction_object(twin_fraction=start_fraction)
self.twin_law=twin_law
self.twin_fraction=start_fraction
self.possible_detwin_modes = ["proportional",
"algebraic",
"gradient",
"auto"
]
self.detwin_mode = detwin_mode
if self.detwin_mode is Auto:
self.detwin_mode="auto"
assert self.detwin_mode in self.possible_detwin_modes
self.detwin_switch_twin_fraction = 0.45
self.map_types = map_types
assert (self.twin_law is not None)
f_obs = f_obs.map_to_asu()
self.f_obs_ = f_obs
self.r_free_flags_ = r_free_flags.map_to_asu().common_set(f_obs)
assert self.f_obs_.indices().all_eq( self.r_free_flags_.indices() )
self.f_obs_w = f_obs.select( ~self.r_free_flags_.data() )
if(self.r_free_flags_.data().count(True)>0):
self.f_obs_f = f_obs.select( self.r_free_flags_.data() )
else:
self.f_obs_f = self.f_obs_w.deep_copy()
if(self.f_obs_w.data().size()==0 and self.f_obs_f.data().size()>0):
self.f_obs_w = self.f_obs_f
#setup the binners if this has not been done yet
self.max_bins = max_bins
self.n_refl_bin = n_refl_bin
    if (self.n_refl_bin is None) or (self.n_refl_bin > f_obs.data().size()):
self.n_refl_bin = f_obs.data().size()
if f_obs.binner() is None:
      if f_obs.indices().size()/float(self.n_refl_bin) > max_bins:
f_obs.setup_binner(n_bins = max_bins)
else:
f_obs.setup_binner( reflections_per_bin=self.n_refl_bin )
self.f_obs_w.use_binning_of( f_obs )
self.f_obs_f.use_binning_of( f_obs )
self.xray_structure = xray_structure
self.xs = crystal.symmetry( unit_cell=f_obs.unit_cell(),
space_group=f_obs.space_group() )
self.scaling_parameters = scaling_parameters_object(
xs = self.xs,
object = scaling_parameters)
if self.scaling_parameters is None:
self.scaling_parameters = scaling_parameters_object(self.xs)
self.mask_params=None
if mask_params is not None:
self.mask_params = mask_params
else:
self.mask_params = mmtbx.masks.mask_master_params.extract()
self.norma_sum_f_sq = flex.sum( f_obs.data() * f_obs.data() )
self.norma_sum_f_sq_w = flex.sum( self.f_obs_w.data() * self.f_obs_w.data() )
self.norma_sum_f_sq_f = flex.sum( self.f_obs_f.data() * self.f_obs_f.data() )
#-------------------
self.miller_set = None
self.f_atoms = None
self.free_flags_for_f_atoms = None
self.miller_set = None
self.f_atoms = self.compute_f_atoms()
#-------------------
self.f_mask_array = None
if f_mask is not None:
if f_mask.data().size() == self.f_atoms.data().size():
self.f_mask_array = f_mask
else:
self.update_f_mask()
else:
self.update_f_mask()
#-------------------
self.f_partial_array = None
#-------------------
self.data_core = xray.f_model_core_data(
hkl = self.f_atoms.indices(),
f_atoms= self.f_atoms.data(),
f_mask = self.f_mask_array.data(),
unit_cell = self.f_atoms.unit_cell(),
k_overall=self.scaling_parameters.k_overall,
u_star=self.scaling_parameters.u_star,
k_sol=self.scaling_parameters.k_sol,
u_sol=self.scaling_parameters.u_sol,
f_part=None,
k_part=self.scaling_parameters.k_part,
u_part=self.scaling_parameters.u_part )
self.target_evaluator = xray.least_squares_hemihedral_twinning_on_f(
hkl_obs = self.f_obs_w.indices(),
f_obs = self.f_obs_w.data(),
w_obs = self.f_obs_w.sigmas(),
hkl_calc = self.f_atoms.indices(),
space_group = f_obs.space_group(),
anomalous_flag= f_obs.anomalous_flag(),
alpha = self.twin_fraction,
twin_law = self.twin_law.as_double_array()[0:9] )
if(self.f_obs_f.indices().size() == 0):
self.free_target_evaluator = self.target_evaluator
else:
self.free_target_evaluator = xray.least_squares_hemihedral_twinning_on_f(
hkl_obs = self.f_obs_f.indices(),
f_obs = self.f_obs_f.data(),
w_obs = self.f_obs_f.sigmas(),
hkl_calc = self.f_atoms.indices(),
space_group = f_obs.space_group(),
anomalous_flag = f_obs.anomalous_flag(),
alpha = self.twin_fraction,
twin_law = self.twin_law.as_double_array()[0:9] )
self.r_all_object = xray.hemihedral_r_values(
hkl_obs = f_obs.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = f_obs.space_group(),
anomalous_flag = f_obs.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
self.r_work_object = xray.hemihedral_r_values(
hkl_obs = self.f_obs_w.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = self.f_obs_w.space_group(),
anomalous_flag = f_obs.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
self.r_free_object = xray.hemihedral_r_values(
hkl_obs = self.f_obs_f.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = self.f_obs_f.space_group(),
anomalous_flag = self.f_obs_f.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
self.work_detwinner = xray.hemihedral_detwinner(
hkl_obs = self.f_obs_w.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = self.f_obs_w.space_group(),
anomalous_flag = self.f_obs_w.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
if(self.f_obs_f.indices().size() == 0):
self.free_detwinner = self.work_detwinner
else:
self.free_detwinner = xray.hemihedral_detwinner(
hkl_obs = self.f_obs_f.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = self.f_obs_f.space_group(),
anomalous_flag = self.f_obs_f.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
self.full_detwinner = xray.hemihedral_detwinner(
hkl_obs = f_obs.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = f_obs.space_group(),
anomalous_flag = f_obs.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
if(self.sfg_params is not None):
self.structure_factor_gradients_w = cctbx.xray.structure_factors.gradients(
miller_set = self.miller_set,
cos_sin_table = self.sfg_params.cos_sin_table,
grid_resolution_factor = self.sfg_params.grid_resolution_factor,
quality_factor = self.sfg_params.quality_factor,
u_base = self.sfg_params.u_base,
b_base = self.sfg_params.b_base,
wing_cutoff = self.sfg_params.wing_cutoff,
exp_table_one_over_step_size = self.sfg_params.exp_table_one_over_step_size)
else:
self.structure_factor_gradients_w = cctbx.xray.structure_factors.gradients(
miller_set = self.miller_set)
self.sigmaa_object_cache = None
self.update_sigmaa_object = True
self.xray_structure_mask_cache = None
if self.xray_structure is not None:
self.xray_structure_mask_cache = self.xray_structure.deep_copy_scatterers()
self.epsilons_w = self.f_obs_w.epsilons().data().as_double()
self.epsilons_f = self.f_obs_f.epsilons().data().as_double()
def f_obs(self):
return self.f_obs_
def r_free_flags(self):
return self.r_free_flags_
def twin_test(self):
return "yes"
  def is_twin_fmodel_manager(self):
return True
def update_f_hydrogens(self, log): # XXX dummy function to conform with non-twin equivalent
return None # XXX dummy function to conform with non-twin equivalent
def info(self, free_reflections_per_bin = 140, max_number_of_bins = 20,
n_bins=None):
return mmtbx.f_model.f_model_info.info(
fmodel = self,
free_reflections_per_bin = free_reflections_per_bin,
max_number_of_bins = max_number_of_bins,
n_bins = n_bins)
def outlier_selection(self, show = False, log = None):
# XXX
return None
def remove_outliers(self, show = False, log = None):
if (show):
if (log is None): log = sys.stdout
print >> log, """\
*****************************************************************
NOT performing outlier rejection in twin refinement mode.
If there are many outliers without twin refinement, the resulting
reflection statistics may differ significantly (for example
the percentage of R-free reflections).
*****************************************************************
"""
return self
def wilson_b(self, force_update = False):
# XXX
return None
def scale_k1(self):
    # XXX is it true scale_k1 ?
    # XXX name k_overall is ambiguous.
return self.scaling_parameters.k_overall
def show_parameter_summary(self, manager=None):
# print bulk solvent parameters
print >> self.out, "Usol ", self.scaling_parameters.u_sol, self.data_core.usol()
print >> self.out, "Ksol ", self.scaling_parameters.k_sol, self.data_core.ksol()
print >> self.out, "Koverall ", self.scaling_parameters.k_overall, self.data_core.koverall()
print >> self.out, "Ustar ", self.scaling_parameters.u_star, self.data_core.ustar()
print >> self.out, "Twin fraction ", self.twin_fraction_object.twin_fraction, self.twin_fraction, self.target_evaluator.alpha()
print >> self.out, "mask step ", self.mask_params.grid_step_factor
print >> self.out, "mask shift ", self.mask_params.mean_shift_for_mask_update
print >> self.out, "mask trunk rad ", self.mask_params.shrink_truncation_radius
print >> self.out, "mask solv rad ", self.mask_params.solvent_radius
if manager is not None:
x = self.f_model().data()
y = manager.f_model().data()
print >> self.out, "Fmodel delta " , flex.sum( flex.abs(x - y) )
x = self.f_calc().data()
y = manager.f_calc().data()
print >> self.out, "Fatoms delta ", flex.sum( flex.abs(x - y) )
x = self.f_mask_array.data()
y = manager.f_mask_array.data()
print >> self.out, "Fmask delta ", flex.sum( flex.abs(x - y) )
x = flex.abs( self.bulk_solvent_mask().data - manager.bulk_solvent_mask().data )
print >> self.out, "Bit wise diff mask ", flex.sum( x )
def deep_copy(self):
new_object = twin_model_manager(
f_obs = self.f_obs().deep_copy(),
f_mask = self.f_mask_array.deep_copy(),
r_free_flags = self.r_free_flags().deep_copy(),
xray_structure = self.xray_structure.deep_copy_scatterers(),
scaling_parameters = self.scaling_parameters.deep_copy(),
mask_params = deepcopy(self.mask_params),
out = self.out,
twin_law = self.twin_law,
twin_law_str = self.twin_law_str,
start_fraction = self.twin_fraction,
n_refl_bin = self.n_refl_bin,
max_bins = self.max_bins,
detwin_mode = self.detwin_mode,
map_types = self.map_types,
sf_and_grads_accuracy_params = self.sfg_params,
)
new_object.twin_fraction_object.twin_fraction = float(self.twin_fraction_object.twin_fraction)
new_object.twin_fraction = float(self.twin_fraction_object.twin_fraction)
new_object.update()
new_object.did_search = self.did_search
if (self.fmodel_ts1 is not None) :
new_object.fmodel_ts1 = self.fmodel_ts1.deep_copy()
return new_object
def resolution_filter(self,d_max=None,d_min=None):
dc = self.deep_copy()
# make a dummy copy of the observed data please
dummy_obs = dc.f_obs().resolution_filter(d_max,d_min)
twin_complete = dc.construct_miller_set(external_miller_array = dummy_obs )
appropriate_f_mask_array = dc.f_mask_array.common_set( twin_complete )
new_object = twin_model_manager(
f_obs = dummy_obs,
f_mask = appropriate_f_mask_array, #dc.f_mask_array.resolution_filter(d_max,d_min),
r_free_flags = dc.r_free_flags().resolution_filter(d_max,d_min),
xray_structure = dc.xray_structure,
scaling_parameters = dc.scaling_parameters.deep_copy(),
mask_params = dc.mask_params,
out = dc.out,
twin_law = dc.twin_law,
twin_law_str = dc.twin_law_str,
start_fraction = dc.twin_fraction,
n_refl_bin = dc.n_refl_bin,
max_bins = dc.max_bins,
detwin_mode = dc.detwin_mode,
map_types = dc.map_types,
sf_and_grads_accuracy_params = dc.sfg_params
)
new_object.update()
new_object.did_search = self.did_search
return new_object
def select(self, selection):
self.update_f_mask()
dc = self.deep_copy()
# XXX BUG assert self.f_mask.data().size() == self.f_obs_.data().size()
if(selection is None): return dc
new_object = twin_model_manager(
      f_obs = dc.f_obs().select(selection),
f_mask = dc.f_mask_array.select(selection), # XXX BUG, see commented assert above
r_free_flags = dc.r_free_flags().select(selection),
xray_structure = dc.xray_structure,
scaling_parameters = dc.scaling_parameters.deep_copy(),
mask_params = dc.mask_params,
out = dc.out,
twin_law = dc.twin_law,
start_fraction = dc.twin_fraction,
n_refl_bin = dc.n_refl_bin,
max_bins = dc.max_bins,
detwin_mode = dc.detwin_mode,
map_types = dc.map_types,
sf_and_grads_accuracy_params = dc.sfg_params
)
new_object.did_search = self.did_search
return new_object
def f_model_scaled_with_k1_composite_work_free(self):
# XXX scaled with k1 ???
ma_w = self.f_model_w()
ma_f = self.f_model_t()
if(ma_w.indices().size() == ma_f.indices().size()): return ma_w
return ma_w.concatenate(ma_f)
def f_model(self):
tmp_f_model = self.f_atoms.customized_copy(
data = self.data_core.f_model()
)
return tmp_f_model
def f_model_w(self):
tmp = self.f_model()
return tmp.select(~self.free_flags_for_f_atoms )
def f_model_t(self):
tmp = self.f_model()
return tmp.select( self.free_flags_for_f_atoms )
def f_calc(self):
if self.f_atoms is None:
self.f_atoms = self.compute_f_atoms()
return self.f_atoms
def f_calc_w(self):
tmp = self.f_calc()
return tmp.select(~self.free_flags_for_f_atoms )
def f_calc_t(self):
tmp = self.f_calc()
return tmp.select( self.free_flags_for_f_atoms )
def target_attributes(self):
return self._target_attributes
def r_work(self, d_min=None, d_max=None):
if(self.fmodel_ts1 is not None): # XXX BAD
self.fmodel_ts1.update_xray_structure(xray_structure = self.xray_structure,
update_f_calc = True, update_f_mask=True)
return self.fmodel_ts1.r_work(d_min=d_min, d_max=d_max)
else:
w,f = self.r_values(table=False, d_min=d_min, d_max=d_max)
return w
def r_free(self, d_min=None, d_max=None):
if(self.fmodel_ts1 is not None): # XXX BAD
self.fmodel_ts1.update_xray_structure(xray_structure = self.xray_structure,
update_f_calc = True, update_f_mask = True)
return self.fmodel_ts1.r_free(d_min=d_min, d_max=d_max)
else:
w,f = self.r_values(table=False, d_min=d_min, d_max=d_max)
return f
def f_part1(self): # XXX for compatiblity with other fmodel
return self.f_calc().customized_copy(data = self.f_calc().data()*0)
def show(self, log=None, suffix=None, show_header=False):
fmt = "r_work=%6.4f r_free=%6.4f twin_fraction=%4.2f twin_law=%s"
print >> log
print >> log, fmt%(self.r_work(), self.r_free(), self.twin_fraction,
self.twin_law_str)
def update_all_scales(self, params=None, log=None, show=False,
optimize_mask=False, nproc=None, fast=False,
remove_outliers=False,refine_hd_scattering=False,
apply_back_trace=False, update_f_part1=False):
self.update_solvent_and_scale(log=log,
apply_back_trace=apply_back_trace,
update_f_part1=update_f_part1)
def update_solvent_and_scale(self,
update_f_part1=False,
apply_back_trace=False,
optimize_mask=True,
optimize_mask_thorough=False, # XXX ignored
params=None,
log=None,
verbose=-1,
initialise=False,
nproc=None, # XXX ignored
fast=None): # XXX ignored
if(self.twin_law_str is None and self.twin_law is not None):
self.twin_law_str = sgtbx.change_of_basis_op( self.twin_law ).as_hkl()
self.fmodel_ts1 = mmtbx.f_model.manager(
f_obs = self.f_obs(),
r_free_flags = self.r_free_flags(),
xray_structure = self.xray_structure,
twin_law = self.twin_law_str,
mask_params = self.mask_params,
k_sol = self.k_sol(),
b_sol = self.b_sol(),
b_cart = self.b_cart(),
twin_fraction = self.twin_fraction)
self.twin_set = self.fmodel_ts1.twin_set
#if(params is None):
# params = bss.master_params.extract()
#params.k_sol_b_sol_grid_search = False # XXX too slow otherwise
#params.number_of_macro_cycles=1 # XXX too slow otherwise, let's see may be ok
result = self.fmodel_ts1.update_all_scales(fast=False, # XXX
params=params, log=log, apply_back_trace=apply_back_trace)
self.update_core(
k_sol = result.k_sol, # XXX not implemented (see above)
b_sol = result.b_sol,
twin_fraction = self.fmodel_ts1.twin_fraction,
b_cart = result.b_cart,
k_overall = self.fmodel_ts1.scale_k1_w_for_twin_targets())
self.mask_params = self.fmodel_ts1.mask_params
self.arrays = self.fmodel_ts1.arrays
def update_core(self,
f_calc = None,
f_mask = None,
f_part = None,
b_cart = None,
k_sol = None,
b_sol = None,
k_part = None,
b_part = None,
u_sol = None,
k_overall = None,
twin_fraction = None,
r_free_flags = None):
if(r_free_flags is not None):
self.r_free_flags_ = r_free_flags
if f_calc is not None:
self.data_core.renew_fatoms( f_calc.data() )
self.f_atoms = f_calc
else:
assert self.f_atoms.indices().all_eq( self.miller_set.indices() )
self.data_core.renew_fatoms( self.f_atoms.data() )
if f_mask is not None:
self.data_core.renew_fmask( f_mask.data() )
self.f_mask_array = f_mask
else:
self.data_core.renew_fmask( self.f_mask_array.data() )
if f_part is not None:
      self.data_core.renew_fpart( f_part.data() )
self.f_partial_array = f_part
else:
if self.f_partial_array is not None:
self.data_core.renew_fpart( self.f_partial_array.data() )
if b_sol is not None:
u_sol = adptbx.b_as_u( b_sol )
if u_sol is not None:
self.data_core.usol( u_sol )
self.scaling_parameters.u_sol = u_sol
if u_sol is None:
self.data_core.usol( self.scaling_parameters.u_sol )
if k_sol is not None:
self.data_core.ksol( k_sol )
self.scaling_parameters.k_sol = k_sol
else:
self.data_core.ksol( self.scaling_parameters.k_sol )
if k_overall is not None:
self.scaling_parameters.k_overall = k_overall
self.data_core.koverall( self.scaling_parameters.k_overall )
else:
self.data_core.koverall( self.scaling_parameters.k_overall )
if b_cart is not None:
u_star = adptbx.u_cart_as_u_star( self.xs.unit_cell(), adptbx.b_as_u( list(b_cart) ) )
self.data_core.ustar(u_star)
self.scaling_parameters.u_star = u_star
if twin_fraction is None:
self.twin_fraction = self.twin_fraction_object.twin_fraction
self.target_evaluator.alpha( self.twin_fraction_object.twin_fraction )
self.free_target_evaluator.alpha( self.twin_fraction_object.twin_fraction )
else:
self.twin_fraction_object.twin_fraction = twin_fraction
self.twin_fraction = twin_fraction
self.target_evaluator.alpha( twin_fraction )
self.free_target_evaluator.alpha( twin_fraction )
def f_obs_work(self):
return self.f_obs_w
def update(self, f_calc = None,
f_obs = None,
f_mask = None,
f_ordered_solvent = None,
r_free_flags = None,
b_cart = None,
k_sol = None,
b_sol = None,
sf_and_grads_accuracy_params = None,
target_name = None,
abcd = None,
alpha_beta_params = None,
xray_structure = None,
mask_params = None,
overall_scale = None,
twin_fraction = None ):
if(sf_and_grads_accuracy_params is not None):
self.sfg_params = sf_and_grads_accuracy_params
self.update_xray_structure(update_f_calc = True)
if(f_calc is not None):
      assert f_calc.indices().all_eq(self.f_model().indices())
self.update_core(f_calc = f_calc)
if(mask_params is not None):
self.mask_params = mask_params
if(f_obs is not None):
assert f_obs.data().size() == self.f_obs_.data().size()
self.f_obs_ = f_obs
self.f_obs_w = self.f_obs_.select(~self.r_free_flags().data() )
self.f_obs_f = self.f_obs_.select( self.r_free_flags().data() )
if(f_mask is not None):
      assert f_mask.indices().all_eq( self.f_mask_array.indices() )
      assert f_mask.data().size() == self.f_mask_array.data().size()
self.update_core(f_mask = f_mask)
if(r_free_flags is not None):
self.r_free_flags_ = r_free_flags
self.update_core(r_free_flags = r_free_flags)
if(b_cart is not None):
try: assert b_cart.size() == 6
except Exception: assert len(b_cart) == 6
self.update_core(b_cart = b_cart)
if overall_scale is not None:
self.scaling_parameters.k_overall = overall_scale
self.update_core()
if twin_fraction is None:
self.twin_fraction = self.twin_fraction_object.twin_fraction
self.target_evaluator.alpha( self.twin_fraction_object.twin_fraction )
self.free_target_evaluator.alpha( self.twin_fraction_object.twin_fraction )
else:
self.twin_fraction = twin_fraction
self.target_evaluator.alpha( twin_fraction )
self.free_target_evaluator.alpha( twin_fraction )
def construct_miller_set(self, return_free_f_atoms_array=False, external_miller_array=None):
completion = None
tmp_miller = external_miller_array
if tmp_miller is None:
tmp_miller = self.f_obs()
completion = xray.twin_completion( tmp_miller.indices(),
self.xs.space_group(),
tmp_miller.anomalous_flag(),
self.twin_law.as_double_array()[0:9] )
indices = completion.twin_complete()
miller_set = miller.set(
crystal_symmetry = self.xs,
indices =indices,
anomalous_flag = tmp_miller.anomalous_flag() ).map_to_asu()
assert miller_set.is_unique_set_under_symmetry()
if not return_free_f_atoms_array:
return miller_set
else:
free_array_for_f_atoms = completion.get_free_model_selection(
miller_set.indices(),
self.r_free_flags().data() )
return miller_set, free_array_for_f_atoms
def compute_f_atoms(self):
"""Get f calc from the xray structure"""
if self.miller_set is None:
self.miller_set, self.free_flags_for_f_atoms = self.construct_miller_set(True)
if(self.sfg_params is not None):
tmp = self.miller_set.structure_factors_from_scatterers(
xray_structure = self.xray_structure,
algorithm = self.sfg_params.algorithm,
cos_sin_table = self.sfg_params.cos_sin_table,
grid_resolution_factor = self.sfg_params.grid_resolution_factor,
quality_factor = self.sfg_params.quality_factor,
u_base = self.sfg_params.u_base,
b_base = self.sfg_params.b_base,
wing_cutoff = self.sfg_params.wing_cutoff,
exp_table_one_over_step_size =
self.sfg_params.exp_table_one_over_step_size
)
else:
tmp = self.miller_set.structure_factors_from_scatterers(
xray_structure = self.xray_structure)
f_atoms = tmp.f_calc()
return f_atoms
def apply_back_b_iso(self):
eps = math.pi**2*8
unit_cell = self.xray_structure.unit_cell()
b_min = min(self.b_sol(), self.xray_structure.min_u_cart_eigenvalue())
if(b_min < 0):
self.xray_structure.tidy_us(u_min = 1.e-6)
b_iso = self.b_iso()
b_test = b_min+b_iso
if(b_test < 0.0): b_adj = b_iso + abs(b_test) + 0.001
else: b_adj = b_iso
if(abs(b_adj) <= 300.0):
b_cart = self.b_cart()
b_cart_new = [b_cart[0]-b_adj,b_cart[1]-b_adj,b_cart[2]-b_adj,
b_cart[3], b_cart[4], b_cart[5]]
self.update(b_cart = b_cart_new)
self.update(b_sol = self.k_sol_b_sol()[1] + b_adj)
self.xray_structure.shift_us(b_shift = b_adj)
b_min = min(self.b_sol(), self.xray_structure.min_u_cart_eigenvalue())
assert b_min >= 0.0
self.xray_structure.tidy_us(u_min = 1.e-6)
self.update_xray_structure(
xray_structure = self.xray_structure,
update_f_calc = True,
update_f_mask = False,
update_f_ordered_solvent = False,
out = None)
def _get_step(self, update_f_ordered_solvent = False):
step = self.f_obs().d_min()/self.mask_params.grid_step_factor
if(step < 0.3): step = 0.3
step = min(0.8, step)
if(update_f_ordered_solvent): step = 0.3
return step
def _update_f_mask_flag(self, xray_structure, mean_shift):
if(self.xray_structure_mask_cache is None):
self.xray_structure_mask_cache = xray_structure.deep_copy_scatterers()
return True
else:
sites_cart_1 = self.xray_structure_mask_cache.sites_cart()
sites_cart_2 = xray_structure.sites_cart()
self.xray_structure_mask_cache = xray_structure.deep_copy_scatterers()
if(sites_cart_1.size() != sites_cart_2.size()): return True
atom_atom_distances = flex.sqrt((sites_cart_1 - sites_cart_2).dot())
mean_shift_ = flex.mean(atom_atom_distances)
update_f_mask = False
if(mean_shift_ >= mean_shift):
update_f_mask = True
return update_f_mask
def print_diffs(self):
sites_cart_1 = self.xray_structure_mask_cache.sites_cart()
sites_cart_2 = self.xray_structure.sites_cart()
atom_atom_distances = flex.sqrt((sites_cart_1 - sites_cart_2).dot())
mean_shift_ = flex.mean(atom_atom_distances)
print >> self.out, "MEAN SHIFT: ", mean_shift_
def update_xray_structure(self,
xray_structure = None,
update_f_calc = False,
update_f_mask = False,
update_f_ordered_solvent = False,
force_update_f_mask = True,
out = None,
k_sol = None,
b_sol = None,
b_cart = None):
if (xray_structure is not None):
self.xray_structure = xray_structure
if(update_f_mask):
if(force_update_f_mask):
consider_mask_update = True
else:
consider_mask_update = self._update_f_mask_flag(
xray_structure = self.xray_structure,
mean_shift = self.mask_params.mean_shift_for_mask_update)
f_calc = None
if(update_f_calc):
timer = user_plus_sys_time()
assert self.xray_structure is not None
if(self.sfg_params is not None):
f_calc = self.miller_set.structure_factors_from_scatterers(
xray_structure = self.xray_structure,
algorithm = self.sfg_params.algorithm,
cos_sin_table = self.sfg_params.cos_sin_table,
grid_resolution_factor = self.sfg_params.grid_resolution_factor,
quality_factor = self.sfg_params.quality_factor,
u_base = self.sfg_params.u_base,
b_base = self.sfg_params.b_base,
wing_cutoff = self.sfg_params.wing_cutoff,
exp_table_one_over_step_size =
self.sfg_params.exp_table_one_over_step_size).f_calc()
else:
f_calc = self.miller_set.structure_factors_from_scatterers(
xray_structure = self.xray_structure).f_calc()
f_mask = None
set_core_flag=True
if(update_f_mask and consider_mask_update):
bulk_solvent_mask_obj = self.bulk_solvent_mask()
f_mask = bulk_solvent_mask_obj.structure_factors(miller_set= self.miller_set)
if([f_calc, f_mask].count(None) == 2): set_core_flag = False
else: set_core_flag = True
if(f_calc is None): f_calc = self.f_calc()
if(f_mask is None): f_mask = self.f_mask()
if(set_core_flag):
self.update_core(f_calc = f_calc,
f_mask = f_mask,
b_cart = b_cart,
k_sol = k_sol,
b_sol = b_sol)
def bulk_solvent_mask(self):
step = self._get_step()
result = masks.bulk_solvent(
xray_structure = self.xray_structure,
grid_step = step,
ignore_zero_occupancy_atoms=self.mask_params.ignore_zero_occupancy_atoms,
solvent_radius = self.mask_params.solvent_radius,
shrink_truncation_radius = self.mask_params.shrink_truncation_radius)
return result
def update_f_mask(self):
mask = self.bulk_solvent_mask()
self.f_mask_array = mask.structure_factors( self.miller_set )
def r_values(self, table=True, rows=False, d_min=None, d_max=None, again=False,
free_reflections_per_bin = 140, max_number_of_bins = 20):
if rows:
table=True
additional_selection_w = flex.bool(self.f_obs_w.data().size(), True)
d_w = self.f_obs_w.d_spacings().data()
if d_max is not None:
exclude_low_w = flex.bool(d_w<d_max)
additional_selection_w = additional_selection_w&exclude_low_w
if d_min is not None:
exclude_high_w = flex.bool(d_w>d_min)
additional_selection_w = additional_selection_w&exclude_high_w
additional_selection_f = flex.bool(self.f_obs_f.data().size(), True)
d_f = self.f_obs_f.d_spacings().data()
if d_max is not None:
exclude_low_f = flex.bool(d_f<d_max)
additional_selection_f = additional_selection_f&exclude_low_f
if d_min is not None:
exclude_high_f = flex.bool(d_f>d_min)
additional_selection_f = additional_selection_f&exclude_high_f
r_abs_work_f_overall = self.r_work_object.r_amplitude_abs(
f_obs = self.f_obs_w.data(),
f_model = self.data_core.f_model(),
selection = additional_selection_w,
twin_fraction = self.twin_fraction_object.twin_fraction)
r_abs_free_f_overall = self.r_free_object.r_amplitude_abs(
self.f_obs_f.data(),
self.data_core.f_model(),
additional_selection_f,
self.twin_fraction_object.twin_fraction)
#make a sigmaa object
tmp_sigmaa_object = self.sigmaa_object()
if table:
r_abs_work_f_bin = []
r_abs_free_f_bin = []
bin_low = []
bin_high= []
n_free = []
n_work = []
rows = []
bins = []
n_bins = self.determine_n_bins(
free_reflections_per_bin = free_reflections_per_bin,
max_n_bins = max_number_of_bins)
self.f_obs_f.setup_binner(n_bins = n_bins)
self.f_obs_w.use_binning_of(self.f_obs_f)
completeness = self.f_obs_w.completeness(use_binning=True).data
for i_bin in self.f_obs_f.binner().range_used():
        selection = flex.bool( self.f_obs_w.binner().bin_indices() == i_bin )
        # combine with the resolution-based selection, as done for the free set below
        selection = selection & additional_selection_w
        n_work = selection.count(True)
tmp_work = self.r_work_object.r_amplitude_abs(
f_obs = self.f_obs_w.data(),
f_model = self.data_core.f_model(),
selection = selection,
twin_fraction = self.twin_fraction_object.twin_fraction)
mean_f_obs_w = flex.mean_default( self.f_obs_w.data().select( selection ), None )
selection = flex.bool( self.f_obs_f.binner().bin_indices() == i_bin )
selection = selection&additional_selection_f
n_free = selection.count(True)
tmp_free = self.r_free_object.r_amplitude_abs(
f_obs = self.f_obs_f.data(),
f_model = self.data_core.f_model(),
selection = selection,
twin_fraction = self.twin_fraction_object.twin_fraction)
r_abs_work_f_bin.append(tmp_work)
r_abs_free_f_bin.append(tmp_free)
d_max,d_min = self.f_obs_w.binner().bin_d_range( i_bin )
d_range = self.f_obs_w.binner().bin_legend(
i_bin=i_bin, show_bin_number=False, show_counts=False)
alpha_w, beta_w = self.alpha_beta_w()
alpha_f, beta_f = self.alpha_beta_f()
n_additional_selection_w = flex.bool(alpha_w.data().size(), True)
d_w = alpha_w.d_spacings().data()
if d_max is not None:
exclude_low_w = flex.bool(d_w<d_max)
n_additional_selection_w = n_additional_selection_w&exclude_low_w
if d_min is not None:
exclude_high_w = flex.bool(d_w>d_min)
n_additional_selection_w = n_additional_selection_w&exclude_high_w
n_additional_selection_f = flex.bool(alpha_f.data().size(), True)
d_f = alpha_f.d_spacings().data()
if d_max is not None:
exclude_low_f = flex.bool(d_f<d_max)
n_additional_selection_f = n_additional_selection_f&exclude_low_f
if d_min is not None:
exclude_high_f = flex.bool(d_f>d_min)
n_additional_selection_f = n_additional_selection_f&exclude_high_f
alpha_f = flex.mean_default( alpha_f.select( n_additional_selection_f ).data(), None )
beta_f = flex.mean_default( beta_f.select( n_additional_selection_f ).data(), None )
alpha_w = flex.mean_default( alpha_w.select( n_additional_selection_w ).data(), None )
beta_w = flex.mean_default( beta_w.select( n_additional_selection_w ).data(), None )
phase_error_w = flex.mean_default( self.phase_errors_work().select( n_additional_selection_w), None )
phase_error_f = flex.mean_default( self.phase_errors_test().select( n_additional_selection_f), None )
fom_w = flex.mean_default( self.figures_of_merit_work().select( n_additional_selection_w ), None )
tmp = [ str( "%3i"%(i_bin) ),
str( "%5.2f"%(d_max) ),
str( "%5.2f"%(d_min) ),
str( "%5i"%(n_work) ),
str( "%3.2f"%(tmp_work) ),
str( "%5i"%(n_free) ),
str( "%3.2f"%(tmp_free) ) ]
"""
i_bin = None,
d_range = None,
completeness = None,
alpha_work = None,
beta_work = None,
r_work = None,
r_free = None,
target_work = None,
target_free = None,
n_work = None,
n_free = None,
mean_f_obs = None,
fom_work = None,
pher_work = None,
pher_free = None
"""
rows.append( tmp )
bin = resolution_bin(i_bin=i_bin,
d_range=d_range,
completeness=completeness[i_bin],
alpha_work=alpha_w,
beta_work=beta_w,
r_work=tmp_work,
r_free=tmp_free,
target_work=None,
target_free=None,
n_work=n_work,
n_free=n_free,
scale_k1_work= None, # XXX for Peter to fix.
mean_f_obs = mean_f_obs_w,
fom_work = fom_w,
pher_work = phase_error_w,
pher_free = phase_error_f )
bins.append( bin )
if not rows:
header = ("bin", "d_max", "d_min", "n_work", "r_work", "n_free", "r_free")
comments = """
Overall r values
R Work : %4.3f
R Free : %4.3f
R = \sum_h( |Ft-Fo| )/ \sum_h(Fo)
Ft = Sqrt(tf*F1^2 + (1-tf)F2^2)
F1,F2 are twin related model amplitudes.
tf is the twin fraction and Fo is an observed amplitude."""%(r_abs_work_f_overall, r_abs_free_f_overall)
table_txt = table_utils.format( [header]+rows,
comments=comments,
has_header=True,
separate_rows=False,
prefix='| ',
postfix=' |')
print >> self.out, "------------------------ R values ------------------------"
print >> self.out, " twin law : %s"%( sgtbx.change_of_basis_op( self.twin_law ).as_hkl() )
print >> self.out, " twin fraction : %4.3f"%( self.twin_fraction_object.twin_fraction)
print >> self.out, table_txt
print >> self.out, "-----------------------------------------------------------"
print >> self.out
self.r_work_in_lowest_resolution_bin(show=True)
self.r_overall_low_high(show=True)
else:
return bins
else:
return r_abs_work_f_overall, r_abs_free_f_overall
def r_all(self):
selection = flex.bool( self.f_obs().data().size(), True )
overall_r = self.r_all_object.r_amplitude_abs(
f_obs = self.f_obs().data(),
f_model = self.data_core.f_model(),
selection = selection,
twin_fraction = self.twin_fraction_object.twin_fraction)
return overall_r
def r_work_in_lowest_resolution_bin(self, reflections_per_bin=200, show=False):
d_star_sq = self.f_obs_w.d_star_sq().data()
sort_permut = flex.sort_permutation( d_star_sq )
if sort_permut.size() < reflections_per_bin:
reflections_per_bin = sort_permut.size()
i_select = sort_permut[:reflections_per_bin-1]
b_select = flex.bool(sort_permut.size(), False )
b_select = b_select.set_selected( i_select, True )
tmp_work = self.r_work_object.r_amplitude_abs(
f_obs = self.f_obs_w.data(),
f_model = self.data_core.f_model(),
selection = b_select,
twin_fraction = self.twin_fraction_object.twin_fraction)
if not show:
return tmp_work
else:
print >> self.out, "-----------------------------------------------------------"
print >> self.out, " R-value for the %i lowest resolution reflections:"%(reflections_per_bin)
print >> self.out, " %4.3f" %(self.r_work_in_lowest_resolution_bin(reflections_per_bin))
print >> self.out, "-----------------------------------------------------------"
def r_overall_low_high(self, d = 6.0, show=False):
r_work = self.r_work()
d_max, d_min = self.f_obs_w.d_max_min()
if(d_max < d): d = d_max
if(d_min > d): d = d_min
n_low = self.f_obs_w.resolution_filter(d_min = d, d_max = 999.9).data().size()
if(n_low > 0):
r_work_l = self.r_values(d_min = d, d_max = 999.9, table=False )[0]
else:
r_work_l = None
n_high = self.f_obs_w.resolution_filter(d_min = 0.0, d_max = d).data().size()
if(n_high > 0):
r_work_h = self.r_values(d_min = 0.0, d_max = d,table=False)[0]
else:
r_work_h = None
    if(r_work_l is None): r_work_l = 0.0
    if(r_work_h is None): r_work_h = 0.0
if not show:
return r_work, r_work_l, r_work_h, n_low, n_high
else:
print >> self.out, "----------------------------------------------------------"
print >> self.out, "Overall, low and high resolution R-work values"
print >> self.out
print >> self.out, "Limits: Overall: %6.2f -- %6.2f"%(d_max,d_min)
print >> self.out, " Low : %6.2f -- %6.2f"%(d_max,d)
print >> self.out, " High : %6.2f -- %6.2f"%(d,d_min)
print >> self.out
print >> self.out, "R values : Overall low high"
print >> self.out, " %6.3f %6.3f %6.3f"%(r_work,r_work_l,r_work_h)
print >> self.out, "Contributors:%7i %7i %7i"%(n_low+n_high, n_low,n_high)
print >> self.out, "----------------------------------------------------------"
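  # Example of the split above (hypothetical numbers): with d_max/d_min of
  # 40.0/2.0 A and the default cut d = 6.0, the low shell covers 40.0-6.0 A,
  # the high shell 6.0-2.0 A, and n_low + n_high equals the full working set.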
def twin_fraction_scan(self, n=10):
"""for each twin fraction, compute the target value and r value"""
print >> self.out
print >> self.out
print >> self.out, "------------------------ Twin fraction scan ----------------------"
print >> self.out
print >> self.out, " R-values and target values for various twin fractions are listed."
print >> self.out
current_twin_fraction = twin_fraction_object(self.twin_fraction_object.twin_fraction)
    trial_twin_fractions = list( flex.double( range(n+1) )/(2.0*n) )
    rows = []
    for tf in trial_twin_fractions:
tmp_twin_fraction = twin_fraction_object( tf )
self.update_solvent_and_scale( twin_fraction_parameters = tmp_twin_fraction )
rw,rf = self.r_values(table=False)
ttw,ttf = self.target(print_it=False)
tmp = [ "%4.3f"%(tf),
"%4.3f"%(rw),
"%4.3f"%(rf),
"%5.4e"%(ttw),
"%5.4e"%(ttf)
]
rows.append( tmp )
legend = ( "Twin fraction", "R-work", "R-free", "Target-work", "Target-free" )
table_txt = table_utils.format( [legend]+rows,
comments=None,
has_header=True,
separate_rows=False,
prefix='| ',
postfix=' |')
print >> self.out, table_txt
print >> self.out
print >> self.out, "------------------------------------------------------------------"
print >> self.out
print >> self.out
self.update_solvent_and_scale( twin_fraction_parameters = current_twin_fraction )
def target(self, print_it=True):
tmp_w=self.target_evaluator.target( self.data_core.f_model() )/self.norma_sum_f_sq_w
if(self.norma_sum_f_sq_f == 0):
tmp_f = tmp_w
else:
tmp_f=self.free_target_evaluator.target( self.data_core.f_model() )/self.norma_sum_f_sq_f
if print_it:
print >> self.out
print >> self.out, "----------------- Target values -----------------"
print >> self.out, " working set : %8.6e "%(tmp_w)
print >> self.out, " free set : %8.6e "%(tmp_f)
print >> self.out, "-------------------------------------------------"
else:
return(tmp_w,tmp_f)
def target_functor(self, alpha_beta=None): #XXX fake
return target_functor(manager=self)
def target_f(self):
return self.target_t()
def detwin_data(self, mode=None):
#determine how to detwin
if mode is None:
mode = "auto"
if mode == "auto":
if self.twin_fraction_object.twin_fraction > self.detwin_switch_twin_fraction:
mode = "proportional"
if self.twin_fraction_object.twin_fraction > 1.0 - self.detwin_switch_twin_fraction:
mode = "algebraic"
else:
mode = "algebraic"
if mode == "algebraic":
if abs(self.twin_fraction_object.twin_fraction-0.5)<1e-3:
print >> self.out, "Automatic adjustment: detwinning mode set to proportional"
# FIXME this seems appropriate but was not implemented - bug?
#mode = "proportional"
assert mode in self.possible_detwin_modes
assert mode != "auto"
    #detwinning should be done against the original observed intensities
tmp_i_obs = self.f_obs().deep_copy().f_as_f_sq()
untouched = self.f_obs().deep_copy().f_as_f_sq()
dt_f_obs = None
tmp_free = self.r_free_flags().deep_copy()
# now please detwin the data
if mode == "proportional":
sigmas = tmp_i_obs.sigmas()
if (sigmas is None):
sigmas = flex.double(tmp_i_obs.data().size(), 1.0)
dt_iobs, dt_isigma = self.full_detwinner.detwin_with_model_data(
tmp_i_obs.data(),
sigmas,
self.data_core.f_model(),
self.twin_fraction_object.twin_fraction )
tmp_i_obs = tmp_i_obs.customized_copy(
data = dt_iobs, sigmas = dt_isigma).set_observation_type( tmp_i_obs )
dt_f_obs = tmp_i_obs.f_sq_as_f()
if mode == "algebraic":
sigmas = tmp_i_obs.sigmas()
if (sigmas is None):
sigmas = flex.double(tmp_i_obs.data().size(), 1.0)
dt_iobs, dt_isigma = self.full_detwinner.detwin_with_twin_fraction(
tmp_i_obs.data(),
sigmas,
self.twin_fraction_object.twin_fraction )
# find out which intensities are zero or negative, they will be eliminated later on
zeros = flex.bool( dt_iobs <= 0 )
x = dt_iobs.select( ~zeros )
y = tmp_i_obs.data().select( ~zeros )
tmp_i_obs = tmp_i_obs.customized_copy(
data = dt_iobs, sigmas = dt_isigma).set_observation_type( tmp_i_obs )
dt_f_obs = tmp_i_obs.select( ~zeros ).f_sq_as_f()
tmp_free = tmp_free.select( ~zeros )
untouched = untouched.select( ~zeros )
    #we can now quickly scale the two and see what happens.
fmodel = self.f_model()
# XXX Pavel: avoid floating-point crashes
re = flex.abs(flex.real(fmodel.data()))
im = flex.abs(flex.imag(fmodel.data()))
sel = re > 1.e+50
sel |= im > 1.e+50
d = fmodel.data().set_selected(sel, 0+0j)
fmodel = fmodel.array(data = d)
#
abs_tmp_f_model = abs( fmodel ).common_set(
dt_f_obs ).set_observation_type( dt_f_obs )
tmp_f_model = fmodel.common_set( dt_f_obs )
scaler = relative_scaling.ls_rel_scale_driver(
miller_native = abs_tmp_f_model,
miller_derivative = dt_f_obs,
use_intensities = False,
scale_weight = False,
use_weights = False )
    if dt_f_obs.data().size()==0:
      if mode == "algebraic":
        raise Sorry("Algebraic detwinning of the data resulted in an empty dataset!\n Please try detwinning using proportionality rules (detwin.mode=proportional)")
      else:
        raise Sorry("This should never have happened. Please contact the authors.")
return dt_f_obs, tmp_f_model, tmp_free
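  # Illustrative sketch of the two detwinning rules used above (plain Python,
  # hypothetical helpers; i1/i2 are twin-related observed intensities and
  # ic1/ic2 the corresponding calculated intensities):
  #
  #   def detwin_algebraic(i1, i2, alpha):
  #     # exact inversion of Iobs = (1-alpha)*I + alpha*I_twin;
  #     # numerically unstable as alpha approaches 0.5
  #     return ((1.0-alpha)*i1 - alpha*i2) / (1.0 - 2.0*alpha)
  #
  #   def detwin_proportional(i1, ic1, ic2, alpha):
  #     # apportion Iobs according to the model intensities instead of
  #     # inverting the twin sum; stable near alpha = 0.5
  #     return i1 * (1.0-alpha)*ic1 / ((1.0-alpha)*ic1 + alpha*ic2)
  #
  #   detwin_algebraic(100.0, 60.0, 0.2)  # -> (80.0-12.0)/0.6 = 113.33...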
def sigmaa_object(self, detwinned_data=None, f_model_data=None, tmp_free=None, forced_update=False):
if self.sigmaa_object_cache is None:
forced_update = True
assert ( [detwinned_data,f_model_data] ).count(None) != 1
if tmp_free is None:
tmp_free = self.r_free_flags
if (detwinned_data is None):
if forced_update or self.update_sigmaa_object is True:
self.update_sigmaa_object = True
detwinned_data,f_model_data,tmp_free = self.detwin_data(mode=self.detwin_mode)
else:
tmp_sigmaa_object = sigmaa_estimation.sigmaa_estimator(
miller_obs = detwinned_data,
miller_calc = f_model_data,
r_free_flags = tmp_free,
kernel_width_free_reflections=200,
)
if not forced_update:
return tmp_sigmaa_object
else:
self.sigmaa_object_cache = tmp_sigmaa_object
return self.sigmaa_object_cache
if forced_update:
self.update_sigmaa_object = True
detwinned_data,f_model_data,tmp_free = self.detwin_data(mode=self.detwin_mode)
if self.update_sigmaa_object:
self.update_sigmaa_object = False
if(tmp_free.data().count(True) == 0):
tmp_free = tmp_free.array(data = ~tmp_free.data())
self.sigmaa_object_cache = sigmaa_estimation.sigmaa_estimator(
miller_obs = detwinned_data,
miller_calc = f_model_data,
r_free_flags = tmp_free,
kernel_width_free_reflections=200,
)
return self.sigmaa_object_cache
def model_error_ml(self):
return None # XXX
def alpha_beta(self, external_sigmaa_object=None):
sigmaa_object = external_sigmaa_object
if sigmaa_object is None:
sigmaa_object = self.sigmaa_object()
return sigmaa_object.alpha_beta()
def alpha_beta_w(self, external_sigmaa_object=None, only_if_required_by_target=False):
a,b = self.alpha_beta(external_sigmaa_object=external_sigmaa_object)
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
a = a.select( ~tmp_free.data() )
b = b.select( ~tmp_free.data() )
return a,b
def alpha_beta_f(self,external_sigmaa_object=None,only_if_required_by_target=False):
a,b = self.alpha_beta(external_sigmaa_object=external_sigmaa_object)
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
a = a.select( tmp_free.data() )
b = b.select( tmp_free.data() )
return a,b
def figures_of_merit(self):
sigmaa_object = self.sigmaa_object()
return sigmaa_object.fom().data()
def figures_of_merit_work(self):
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
fom = self.figures_of_merit().select(
~tmp_free.data())
return fom
def figures_of_merit_t(self):
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
fom = self.figures_of_merit().select(
tmp_free.data())
return fom
def phase_errors(self):
sigmaa_object = self.sigmaa_object()
return sigmaa_object.phase_errors().data()
def phase_errors_work(self):
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
pher = self.phase_errors().select(~tmp_free.data())
return pher
def phase_errors_test(self):
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
pher = self.phase_errors().select(tmp_free.data())
return pher
def w_star(self):
t_o, t_c, t_free = self.detwin_data(mode='proportional')
t_sigmaa_object = self.sigmaa_object(t_o,t_c,t_free)
a,b = self.alpha_beta( t_sigmaa_object )
obj = max_lik.f_star_w_star_mu_nu(
f_obs = t_o.data(),
f_model = flex.abs(t_c.data()),
alpha = a.data(),
beta = b.data(),
space_group = self.f_obs_.space_group(),
miller_indices = t_o.indices())
w_star_o = miller.array(miller_set = t_o,
data = obj.w_star())
self.sigmaa_object(forced_update=True)
return w_star_o
def set_pseudo_ml_weights(self):
weights = self.w_star().data()
completion = xray.twin_completion( self.f_obs_.indices(),
self.xs.space_group(),
self.f_obs_.anomalous_flag(),
self.twin_law.as_double_array()[0:9] )
twinned_weights = completion.twin_sum(weights, self.twin_fraction_object.twin_fraction)
self.target_evaluator.set_weights( twinned_weights )
return None
def determine_n_bins(self,
free_reflections_per_bin,
max_n_bins=None,
min_n_bins=1,
min_refl_per_bin=100):
assert free_reflections_per_bin > 0
n_refl = self.r_free_flags().data().size()
n_free = self.r_free_flags().data().count(True)
n_refl_per_bin = free_reflections_per_bin
if (n_free != 0):
n_refl_per_bin *= n_refl / n_free
n_refl_per_bin = min(n_refl, iround(n_refl_per_bin))
result = max(1, iround(n_refl / max(1, n_refl_per_bin)))
if (min_n_bins is not None):
result = max(result, min(min_n_bins, iround(n_refl / min_refl_per_bin)))
if (max_n_bins is not None):
result = min(max_n_bins, result)
return result
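  # Worked example of the heuristic above (hypothetical counts): with
  # n_refl = 50000, n_free = 2500 and free_reflections_per_bin = 140,
  # n_refl_per_bin becomes 140*50000/2500 = 2800 and
  # result = iround(50000/2800) = 18 bins, before the min/max clamps.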
def _map_coeff(self, f_obs, f_model, f_obs_scale, f_model_scale):
d_obs = miller.array(miller_set = f_model,
data = f_obs.data()*f_obs_scale
).phase_transfer(phase_source = f_model)
return miller.array(miller_set = f_model,
data = d_obs.data()-f_model.data()*f_model_scale)
def map_coefficients (self, **kwds) :
emap = self.electron_density_map()
return emap.map_coefficients(**kwds)
def _get_real_map (self, **kwds) :
map_coeffs = self.map_coefficients(**kwds)
return map_coeffs.fft_map(
resolution_factor=0.25).apply_sigma_scaling().real_map_unpadded()
def two_fofc_map (self, **kwds) :
kwds['map_type'] = "2mFo-DFc"
return self._get_real_map(**kwds)
def fofc_map (self, **kwds) :
kwds['map_type'] = "mFo-DFc"
return self._get_real_map(**kwds)
def anomalous_map (self, **kwds) :
if (not self.f_obs().anomalous_flag()) : return None
kwds['map_type'] = "anom"
return self._get_real_map(**kwds)
def compute_map_coefficients(self,
map_type = None,
k = None,
n = None,
w1 = None,
w2 = None,
isotropize=None,
ncs_average = None,
):
if (map_type == "Fmodel") :
map_type = "Fc"
elif (map_type == "DFmodel") :
map_type = "DFc"
if (map_type.lower().startswith("anom")) :
map_type = "anom"
supported_types = ("Fo-Fc", "Fobs-Fmodel",
"2mFo-DFc", "2mFobs-DFmodel",
"mFo-DFc", "mFobs-DFmodel",
"gradient",
"m_gradient",
"mFo",
"Fc",
"DFc",
"anom",
)
    if map_type not in supported_types:
raise Sorry(("Map type '%s' not supported for twinned structures. "+
"Allowed types: %s.") % (map_type, ", ".join(supported_types)))
# this is to modify default behavior of phenix.refine
if (map_type == "mFo-DFc") or (map_type == "mFobs-DFmodel") :
if self.map_types.fofc == "gradient":
map_type = "gradient"
if self.map_types.fofc == "m_gradient":
map_type = "m_gradient"
if self.map_types.fofc == "m_dtfo_d_fc":
map_type = "m_dtfo_d_fc"
if (map_type == "2mFo-DFc") or (map_type=="2mFobs-DFmodel") :
if self.map_types.twofofc == "two_m_dtfo_d_fc":
map_type = "two_m_dtfo_d_fc"
if self.map_types.twofofc == "two_dtfo_fc":
map_type = "two_dtfo_fc"
#detwin
dt_f_obs, tmp_f_model, tmp_free = self.detwin_data(mode=self.detwin_mode)
#for aniso correction
aniso_scale = 1.0/self.data_core.overall_scale() # anisotropy correction
aniso_scale = self.f_atoms.customized_copy(
data = aniso_scale ).common_set( dt_f_obs )
aniso_scale = aniso_scale.data()
if (map_type == "anom") :
if (not dt_f_obs.anomalous_flag()) :
return None
anom_diff = dt_f_obs.anomalous_differences()
tmp_f_model = tmp_f_model.average_bijvoet_mates()
tmp_f_model, anom_diff = tmp_f_model.common_sets(other=anom_diff)
anom_diff = anom_diff.phase_transfer(phase_source=tmp_f_model)
result = miller.array(miller_set=anom_diff,
data=anom_diff.data()/(2j))
return result
elif map_type not in ["gradient","m_gradient"]:
result = None
if (map_type == "Fc") :
result = tmp_f_model
elif (map_type == "DFc") :
sigmaa_object = self.sigmaa_object(
detwinned_data=dt_f_obs,
f_model_data=tmp_f_model,
tmp_free=tmp_free,
forced_update=True)
m, dt_f_obs = sigmaa_object.fom().common_sets( dt_f_obs )
d, dt_f_obs = sigmaa_object.alpha_beta()[0].common_sets( dt_f_obs )
dt_f_obs, tmp_f_model = dt_f_obs.common_sets( tmp_f_model )
result = tmp_f_model.customized_copy(
data=tmp_f_model.data()*d.data())
elif (map_type == "mFo") :
sigmaa_object = self.sigmaa_object(
detwinned_data=dt_f_obs,
f_model_data=tmp_f_model,
tmp_free=tmp_free,
forced_update=True)
m, dt_f_obs = sigmaa_object.fom().common_sets( dt_f_obs )
dt_f_obs, tmp_f_model = dt_f_obs.common_sets( tmp_f_model )
result = dt_f_obs.customized_copy(
data=dt_f_obs.data()*m.data()).phase_transfer(
phase_source=tmp_f_model)
elif (map_type in ["Fo-Fc", "Fobs-Fmodel"]) :
if ([k,n]).count(None) > 0:
raise Sorry("Map coefficient multipliers (k and n) must be provided to generate detwinned maps")
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = k,
f_model_scale = n )
assert result is not None
else:
sigmaa_object = self.sigmaa_object(
detwinned_data=dt_f_obs,
f_model_data=tmp_f_model,
tmp_free=tmp_free,
forced_update=True)
dt_f_obs, tmp_f_model = dt_f_obs.common_sets( tmp_f_model )
m, dt_f_obs = sigmaa_object.fom().common_sets( dt_f_obs )
d, dt_f_obs = sigmaa_object.alpha_beta()[0].common_sets( dt_f_obs )
m = m.data()
d = d.data()
dt_f_obs, tmp_f_model = dt_f_obs.common_sets( tmp_f_model )
if map_type == "m_dtfo_d_fc":
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = m ,
f_model_scale =d )
if map_type == "dtfo_fc":
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = 1.0,
f_model_scale =1.0 )
if map_type == "two_m_dtfo_d_fc":
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = 2*m,
f_model_scale = d )
if map_type == "two_dtfo_fc":
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = 2,
f_model_scale = 1 )
      assert result is not None
if self.map_types.aniso_correct:
result = result.customized_copy( data = result.data()*aniso_scale )
return result
else:
# get coefficients for a gradient map please
gradients = self.target_evaluator.d_target_d_fmodel(
self.data_core.f_model() )
gradients = self.f_atoms.customized_copy(
data = -gradients).common_set( self.f_obs() )
if map_type == "m_gradient":
# get the FOMs please
m = self.sigmaa_object().fom().common_set(self.f_obs()).data()
gradients = gradients.customized_copy(
data = gradients.data()*m )
if self.map_types.aniso_correct:
gradients = gradients.customized_copy( data = gradients.data()*aniso_scale )
return gradients
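  # Illustrative sketch of the coefficient algebra behind _map_coeff above
  # (plain complex arithmetic; m is the figure of merit, d the SigmaA D
  # value; names and numbers are hypothetical):
  #
  #   import cmath
  #   def map_coeff(f_obs_amp, f_model, f_obs_scale, f_model_scale):
  #     d_obs = f_obs_scale*f_obs_amp*cmath.exp(1j*cmath.phase(f_model))
  #     return d_obs - f_model_scale*f_model
  #
  #   map_coeff(10.0, 8.0+6.0j, m, d)      # mFo-DFc   (difference map)
  #   map_coeff(10.0, 8.0+6.0j, 2.0*m, d)  # 2mFo-DFc  (standard map)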
def k_part(self): return 0 # XXX to be compatible with non-twin fmodel
def b_part(self): return 0 # XXX to be compatible with non-twin fmodel
def electron_density_map(self, k = 1,
n = 1,
w1 = None,
w2 = None,
resolution_factor = 1/3.,
fill_missing_f_obs = True, # XXX not used since not available for twin case.
symmetry_flags = None,
fill_mode = None,
reverse_scale = True # XXX Dummy parameter, not used here.
): # XXX Added for compatibility.
# XXX work-around to support new developments in non-twin fmodel. PA.
class result(object):
def __init__(self, resolution_factor, symmetry_flags, fmodel, reverse_scale):
self.resolution_factor = resolution_factor
self.symmetry_flags = symmetry_flags
self.fmodel = fmodel
self.mch = None # XXX prevent crash in mmtbx.maps
# XXX: added extra keywords passed by mmtbx.maps, which will simply be
# ignored here. -nat
def map_coefficients(self,
map_type=None,
acentrics_scale=None,
centrics_pre_scale=None,
ncs_average=None,
isotropize=None,
exclude_free_r_reflections=False,
post_processing_callback=None,
fill_missing=None,
sharp=None,
pdb_hierarchy=None,
merge_anomalous=None): # FIXME ignored
if (map_type in ["gradient", "m_gradient", "anom"]) :
return self.fmodel.compute_map_coefficients(map_type=map_type)
else :
map_name_manager = mmtbx.map_names(map_name_string = map_type)
k = map_name_manager.k
n = map_name_manager.n
return self.fmodel.compute_map_coefficients(
map_type = map_type,
k = k,
n = n,
w1 = w1,
w2 = w2)
def fft_map(self, resolution_factor = None,
symmetry_flags = None,
map_coefficients = None,
other_fft_map = None,
use_all_data = False,
map_type = None):
if(resolution_factor is None):
resolution_factor = self.resolution_factor
if(symmetry_flags is None):
symmetry_flags = self.symmetry_flags
if (map_type in ["gradient", "m_gradient", "anom"]) :
map_coefficients = self.fmodel.compute_map_coefficients(
map_type=map_type)
else :
map_name_manager = mmtbx.map_names(map_name_string = map_type)
k = map_name_manager.k
n = map_name_manager.n
map_coefficients = self.fmodel.compute_map_coefficients(
map_type = map_type,
k = k,
n = n,
w1 = w1,
w2 = w2)
return map_coefficients.fft_map(
resolution_factor = resolution_factor,
symmetry_flags = symmetry_flags)
return result(resolution_factor= resolution_factor,
symmetry_flags = symmetry_flags,
fmodel = self,
reverse_scale = reverse_scale)
def u_star(self):
return self.data_core.ustar()
def u_cart(self):
tmp = self.u_star()
tmp = adptbx.u_star_as_u_cart(self.xs.unit_cell(),tmp)
return tmp
def b_cart(self):
b_cart = adptbx.u_as_b( self.u_cart() )
return b_cart
def b_iso(self):
b_cart = self.b_cart()
return (b_cart[0]+b_cart[1]+b_cart[2])/3.0
def u_iso(self):
u_cart = self.u_cart()
return (u_cart[0]+u_cart[1]+u_cart[2])/3.0
def u_iso_as_u_cart(self):
ui = self.u_iso()
return [ui,ui,ui,0.0,0.0,0.0]
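  # The adptbx conversions above follow the standard convention B = 8*pi**2*u;
  # e.g. u_iso = 0.25 A**2 corresponds to b_iso = 8*pi**2*0.25 ~ 19.74 A**2,
  # and both isotropic values are the trace/3 of their anisotropic tensors.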
def k_sol(self):
return self.data_core.ksol()
def k_sols(self):
# This is a dummy, making twin_f_model compatible with f_model
# Radial shell mask model is not implemented for twin_f_model
return [self.k_sol()]
def u_sol(self):
return self.data_core.usol()
def b_sol(self):
return adptbx.u_as_b( self.u_sol() )
def k_sol_b_sol(self):
return self.k_sol(), self.b_sol()
def k_sol_u_sol(self):
return self.k_sol(), self.u_sol()
def f_mask(self):
return self.f_mask_array
def f_mask_w(self):
return self.f_mask().select(~self.free_flags_for_f_atoms )
def f_mask_t(self):
return self.f_mask().select( self.free_flags_for_f_atoms )
def f_bulk(self):
tmp = self.data_core.f_bulk()
tmp = self.f_mask_array.customized_copy(
data = tmp ).set_observation_type( self.f_mask_array )
return tmp
def f_bulk_t(self):
tmp = self.f_bulk()
return tmp.select( self.free_flags_for_f_atoms )
def f_bulk_w(self):
tmp = self.f_bulk()
return tmp.select(~self.free_flags_for_f_atoms )
def fb_bulk(self):
tmp = self.data_core.f_bulk()
multi = self.data_core.overall_scale()
tmp = self.f_mask_array.customized_copy(
data = tmp*multi ).set_observation_type( self.f_mask_array )
return tmp
  def fb_bulk_t(self):
    tmp = self.fb_bulk()
    return tmp.select( self.free_flags_for_f_atoms )
  def fb_bulk_w(self):
    tmp = self.fb_bulk()
    return tmp.select(~self.free_flags_for_f_atoms )
def scale_k1_w(self):
return self.data_core.koverall()
def scale_k1_t(self):
return self.data_core.koverall()
def scale_k3_t(self):
return self.data_core.koverall()
def scale_k3_w(self):
return self.data_core.koverall()
def hl_coeffs(self): return None
def fft_vs_direct(self, reflections_per_bin = 250,
n_bins = 0,
out = None):
print >> self.out, "Direct vs FFT comparison not yet implemented. "
def show_k_sol_b_sol_b_cart_target(self, header=None,target=None,out=None):
if(out is None): out = self.out
p = " "
if(header is None): header = ""
line_len = len("|-"+"|"+header)
fill_len = 80-line_len-1
print >> out, "|-"+header+"-"*(fill_len)+"|"
k_sol = self.k_sol()
b_sol = self.b_sol()
u0,u1,u2,u3,u4,u5 = self.b_cart()
target_w=self.target_w()
alpha, beta = self.alpha_beta_w()
alpha_d = alpha.data()
a_mean = flex.mean(alpha_d)
a_zero = (alpha_d <= 0.0).count(True)
r_work = self.r_work()
u_isos = self.xray_structure.extract_u_iso_or_u_equiv()
b_iso_mean = flex.mean(u_isos * math.pi**2*8)
print >> out, "| k_sol=%5.2f b_sol=%7.2f target_w =%20.6f r_work=%7.4f" % \
(k_sol, b_sol, target_w, r_work) + 5*p+"|"
print >> out, "| B(11,22,33,12,13,23)=%9.4f%9.4f%9.4f%9.4f%9.4f%9.4f |" % \
(u0,u1,u2,u3,u4,u5)
print >> out, "| trace(B) = (B11 + B22 + B33)/3 = %-10.3f |"%self.u_iso()
print >> out, "| mean alpha:%8.4f number of alpha <= 0.0:%7d" % \
(a_mean, a_zero)+25*p+"|"
print >> out, "|"+"-"*77+"|"
out.flush()
def show_essential(self, header = None, out=None):
if(out is None): out = self.out
out.flush()
p = " "
if(header is None): header = ""
d_max, d_min = self.f_obs().d_max_min()
line1 = "---(resolution: "
line2 = n_as_s("%6.2f",d_min)
line3 = n_as_s("%6.2f",d_max)
line4 = " - "
line5 = " A)"
tl = header+line1+line2+line4+line3+line5
line_len = len("|-"+"|"+tl)
fill_len = 80-line_len-1
print >> out, "|-"+tl+"-"*(fill_len)+"|"
print >> out, "| "+" "*38+"|"
r_work = n_as_s("%6.4f",self.r_work() )
r_free = n_as_s("%6.4f",self.r_free() )
scale = n_as_s("%6.3f",self.scale_k1_w())
k_sol = n_as_s("%4.2f",self.k_sol())
b_sol = n_as_s("%6.2f",self.b_sol())
b0,b1,b2,b3,b4,b5 = n_as_s("%7.2f",self.b_cart())
b_iso = n_as_s("%7.2f",self.b_iso())
#XXXX Model error analyses required
#err = n_as_s("%6.2f",self.model_error_ml())
err=" None "
try: target_work = n_as_s("%.4g",self.target_w())
except Exception: target_work = str(None)
line = "| r_work= "+r_work+" r_free= "+r_free+" ksol= "+k_sol+\
" Bsol= "+b_sol+" scale= "+scale
np = 79 - (len(line) + 1)
if(np < 0): np = 0
print >> out, line + p*np + "|"
print >> out, "| "+" "*38+"|"
print >> out, "| overall anisotropic scale matrix (Cartesian basis): "\
" |"
c = ","
line4 = "| (B11,B22,B33,B12,B13,B23)= ("+b0+c+b1+c+b2+c+b3+c+b4+c+b5+")"
np = 79 - (len(line4) + 1)
line4 = line4 + " "*np + "|"
print >> out, line4
line5 = "| (B11+B22+B33)/3 = "+b_iso
np = 79 - (len(line5) + 1)
line5 = line5 + " "*np + "|"
print >> out, line5
print >> out, "| "+" "*38+"|"
line5_and_a_half = "| Twin law : %s Twin fraction: %4.3f"%(self.twin_law.r().as_hkl(),self.twin_fraction_object.twin_fraction)
np = 79 - (len(line5_and_a_half) + 1)
line5_and_a_half = line5_and_a_half + " "*np + "|"
print >> out, line5_and_a_half
print >> out, "| "+" "*38+"|"
line6="| Target ("+self.target_name+")= "+target_work+\
" | ML estimate for coordinates error: "+err+" A"
np = 79 - (len(line6) + 1)
line6 = line6 + " "*np + "|"
print >> out, line6
print >> out, "|"+"-"*77+"|"
out.flush()
def show_comprehensive(self,
header = "",
free_reflections_per_bin = 140,
max_number_of_bins = 20,
out=None):
self.r_values(table=True)
self.twin_fraction_scan(n=15)
self.sigmaa_object().show(out=self.out)
def statistics_in_resolution_bins(self,
free_reflections_per_bin = 140,
max_number_of_bins = 20,
out=None):
results = self.r_values(table=True, rows=True,
free_reflections_per_bin = free_reflections_per_bin, max_number_of_bins =
max_number_of_bins)
return results
def r_factors_in_resolution_bins(self,
free_reflections_per_bin = 140,
max_number_of_bins = 20,
out=None):
results = self.r_values(table=True, rows=True)
return results
def r_work_scale_k1_completeness_in_bins(self, reflections_per_bin = 500,
n_bins = 0,
prefix = "",
out = None):
#actively ignoring input
self.r_values(table=True)
def show_fom_phase_error_alpha_beta_in_bins(self,
free_reflections_per_bin = 140,
max_number_of_bins = 20,
out=None):
self.sigmaa_object().show(out=self.out)
def export(self, out=None, format="mtz"):
assert format in ["mtz", "cns"]
file_name = None
if (out is None):
out = sys.stdout
elif (hasattr(out, "name")):
file_name = libtbx.path.canonical_path(file_name=out.name)
warning = [
"DO NOT USE THIS FILE AS INPUT FOR REFINEMENT!",
"Resolution and sigma cutoffs may have been applied to FOBS."]
width = max([len(line) for line in warning])
warning.insert(0, "*" * width)
warning.append(warning[0])
if (format == "cns"):
for line in warning:
print >> out, "{ %s%s }" % (line, " "*(width-len(line)))
print >> out, "{ %s }" % date_and_time()
if (file_name is not None):
print >> out, "{ file name: %s }" % os.path.basename(file_name)
print >> out, "{ directory: %s }" % os.path.dirname(file_name)
self.explain_members(out=out, prefix="{ ", suffix=" }")
crystal_symmetry_as_cns_comments(
crystal_symmetry=self.f_obs_, out=out)
print >> out, "NREFlection=%d" % self.f_obs_.indices().size()
print >> out, "ANOMalous=%s" % {0: "FALSE"}.get(
int(self.f_obs_.anomalous_flag()), "TRUE")
have_sigmas = self.f_obs_.sigmas() is not None
for n_t in [("FOBS", "REAL"),
("SIGFOBS", "REAL"),
("R_FREE_FLAGS", "INTEGER"),
("FMODEL", "COMPLEX"),
("FCALC", "COMPLEX"),
("FMASK", "COMPLEX"),
("FBULK", "COMPLEX"),
("FOM", "REAL"),
("ALPHA", "REAL"),
("BETA", "REAL")]:
if (not have_sigmas and n_t[0] == "SIGFOBS"): continue
print >> out, "DECLare NAME=%s DOMAin=RECIprocal TYPE=%s END" % n_t
f_model = self.f_model_scaled_with_k1()
f_model_amplitudes = f_model.amplitudes().data()
f_model_phases = f_model.phases(deg=True).data()
f_calc_amplitudes = self.f_calc().amplitudes().data()
f_calc_phases = self.f_calc().phases(deg=True).data()
f_mask_amplitudes = self.f_mask().amplitudes().data()
f_mask_phases = self.f_mask().phases(deg=True).data()
f_bulk_amplitudes = self.f_bulk().amplitudes().data()
f_bulk_phases = self.f_bulk().phases(deg=True).data()
alpha, beta = [item.data() for item in self.alpha_beta()]
arrays = [
self.f_obs_.indices(), self.f_obs_.data(), self.f_obs_.sigmas(),
self.r_free_flags.data(),
f_model_amplitudes, f_model_phases,
f_calc_amplitudes, f_calc_phases,
f_mask_amplitudes, f_mask_phases,
f_bulk_amplitudes, f_bulk_phases,
self.figures_of_merit(),
alpha, beta]
if (not have_sigmas):
del arrays[2]
i_r_free_flags = 2
else:
i_r_free_flags = 3
for values in zip(*arrays):
print >> out, "INDE %d %d %d" % values[0]
print >> out, " FOBS= %.6g" % values[1],
if (have_sigmas):
print >> out, " SIGFOBS= %.6g" % values[2],
        print >> out, \
          " R_FREE_FLAGS= %d FMODEL= %.6g %.6g\n" \
          " FCALC= %.6g %.6g FMASK= %.6g %.6g FBULK= %.6g %.6g\n" \
          " FOM= %.6g ALPHA= %.6g BETA= %.6g" \
          % values[i_r_free_flags:]
if (file_name is not None):
out.close()
else:
assert file_name is not None
mtz_dataset = self.f_obs_.as_mtz_dataset(column_root_label="FOBS")
mtz_dataset.add_miller_array(
miller_array=self.r_free_flags(), column_root_label="R_FREE_FLAGS")
mtz_dataset.add_miller_array(
miller_array=self.f_model(), column_root_label="FMODEL")
mtz_dataset.add_miller_array(
miller_array=self.f_calc(), column_root_label="FCALC")
mtz_dataset.add_miller_array(
miller_array=self.f_mask(), column_root_label="FMASK")
mtz_dataset.add_miller_array(
miller_array=self.f_bulk(), column_root_label="FBULK")
mtz_dataset.add_miller_array(
miller_array= self.sigmaa_object().fom(),
column_root_label="FOM", column_types="W")
alpha, beta = self.alpha_beta()
mtz_dataset.add_miller_array(
miller_array=alpha, column_root_label="ALPHA", column_types="W")
mtz_dataset.add_miller_array(
miller_array=beta, column_root_label="BETA", column_types="W")
mtz_history_buffer = flex.std_string(warning)
ha = mtz_history_buffer.append
ha(date_and_time())
ha("file name: %s" % os.path.basename(file_name))
ha("directory: %s" % os.path.dirname(file_name))
s = StringIO()
self.explain_members(out=s)
for line in s.getvalue().splitlines():
ha(line)
mtz_object = mtz_dataset.mtz_object()
mtz_object.add_history(lines=mtz_history_buffer)
out.close()
mtz_object.write(file_name=file_name)
def explain_members(self, out=None, prefix="", suffix=""):
if (out is None): out = sys.stdout
def zero_if_almost_zero(v, eps=1.e-6):
if (abs(v) < eps): return 0
return v
for line in [
"Fmodel = scale_k1 * fb_cart * (Fcalc + Fbulk)",
"Fcalc = structure factors calculated from atomic model",
"Fbulk = k_sol * exp(-b_sol*s**2/4) * Fmask",
"A = orthogonalization matrix",
"k_sol = %.6g" % self.k_sol(),
"b_sol = %.6g" % zero_if_almost_zero(self.b_sol()),
"B_cart = (B11, B22, B33, B12, B13, B23)",
" = (%s)" % ", ".join(
["%.6g" % zero_if_almost_zero(v) for v in self.b_cart()])]:
print >> out, prefix + line + suffix
def export_f_obs_flags_as_mtz (self,
file_name,
merge_anomalous=False,
include_hendrickson_lattman=True) :
"""
Dump all input data to an MTZ file using standard column labels. This may
be useful when running modules or programs that require an MTZ file as
input (rather than taking f_model.manager or the Miller arrays directly).
"""
f_obs = self.f_obs()
flags = self.r_free_flags()
hl_coeffs = self.hl_coeffs()
if (merge_anomalous) :
f_obs = f_obs.average_bijvoet_mates()
flags = flags.average_bijvoet_mates()
if (hl_coeffs is not None) :
hl_coeffs = hl_coeffs.average_bijvoet_mates()
mtz_dataset = f_obs.as_mtz_dataset(column_root_label="F")
if (hl_coeffs is not None) and (include_hendrickson_lattman) :
mtz_dataset.add_miller_array(hl_coeffs, column_root_label="HL")
mtz_dataset.add_miller_array(flags, column_root_label="FreeR_flag")
mtz_dataset.mtz_object().write(file_name)
def show_targets(self, out=None, text=""):
if(out is None): out = self.out
part1 = "|-"+text
part2 = "-|"
n = 79 - len(part1+part2)
print >> out, part1 + "-"*n + part2
part3 = "| target_work(%s"%self.target_name+") = %.6e r_work = %6.4f r_free = %6.4f"%\
(self.target_w(), self.r_work(), self.r_free())
n = 78 - len(str(part3)+"|")
print >> out, part3, " "*n +"|"
print >> out, "|" +"-"*77+"|"
out.flush()
def _header_resolutions_nreflections(self, header, out):
out.flush()
if(header is None): header = ""
d_max, d_min = self.f_obs_.d_max_min()
line1 = "(resolution: "
line2 = n_as_s("%6.2f",d_min)
line3 = n_as_s("%6.2f",d_max)
line4 = " - "
line5 = " A; n_refl. = "
line6 = n_as_s("%d",self.f_obs_.data().size())
tl = header+"-"+line1+line2+line4+line3+line5+line6
line_len = len("|-"+"|"+tl)
fill_len = 80-line_len-1
print >> out, "|-"+tl+"-"*(fill_len)+"|"
out.flush()
def _rfactors_and_bulk_solvent_and_scale_params(self, out):
out.flush()
r_work = n_as_s("%6.4f",self.r_work() )
r_free = n_as_s("%6.4f",self.r_free() )
scale = n_as_s("%6.3f",self.scale_k1_w())
k_sol = n_as_s("%4.2f",self.k_sol())
b_sol = n_as_s("%6.2f",self.b_sol())
b0,b1,b2,b3,b4,b5 = n_as_s("%7.2f",self.b_cart())
b_iso = n_as_s("%7.2f",self.b_iso())
line = "| r_work= "+r_work+" r_free= "+r_free+" ksol= "+k_sol+\
" Bsol= "+b_sol+" scale= "+scale
np = 79 - (len(line) + 1)
if(np < 0): np = 0
print >> out, line + " "*np + "|"
print >> out, "| "+" "*38+"|"
print >> out, "| overall anisotropic scale matrix (Cartesian basis; B11,B22,B33,B12,B13,B23):|"
c = ","
line4 = "| ("+b0+c+b1+c+b2+c+b3+c+b4+c+b5+"); trace/3= "+b_iso
np = 79 - (len(line4) + 1)
line4 = line4 + " "*np + "|"
print >> out, line4
out.flush()
def ls_ff_weights(f_obs, atom, B):
d_star_sq_data = f_obs.d_star_sq().data()
table = wk1995(atom).fetch()
ff = table.at_d_star_sq(d_star_sq_data) * flex.exp(-B/4.0*d_star_sq_data)
weights = 1.0/flex.pow2(ff)
return weights
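def toy_ls_ff_weights(d_star_sq_data, a, b, B):
  """Illustrative sketch only (not used by the manager): the weighting
  scheme of ls_ff_weights above with a hypothetical single-Gaussian form
  factor f(s) = a*exp(-b*s**2/4) in place of the wk1995 five-Gaussian
  table, damped by the overall B factor in the same way."""
  weights = []
  for ssq in d_star_sq_data:
    ff = a*math.exp(-b/4.0*ssq)*math.exp(-B/4.0*ssq)
    weights.append(1.0/(ff*ff))
  return weights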
class target_functor(object):
def __init__(self, manager):
self.manager = manager
def prepare_for_minimization(self):
pass
def target_function_is_invariant_under_allowed_origin_shifts(self):
return True
def __call__(self, compute_gradients=False):
return target_result(manager=self.manager)
class target_result(mmtbx.refinement.targets.target_result_mixin):
def __init__(self, manager):
self.manager = manager
def target_work(self):
return self.manager.target(False)[0]
def target_test(self):
return self.manager.target(False)[1]
def d_target_d_f_model_work(self):
manager = self.manager
return manager.miller_set.array(
data=manager.target_evaluator.d_target_d_fmodel(
manager.data_core.f_model()))
def d_target_d_f_calc_work(self):
manager = self.manager
d_t_d_f_m = self.d_target_d_f_model_work()
return d_t_d_f_m.array(
data=d_t_d_f_m.data() * manager.data_core.d_f_model_core_data_d_f_atoms()
/ manager.norma_sum_f_sq)
def ls_sigma_weights(f_obs):
if(f_obs.sigmas() is not None):
sigmas_squared = flex.pow2(f_obs.sigmas())
else:
sigmas_squared = flex.double(f_obs.data().size(), 1.0)
assert sigmas_squared.all_gt(0)
weights = 1 / sigmas_squared
return weights
def kb_range(x_max, x_min, step):
x_range = []
x = x_min
while x <= x_max + 0.0001:
x_range.append(x)
x += step
return x_range
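# Example: kb_range(1.0, 0.0, 0.25) -> [0.0, 0.25, 0.5, 0.75, 1.0]; the
# +0.0001 guard keeps the upper endpoint despite floating-point drift.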
def n_as_s(format, value):
vt = type(value).__name__
if(vt in ["int","float"]):
return str(format%value).strip()
else:
new = []
for item in value:
new.append( str(format%item).strip() )
return new
class resolution_bin(object):
def __init__(self,
i_bin = None,
d_range = None,
completeness = None,
alpha_work = None,
beta_work = None,
r_work = None,
r_free = None,
target_work = None,
target_free = None,
n_work = None,
n_free = None,
mean_f_obs = None,
fom_work = None,
scale_k1_work= None,
pher_work = None,
pher_free = None,
cc_work = None,
cc_free = None):
adopt_init_args(self, locals())
def n_as_s(format, value):
if value == "none":
return "None"
if value == "None":
return "None"
if ( value is None ):
return format_value(format=format, value=value)
if (isinstance(value, (int, float))):
return (format % value).strip()
return [(format % v).strip() for v in value]
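# Examples (illustrative): n_as_s("%6.4f", 0.12345)  -> "0.1235"
#                          n_as_s("%7.2f", [1, 2.5]) -> ["1.00", "2.50"]
#                          n_as_s("%6.2f", None)     -> "None" (via format_value)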
| 38.092615
| 175
| 0.599013
|
from __future__ import division
from cctbx import miller
from cctbx import crystal
from cctbx import sgtbx
from cctbx import xray
from cctbx import adptbx
from copy import deepcopy
from mmtbx import masks
import cctbx.xray.structure_factors
from cctbx.eltbx.xray_scattering import wk1995
from libtbx import adopt_init_args
from cctbx.array_family import flex
from libtbx.utils import Sorry, date_and_time
from libtbx.math_utils import iround
import iotbx.phil
from iotbx.pdb import xray_structure
import mmtbx.scaling
from mmtbx.scaling import relative_scaling
from mmtbx.scaling import sigmaa_estimation
from mmtbx import max_lik
import mmtbx.f_model
from libtbx import table_utils
from libtbx.utils import Sorry, user_plus_sys_time
from cStringIO import StringIO
import sys, os, math
from libtbx.str_utils import format_value
from libtbx import Auto
import mmtbx.bulk_solvent.bulk_solvent_and_scaling as bss
import libtbx.path
import mmtbx.refinement.targets
import mmtbx.f_model.f_model_info
master_params = iotbx.phil.parse("""
twin_law = None
.type=str
.input_size = 80
.style = bold noauto
twin_target=*twin_lsq_f
.type=choice
detwin{
mode = algebraic proportional *auto
.type= choice
.short_caption = Detwin mode
map_types{
twofofc = *two_m_dtfo_d_fc two_dtfo_fc
.type = choice
.short_caption = 2Fo-Fc map type
fofc = *m_dtfo_d_fc gradient m_gradient
.type = choice
.short_caption = Fo-Fc map type
aniso_correct = False
.type=bool
.short_caption = Anisotropy correction
}
}
""")
class twin_fraction_object(object):
def __init__(self, twin_fraction = 0):
self.min_frac = 0.001
self.max_frac = 0.999
self.twin_fraction = float(twin_fraction)
if (self.twin_fraction<=self.min_frac):
self.twin_fraction = self.min_frac + self.min_frac/10.0
if (self.twin_fraction>=self.max_frac):
self.twin_fraction = self.max_frac - self.min_frac/10.0
def twin_fraction_to_ref( self ):
tmp = self.twin_fraction - self.min_frac
tmp = (self.max_frac-self.min_frac)/tmp -1.0
if tmp < 1e-70:
tmp = 1e-70
tmp = -math.log( tmp )
return tmp
def ref_to_twin_fraction(self, x):
if (x<-10):
x=-10
tmp = self.min_frac + (self.max_frac-self.min_frac)/(1+math.exp(-x) )
self.twin_fraction = tmp
def d_t_d_twin_fraction_ref(self, dtdp ):
tmp = self.twin_fraction_to_ref()
tmp2 = 1.0+math.exp(-tmp )
tmp = (self.max_frac - self.min_frac)*math.exp( -tmp )/(tmp2*tmp2)
return dtdp*tmp
def show(self,out=None):
if out is None:
out = sys.stdout
print >> out, "twin fraction: %4.3f" %( self.twin_fraction )
class scaling_parameters_object(object):
def __init__(self,
xs=None,
k_overall=1.0,
u_star=(0,0,0,0,0,0),
k_sol=0,
u_sol=0,
k_part=0,
u_part=0,
object=None,
max_u_sol = 5.0,
max_u_part = 5.0 ):
if object is not None:
k_overall = object.k_overall
u_star = object.u_star
k_sol = object.k_sol
u_sol = object.u_sol
k_part = object.k_part
u_part = object.u_part
self.k_overall = float(k_overall)
self.u_part = float(u_part)
self.k_sol = float(k_sol)
self.u_sol = float(u_sol)
self.k_part = float(k_part)
self.u_star = ( float(u_star[0]),
float(u_star[1]),
float(u_star[2]),
float(u_star[3]),
float(u_star[4]),
float(u_star[5])
)
self.max_u_sol = float(max_u_sol)
self.max_u_part= float(max_u_part)
if xs is None:
self.xs=object.xs
else:
self.xs=xs
assert self.xs is not None
self.adp_constraints = self.xs.space_group().adp_constraints()
self.vrwgk = math.pow(self.xs.unit_cell().volume(),-2.0/3.0)
self.n_u_indep = self.xs.space_group().adp_constraints(
).n_independent_params()
self.u_star = self.xs.space_group().average_u_star( self.u_star )
def ref_to_k_overall(self,x):
if(x > 500.): self.k_overall=math.exp(500.)
else: self.k_overall = math.exp( x )
def ref_to_k_sol(self,x):
if x>10:
self.k_sol = math.exp( 10 )
else:
self.k_sol = math.exp( x )
def ref_to_u_sol(self, x):
if x>10:
self.u_sol = math.exp(10.0)
else:
self.u_sol = math.exp( x )
if self.u_sol > self.max_u_sol:
self.u_sol = self.max_u_sol
def ref_to_k_part(self, x):
if x > 10:
self.k_part = math.exp(10)
else:
self.k_part = math.exp( x )
def ref_to_u_part(self, x):
self.u_part = math.exp( x )
if self.u_part > self.max_u_part:
self.u_part = self.max_u_part
def ref_to_u_star(self, x ):
tmp = self.adp_constraints.all_params( x )
tmp =list( flex.double(tmp) * self.vrwgk )
self.u_star = tmp
def k_overall_to_ref(self):
if self.k_overall > 0:
return math.log( self.k_overall )
else:
return None
def k_sol_to_ref(self):
if self.k_sol>0:
return math.log( self.k_sol )
else:
return 0
def k_part_to_ref(self):
if self.k_part > 0:
return math.log( self.k_part )
else:
return 0
def u_sol_to_ref(self):
if self.u_sol > 0:
return math.log( self.u_sol )
else:
return -1000.0
def u_part_to_ref(self):
if self.u_part>0:
return math.log( self.u_part )
else:
return -1000.0
def u_star_to_ref(self):
tmp = self.xs.space_group().adp_constraints(
).independent_params(all_params=self.u_star)
tmp = list( flex.double(tmp)/self.vrwgk )
return tmp
def d_t_d_k_overall_ref(self,dtdp):
return self.k_overall*dtdp
def d_t_d_k_sol_ref(self,dtdp):
return self.k_sol*dtdp
def d_t_d_k_part_ref(self,dtdp):
return self.k_part*dtdp
def d_t_d_u_sol_ref(self, dtdp):
return self.u_sol*dtdp
def d_t_d_u_part_ref(self, dtdp):
return self.u_part*dtdp
def d_t_d_u_star_ref(self, dtdp):
tmp = list( flex.double(dtdp) * self.vrwgk )
tmp = list( self.adp_constraints.independent_gradients(
list( tmp ) ) )
return tmp
def show(self,out=None):
if out is None:
out=sys.stdout
print >> out
print >> out, "F-model scaling parameters"
print >> out, "k_overall : %5.2e"%(self.k_overall)
print >> out, "u_star : %5.2e %5.2e %5.2e %5.2e %5.2e %5.2e"%(
self.u_star[0], self.u_star[1], self.u_star[2],
self.u_star[3], self.u_star[4], self.u_star[5])
print >> out, " (%i independent parameters)"%(self.n_u_indep)
print >> out, "k_sol : %5.2e"%(self.k_sol)
print >> out, "u_sol : %5.2e"%(self.u_sol)
print >> out, " B_sol : %5.2f"%(self.u_sol*79.0)
print >> out, "k_part : %5.2e"%(self.k_part)
print >> out, "u_part : %5.2e"%(self.u_part)
print >> out
def deep_copy(self):
new = scaling_parameters_object(object=self)
return new
def get_initial_scale(miller_obs,
f_atoms):
tmp_calc = f_atoms.deep_copy().map_to_asu()
tmp_obs = miller_obs.deep_copy().map_to_asu()
  tmp_obs, tmp_calc = tmp_obs.common_sets(
    abs(tmp_calc) )
init_scale = flex.sum( tmp_calc.data()*tmp_obs.data() )/ \
flex.sum( tmp_calc.data()*tmp_calc.data() )
return init_scale
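def toy_initial_scale(f_obs, f_calc):
  """Illustrative sketch only: the least-squares scale computed by
  get_initial_scale above, k = sum(Fc*Fo)/sum(Fc*Fc), on plain lists;
  e.g. toy_initial_scale([10.0, 20.0], [5.0, 10.0]) -> 2.0."""
  num = sum(fc*fo for fc, fo in zip(f_calc, f_obs))
  den = sum(fc*fc for fc in f_calc)
  return num/den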
class scaling_parameter_mask(object):
def __init__(self,
twin_fraction=True,
k_overall=True,
u_star=True,
k_sol=True,
u_sol=True):
self.twin_fraction = 0.0
self.k_overall = 0.0
self.u_star = 0.0
self.k_sol = 0.0
self.u_sol = 0.0
if twin_fraction:
self.twin_fraction = 1.0
if k_overall:
self.k_overall = 1.0
if u_star:
self.u_star = 1.0
if k_sol:
self.k_sol = 1.0
if u_sol:
self.u_sol = 1.0
class target_attributes(mmtbx.refinement.targets.target_attributes):
def __init__(self):
mmtbx.refinement.targets.target_attributes.__init__(self, family="ls")
self.twin = "amplitudes"
self.pseudo_ml = False
class twin_model_manager(mmtbx.f_model.manager_mixin):
def __init__(self,
f_obs = None,
f_mask = None,
f_calc = None,
r_free_flags = None,
xray_structure = None,
scaling_parameters = None,
sf_and_grads_accuracy_params =
mmtbx.f_model.sf_and_grads_accuracy_master_params.extract(),
mask_params = None,
out = None,
twin_law = None,
twin_law_str = None,
start_fraction = 0.1,
n_refl_bin = 2000,
max_bins = 20,
detwin_mode = None,
map_types = None,
twin_target = master_params.extract().twin_target):
self.fmodel_ts1 = None
self.f_obs_ = f_obs
if(f_calc is not None): raise RuntimeError("Not implemented.")
if(map_types is None):
map_types = master_params.extract().detwin.map_types
if(detwin_mode is None):
detwin_mode = "auto"
self.alpha_beta_params=None
self.twin = True
self.twin_law_str = twin_law_str
self.sfg_params = sf_and_grads_accuracy_params
self.target_name=twin_target
self._target_attributes = target_attributes()
if self.target_name =="pseudo_ml_f":
self.target_name = "twin_lsq_f"
self._target_attributes.pseudo_ml=True
    if(out is None): out = sys.stdout
    self.out = out
    self.did_search = 0
self.twin_fraction_object = twin_fraction_object(twin_fraction=start_fraction)
self.twin_law=twin_law
self.twin_fraction=start_fraction
self.possible_detwin_modes = ["proportional",
"algebraic",
"gradient",
"auto"
]
self.detwin_mode = detwin_mode
if self.detwin_mode is Auto:
self.detwin_mode="auto"
assert self.detwin_mode in self.possible_detwin_modes
self.detwin_switch_twin_fraction = 0.45
self.map_types = map_types
assert (self.twin_law is not None)
f_obs = f_obs.map_to_asu()
self.f_obs_ = f_obs
self.r_free_flags_ = r_free_flags.map_to_asu().common_set(f_obs)
assert self.f_obs_.indices().all_eq( self.r_free_flags_.indices() )
self.f_obs_w = f_obs.select( ~self.r_free_flags_.data() )
if(self.r_free_flags_.data().count(True)>0):
self.f_obs_f = f_obs.select( self.r_free_flags_.data() )
else:
self.f_obs_f = self.f_obs_w.deep_copy()
if(self.f_obs_w.data().size()==0 and self.f_obs_f.data().size()>0):
self.f_obs_w = self.f_obs_f
self.max_bins = max_bins
self.n_refl_bin = n_refl_bin
    if (self.n_refl_bin is None) or (self.n_refl_bin > f_obs.data().size()):
      self.n_refl_bin = f_obs.data().size()
    if f_obs.binner() is None:
      if f_obs.indices().size()/float(self.n_refl_bin) > max_bins:
        f_obs.setup_binner(n_bins = max_bins)
      else:
        f_obs.setup_binner( reflections_per_bin=self.n_refl_bin )
self.f_obs_w.use_binning_of( f_obs )
self.f_obs_f.use_binning_of( f_obs )
self.xray_structure = xray_structure
self.xs = crystal.symmetry( unit_cell=f_obs.unit_cell(),
space_group=f_obs.space_group() )
self.scaling_parameters = scaling_parameters_object(
xs = self.xs,
object = scaling_parameters)
if self.scaling_parameters is None:
self.scaling_parameters = scaling_parameters_object(self.xs)
self.mask_params=None
if mask_params is not None:
self.mask_params = mask_params
else:
self.mask_params = mmtbx.masks.mask_master_params.extract()
self.norma_sum_f_sq = flex.sum( f_obs.data() * f_obs.data() )
self.norma_sum_f_sq_w = flex.sum( self.f_obs_w.data() * self.f_obs_w.data() )
self.norma_sum_f_sq_f = flex.sum( self.f_obs_f.data() * self.f_obs_f.data() )
    self.miller_set = None
    self.f_atoms = None
    self.free_flags_for_f_atoms = None
    self.f_atoms = self.compute_f_atoms()
self.f_mask_array = None
if f_mask is not None:
if f_mask.data().size() == self.f_atoms.data().size():
self.f_mask_array = f_mask
else:
self.update_f_mask()
else:
self.update_f_mask()
self.f_partial_array = None
self.data_core = xray.f_model_core_data(
hkl = self.f_atoms.indices(),
f_atoms= self.f_atoms.data(),
f_mask = self.f_mask_array.data(),
unit_cell = self.f_atoms.unit_cell(),
k_overall=self.scaling_parameters.k_overall,
u_star=self.scaling_parameters.u_star,
k_sol=self.scaling_parameters.k_sol,
u_sol=self.scaling_parameters.u_sol,
f_part=None,
k_part=self.scaling_parameters.k_part,
u_part=self.scaling_parameters.u_part )
self.target_evaluator = xray.least_squares_hemihedral_twinning_on_f(
hkl_obs = self.f_obs_w.indices(),
f_obs = self.f_obs_w.data(),
w_obs = self.f_obs_w.sigmas(),
hkl_calc = self.f_atoms.indices(),
space_group = f_obs.space_group(),
anomalous_flag= f_obs.anomalous_flag(),
alpha = self.twin_fraction,
twin_law = self.twin_law.as_double_array()[0:9] )
if(self.f_obs_f.indices().size() == 0):
self.free_target_evaluator = self.target_evaluator
else:
self.free_target_evaluator = xray.least_squares_hemihedral_twinning_on_f(
hkl_obs = self.f_obs_f.indices(),
f_obs = self.f_obs_f.data(),
w_obs = self.f_obs_f.sigmas(),
hkl_calc = self.f_atoms.indices(),
space_group = f_obs.space_group(),
anomalous_flag = f_obs.anomalous_flag(),
alpha = self.twin_fraction,
twin_law = self.twin_law.as_double_array()[0:9] )
self.r_all_object = xray.hemihedral_r_values(
hkl_obs = f_obs.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = f_obs.space_group(),
anomalous_flag = f_obs.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
self.r_work_object = xray.hemihedral_r_values(
hkl_obs = self.f_obs_w.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = self.f_obs_w.space_group(),
anomalous_flag = f_obs.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
self.r_free_object = xray.hemihedral_r_values(
hkl_obs = self.f_obs_f.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = self.f_obs_f.space_group(),
anomalous_flag = self.f_obs_f.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
self.work_detwinner = xray.hemihedral_detwinner(
hkl_obs = self.f_obs_w.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = self.f_obs_w.space_group(),
anomalous_flag = self.f_obs_w.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
if(self.f_obs_f.indices().size() == 0):
self.free_detwinner = self.work_detwinner
else:
self.free_detwinner = xray.hemihedral_detwinner(
hkl_obs = self.f_obs_f.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = self.f_obs_f.space_group(),
anomalous_flag = self.f_obs_f.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
self.full_detwinner = xray.hemihedral_detwinner(
hkl_obs = f_obs.indices(),
hkl_calc = self.f_atoms.indices(),
space_group = f_obs.space_group(),
anomalous_flag = f_obs.anomalous_flag(),
twin_law = self.twin_law.as_double_array()[0:9] )
if(self.sfg_params is not None):
self.structure_factor_gradients_w = cctbx.xray.structure_factors.gradients(
miller_set = self.miller_set,
cos_sin_table = self.sfg_params.cos_sin_table,
grid_resolution_factor = self.sfg_params.grid_resolution_factor,
quality_factor = self.sfg_params.quality_factor,
u_base = self.sfg_params.u_base,
b_base = self.sfg_params.b_base,
wing_cutoff = self.sfg_params.wing_cutoff,
exp_table_one_over_step_size = self.sfg_params.exp_table_one_over_step_size)
else:
self.structure_factor_gradients_w = cctbx.xray.structure_factors.gradients(
miller_set = self.miller_set)
self.sigmaa_object_cache = None
self.update_sigmaa_object = True
self.xray_structure_mask_cache = None
if self.xray_structure is not None:
self.xray_structure_mask_cache = self.xray_structure.deep_copy_scatterers()
self.epsilons_w = self.f_obs_w.epsilons().data().as_double()
self.epsilons_f = self.f_obs_f.epsilons().data().as_double()
def f_obs(self):
return self.f_obs_
def r_free_flags(self):
return self.r_free_flags_
def twin_test(self):
return "yes"
def is_twin_fmodel_manager (self) :
return True
def update_f_hydrogens(self, log):
return None
def info(self, free_reflections_per_bin = 140, max_number_of_bins = 20,
n_bins=None):
return mmtbx.f_model.f_model_info.info(
fmodel = self,
free_reflections_per_bin = free_reflections_per_bin,
max_number_of_bins = max_number_of_bins,
n_bins = n_bins)
def outlier_selection(self, show = False, log = None):
return None
def remove_outliers(self, show = False, log = None):
if (show):
if (log is None): log = sys.stdout
print >> log, """\
*****************************************************************
NOT performing outlier rejection in twin refinement mode.
If there are many outliers without twin refinement, the resulting
reflection statistics may differ significantly (for example
the percentage of R-free reflections).
*****************************************************************
"""
return self
def wilson_b(self, force_update = False):
return None
def scale_k1(self):
return self.scaling_parameters.k_overall
def show_parameter_summary(self, manager=None):
print >> self.out, "Usol ", self.scaling_parameters.u_sol, self.data_core.usol()
print >> self.out, "Ksol ", self.scaling_parameters.k_sol, self.data_core.ksol()
print >> self.out, "Koverall ", self.scaling_parameters.k_overall, self.data_core.koverall()
print >> self.out, "Ustar ", self.scaling_parameters.u_star, self.data_core.ustar()
print >> self.out, "Twin fraction ", self.twin_fraction_object.twin_fraction, self.twin_fraction, self.target_evaluator.alpha()
print >> self.out, "mask step ", self.mask_params.grid_step_factor
print >> self.out, "mask shift ", self.mask_params.mean_shift_for_mask_update
print >> self.out, "mask trunk rad ", self.mask_params.shrink_truncation_radius
print >> self.out, "mask solv rad ", self.mask_params.solvent_radius
if manager is not None:
x = self.f_model().data()
y = manager.f_model().data()
print >> self.out, "Fmodel delta " , flex.sum( flex.abs(x - y) )
x = self.f_calc().data()
y = manager.f_calc().data()
print >> self.out, "Fatoms delta ", flex.sum( flex.abs(x - y) )
x = self.f_mask_array.data()
y = manager.f_mask_array.data()
print >> self.out, "Fmask delta ", flex.sum( flex.abs(x - y) )
x = flex.abs( self.bulk_solvent_mask().data - manager.bulk_solvent_mask().data )
print >> self.out, "Bit wise diff mask ", flex.sum( x )
def deep_copy(self):
new_object = twin_model_manager(
f_obs = self.f_obs().deep_copy(),
f_mask = self.f_mask_array.deep_copy(),
r_free_flags = self.r_free_flags().deep_copy(),
xray_structure = self.xray_structure.deep_copy_scatterers(),
scaling_parameters = self.scaling_parameters.deep_copy(),
mask_params = deepcopy(self.mask_params),
out = self.out,
twin_law = self.twin_law,
twin_law_str = self.twin_law_str,
start_fraction = self.twin_fraction,
n_refl_bin = self.n_refl_bin,
max_bins = self.max_bins,
detwin_mode = self.detwin_mode,
map_types = self.map_types,
sf_and_grads_accuracy_params = self.sfg_params,
)
new_object.twin_fraction_object.twin_fraction = float(self.twin_fraction_object.twin_fraction)
new_object.twin_fraction = float(self.twin_fraction_object.twin_fraction)
new_object.update()
new_object.did_search = self.did_search
if (self.fmodel_ts1 is not None) :
new_object.fmodel_ts1 = self.fmodel_ts1.deep_copy()
return new_object
def resolution_filter(self,d_max=None,d_min=None):
dc = self.deep_copy()
dummy_obs = dc.f_obs().resolution_filter(d_max,d_min)
twin_complete = dc.construct_miller_set(external_miller_array = dummy_obs )
appropriate_f_mask_array = dc.f_mask_array.common_set( twin_complete )
new_object = twin_model_manager(
f_obs = dummy_obs,
f_mask = appropriate_f_mask_array,
r_free_flags = dc.r_free_flags().resolution_filter(d_max,d_min),
xray_structure = dc.xray_structure,
scaling_parameters = dc.scaling_parameters.deep_copy(),
mask_params = dc.mask_params,
out = dc.out,
twin_law = dc.twin_law,
twin_law_str = dc.twin_law_str,
start_fraction = dc.twin_fraction,
n_refl_bin = dc.n_refl_bin,
max_bins = dc.max_bins,
detwin_mode = dc.detwin_mode,
map_types = dc.map_types,
sf_and_grads_accuracy_params = dc.sfg_params
)
new_object.update()
new_object.did_search = self.did_search
return new_object
def select(self, selection):
self.update_f_mask()
dc = self.deep_copy()
if(selection is None): return dc
new_object = twin_model_manager(
      f_obs = dc.f_obs().select(selection) ,
f_mask = dc.f_mask_array.select(selection),
r_free_flags = dc.r_free_flags().select(selection),
xray_structure = dc.xray_structure,
scaling_parameters = dc.scaling_parameters.deep_copy(),
mask_params = dc.mask_params,
out = dc.out,
twin_law = dc.twin_law,
start_fraction = dc.twin_fraction,
n_refl_bin = dc.n_refl_bin,
max_bins = dc.max_bins,
detwin_mode = dc.detwin_mode,
map_types = dc.map_types,
sf_and_grads_accuracy_params = dc.sfg_params
)
new_object.did_search = self.did_search
return new_object
def f_model_scaled_with_k1_composite_work_free(self):
ma_w = self.f_model_w()
ma_f = self.f_model_t()
if(ma_w.indices().size() == ma_f.indices().size()): return ma_w
return ma_w.concatenate(ma_f)
def f_model(self):
tmp_f_model = self.f_atoms.customized_copy(
data = self.data_core.f_model()
)
return tmp_f_model
def f_model_w(self):
tmp = self.f_model()
return tmp.select(~self.free_flags_for_f_atoms )
def f_model_t(self):
tmp = self.f_model()
return tmp.select( self.free_flags_for_f_atoms )
def f_calc(self):
if self.f_atoms is None:
self.f_atoms = self.compute_f_atoms()
return self.f_atoms
def f_calc_w(self):
tmp = self.f_calc()
return tmp.select(~self.free_flags_for_f_atoms )
def f_calc_t(self):
tmp = self.f_calc()
return tmp.select( self.free_flags_for_f_atoms )
def target_attributes(self):
return self._target_attributes
def r_work(self, d_min=None, d_max=None):
if(self.fmodel_ts1 is not None):
self.fmodel_ts1.update_xray_structure(xray_structure = self.xray_structure,
update_f_calc = True, update_f_mask=True)
return self.fmodel_ts1.r_work(d_min=d_min, d_max=d_max)
else:
w,f = self.r_values(table=False, d_min=d_min, d_max=d_max)
return w
def r_free(self, d_min=None, d_max=None):
if(self.fmodel_ts1 is not None):
self.fmodel_ts1.update_xray_structure(xray_structure = self.xray_structure,
update_f_calc = True, update_f_mask = True)
return self.fmodel_ts1.r_free(d_min=d_min, d_max=d_max)
else:
w,f = self.r_values(table=False, d_min=d_min, d_max=d_max)
return f
def f_part1(self):
return self.f_calc().customized_copy(data = self.f_calc().data()*0)
def show(self, log=None, suffix=None, show_header=False):
fmt = "r_work=%6.4f r_free=%6.4f twin_fraction=%4.2f twin_law=%s"
print >> log
print >> log, fmt%(self.r_work(), self.r_free(), self.twin_fraction,
self.twin_law_str)
def update_all_scales(self, params=None, log=None, show=False,
optimize_mask=False, nproc=None, fast=False,
remove_outliers=False,refine_hd_scattering=False,
apply_back_trace=False, update_f_part1=False):
self.update_solvent_and_scale(log=log,
apply_back_trace=apply_back_trace,
update_f_part1=update_f_part1)
def update_solvent_and_scale(self,
update_f_part1=False,
apply_back_trace=False,
optimize_mask=True,
optimize_mask_thorough=False,
params=None,
log=None,
verbose=-1,
initialise=False,
nproc=None,
fast=None):
if(self.twin_law_str is None and self.twin_law is not None):
self.twin_law_str = sgtbx.change_of_basis_op( self.twin_law ).as_hkl()
self.fmodel_ts1 = mmtbx.f_model.manager(
f_obs = self.f_obs(),
r_free_flags = self.r_free_flags(),
xray_structure = self.xray_structure,
twin_law = self.twin_law_str,
mask_params = self.mask_params,
k_sol = self.k_sol(),
b_sol = self.b_sol(),
b_cart = self.b_cart(),
twin_fraction = self.twin_fraction)
self.twin_set = self.fmodel_ts1.twin_set
    result = self.fmodel_ts1.update_all_scales(
      params=params, log=log, apply_back_trace=apply_back_trace)
self.update_core(
k_sol = result.k_sol, # XXX not implemented (see above)
b_sol = result.b_sol,
twin_fraction = self.fmodel_ts1.twin_fraction,
b_cart = result.b_cart,
k_overall = self.fmodel_ts1.scale_k1_w_for_twin_targets())
self.mask_params = self.fmodel_ts1.mask_params
self.arrays = self.fmodel_ts1.arrays
def update_core(self,
f_calc = None,
f_mask = None,
f_part = None,
b_cart = None,
k_sol = None,
b_sol = None,
k_part = None,
b_part = None,
u_sol = None,
k_overall = None,
twin_fraction = None,
r_free_flags = None):
if(r_free_flags is not None):
self.r_free_flags_ = r_free_flags
if f_calc is not None:
self.data_core.renew_fatoms( f_calc.data() )
self.f_atoms = f_calc
else:
assert self.f_atoms.indices().all_eq( self.miller_set.indices() )
self.data_core.renew_fatoms( self.f_atoms.data() )
if f_mask is not None:
self.data_core.renew_fmask( f_mask.data() )
self.f_mask_array = f_mask
else:
self.data_core.renew_fmask( self.f_mask_array.data() )
if f_part is not None:
      self.data_core.renew_fpart( f_part.data() )
self.f_partial_array = f_part
else:
if self.f_partial_array is not None:
self.data_core.renew_fpart( self.f_partial_array.data() )
if b_sol is not None:
u_sol = adptbx.b_as_u( b_sol )
if u_sol is not None:
self.data_core.usol( u_sol )
self.scaling_parameters.u_sol = u_sol
if u_sol is None:
self.data_core.usol( self.scaling_parameters.u_sol )
if k_sol is not None:
self.data_core.ksol( k_sol )
self.scaling_parameters.k_sol = k_sol
else:
self.data_core.ksol( self.scaling_parameters.k_sol )
if k_overall is not None:
self.scaling_parameters.k_overall = k_overall
self.data_core.koverall( self.scaling_parameters.k_overall )
else:
self.data_core.koverall( self.scaling_parameters.k_overall )
if b_cart is not None:
u_star = adptbx.u_cart_as_u_star( self.xs.unit_cell(), adptbx.b_as_u( list(b_cart) ) )
self.data_core.ustar(u_star)
self.scaling_parameters.u_star = u_star
if twin_fraction is None:
self.twin_fraction = self.twin_fraction_object.twin_fraction
self.target_evaluator.alpha( self.twin_fraction_object.twin_fraction )
self.free_target_evaluator.alpha( self.twin_fraction_object.twin_fraction )
else:
self.twin_fraction_object.twin_fraction = twin_fraction
self.twin_fraction = twin_fraction
self.target_evaluator.alpha( twin_fraction )
self.free_target_evaluator.alpha( twin_fraction )
def f_obs_work(self):
return self.f_obs_w
def update(self, f_calc = None,
f_obs = None,
f_mask = None,
f_ordered_solvent = None,
r_free_flags = None,
b_cart = None,
k_sol = None,
b_sol = None,
sf_and_grads_accuracy_params = None,
target_name = None,
abcd = None,
alpha_beta_params = None,
xray_structure = None,
mask_params = None,
overall_scale = None,
twin_fraction = None ):
if(sf_and_grads_accuracy_params is not None):
self.sfg_params = sf_and_grads_accuracy_params
self.update_xray_structure(update_f_calc = True)
if(f_calc is not None):
      assert f_calc.indices().all_eq(self.f_model().indices())
self.update_core(f_calc = f_calc)
if(mask_params is not None):
self.mask_params = mask_params
if(f_obs is not None):
assert f_obs.data().size() == self.f_obs_.data().size()
self.f_obs_ = f_obs
self.f_obs_w = self.f_obs_.select(~self.r_free_flags().data() )
self.f_obs_f = self.f_obs_.select( self.r_free_flags().data() )
if(f_mask is not None):
      assert f_mask.indices().all_eq( self.f_mask_array.indices() )
      assert f_mask.data().size() == self.f_mask_array.data().size()
self.update_core(f_mask = f_mask)
if(r_free_flags is not None):
self.r_free_flags_ = r_free_flags
self.update_core(r_free_flags = r_free_flags)
if(b_cart is not None):
try: assert b_cart.size() == 6
except Exception: assert len(b_cart) == 6
self.update_core(b_cart = b_cart)
if overall_scale is not None:
self.scaling_parameters.k_overall = overall_scale
self.update_core()
if twin_fraction is None:
self.twin_fraction = self.twin_fraction_object.twin_fraction
self.target_evaluator.alpha( self.twin_fraction_object.twin_fraction )
self.free_target_evaluator.alpha( self.twin_fraction_object.twin_fraction )
else:
self.twin_fraction = twin_fraction
self.target_evaluator.alpha( twin_fraction )
self.free_target_evaluator.alpha( twin_fraction )
def construct_miller_set(self, return_free_f_atoms_array=False, external_miller_array=None):
completion = None
tmp_miller = external_miller_array
if tmp_miller is None:
tmp_miller = self.f_obs()
completion = xray.twin_completion( tmp_miller.indices(),
self.xs.space_group(),
tmp_miller.anomalous_flag(),
self.twin_law.as_double_array()[0:9] )
indices = completion.twin_complete()
miller_set = miller.set(
crystal_symmetry = self.xs,
indices =indices,
anomalous_flag = tmp_miller.anomalous_flag() ).map_to_asu()
assert miller_set.is_unique_set_under_symmetry()
if not return_free_f_atoms_array:
return miller_set
else:
free_array_for_f_atoms = completion.get_free_model_selection(
miller_set.indices(),
self.r_free_flags().data() )
return miller_set, free_array_for_f_atoms
def compute_f_atoms(self):
if self.miller_set is None:
self.miller_set, self.free_flags_for_f_atoms = self.construct_miller_set(True)
if(self.sfg_params is not None):
tmp = self.miller_set.structure_factors_from_scatterers(
xray_structure = self.xray_structure,
algorithm = self.sfg_params.algorithm,
cos_sin_table = self.sfg_params.cos_sin_table,
grid_resolution_factor = self.sfg_params.grid_resolution_factor,
quality_factor = self.sfg_params.quality_factor,
u_base = self.sfg_params.u_base,
b_base = self.sfg_params.b_base,
wing_cutoff = self.sfg_params.wing_cutoff,
exp_table_one_over_step_size =
self.sfg_params.exp_table_one_over_step_size
)
else:
tmp = self.miller_set.structure_factors_from_scatterers(
xray_structure = self.xray_structure)
f_atoms = tmp.f_calc()
return f_atoms
def apply_back_b_iso(self):
eps = math.pi**2*8
unit_cell = self.xray_structure.unit_cell()
b_min = min(self.b_sol(), self.xray_structure.min_u_cart_eigenvalue())
if(b_min < 0):
self.xray_structure.tidy_us(u_min = 1.e-6)
b_iso = self.b_iso()
b_test = b_min+b_iso
if(b_test < 0.0): b_adj = b_iso + abs(b_test) + 0.001
else: b_adj = b_iso
if(abs(b_adj) <= 300.0):
b_cart = self.b_cart()
b_cart_new = [b_cart[0]-b_adj,b_cart[1]-b_adj,b_cart[2]-b_adj,
b_cart[3], b_cart[4], b_cart[5]]
self.update(b_cart = b_cart_new)
self.update(b_sol = self.k_sol_b_sol()[1] + b_adj)
self.xray_structure.shift_us(b_shift = b_adj)
b_min = min(self.b_sol(), self.xray_structure.min_u_cart_eigenvalue())
assert b_min >= 0.0
self.xray_structure.tidy_us(u_min = 1.e-6)
self.update_xray_structure(
xray_structure = self.xray_structure,
update_f_calc = True,
update_f_mask = False,
update_f_ordered_solvent = False,
out = None)
def _get_step(self, update_f_ordered_solvent = False):
step = self.f_obs().d_min()/self.mask_params.grid_step_factor
if(step < 0.3): step = 0.3
step = min(0.8, step)
if(update_f_ordered_solvent): step = 0.3
return step
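  # Worked example (values illustrative): with d_min = 2.0 A and
  # grid_step_factor = 4 the raw step is 0.5 A, which already lies inside
  # the clamp range [0.3, 0.8] enforced above.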
def _update_f_mask_flag(self, xray_structure, mean_shift):
if(self.xray_structure_mask_cache is None):
self.xray_structure_mask_cache = xray_structure.deep_copy_scatterers()
return True
else:
sites_cart_1 = self.xray_structure_mask_cache.sites_cart()
sites_cart_2 = xray_structure.sites_cart()
self.xray_structure_mask_cache = xray_structure.deep_copy_scatterers()
if(sites_cart_1.size() != sites_cart_2.size()): return True
atom_atom_distances = flex.sqrt((sites_cart_1 - sites_cart_2).dot())
mean_shift_ = flex.mean(atom_atom_distances)
update_f_mask = False
if(mean_shift_ >= mean_shift):
update_f_mask = True
return update_f_mask
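  # The bulk-solvent mask is only recomputed when the mean atomic shift since
  # the last mask calculation exceeds mask_params.mean_shift_for_mask_update
  # (or when the atom count changed); small coordinate updates reuse the
  # cached mask.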
def print_diffs(self):
sites_cart_1 = self.xray_structure_mask_cache.sites_cart()
sites_cart_2 = self.xray_structure.sites_cart()
atom_atom_distances = flex.sqrt((sites_cart_1 - sites_cart_2).dot())
mean_shift_ = flex.mean(atom_atom_distances)
print >> self.out, "MEAN SHIFT: ", mean_shift_
def update_xray_structure(self,
xray_structure = None,
update_f_calc = False,
update_f_mask = False,
update_f_ordered_solvent = False,
force_update_f_mask = True,
out = None,
k_sol = None,
b_sol = None,
b_cart = None):
if (xray_structure is not None):
self.xray_structure = xray_structure
if(update_f_mask):
if(force_update_f_mask):
consider_mask_update = True
else:
consider_mask_update = self._update_f_mask_flag(
xray_structure = self.xray_structure,
mean_shift = self.mask_params.mean_shift_for_mask_update)
f_calc = None
if(update_f_calc):
timer = user_plus_sys_time()
assert self.xray_structure is not None
if(self.sfg_params is not None):
f_calc = self.miller_set.structure_factors_from_scatterers(
xray_structure = self.xray_structure,
algorithm = self.sfg_params.algorithm,
cos_sin_table = self.sfg_params.cos_sin_table,
grid_resolution_factor = self.sfg_params.grid_resolution_factor,
quality_factor = self.sfg_params.quality_factor,
u_base = self.sfg_params.u_base,
b_base = self.sfg_params.b_base,
wing_cutoff = self.sfg_params.wing_cutoff,
exp_table_one_over_step_size =
self.sfg_params.exp_table_one_over_step_size).f_calc()
else:
f_calc = self.miller_set.structure_factors_from_scatterers(
xray_structure = self.xray_structure).f_calc()
f_mask = None
set_core_flag=True
if(update_f_mask and consider_mask_update):
bulk_solvent_mask_obj = self.bulk_solvent_mask()
f_mask = bulk_solvent_mask_obj.structure_factors(miller_set= self.miller_set)
if([f_calc, f_mask].count(None) == 2): set_core_flag = False
else: set_core_flag = True
if(f_calc is None): f_calc = self.f_calc()
if(f_mask is None): f_mask = self.f_mask()
if(set_core_flag):
self.update_core(f_calc = f_calc,
f_mask = f_mask,
b_cart = b_cart,
k_sol = k_sol,
b_sol = b_sol)
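  # Typical usage after moving atoms (this mirrors the call made in
  # r_work()/r_free() above; "xs_new" is a placeholder name):
  #   fmodel.update_xray_structure(xray_structure = xs_new,
  #                                update_f_calc  = True,
  #                                update_f_mask  = True)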
def bulk_solvent_mask(self):
step = self._get_step()
result = masks.bulk_solvent(
xray_structure = self.xray_structure,
grid_step = step,
ignore_zero_occupancy_atoms=self.mask_params.ignore_zero_occupancy_atoms,
solvent_radius = self.mask_params.solvent_radius,
shrink_truncation_radius = self.mask_params.shrink_truncation_radius)
return result
def update_f_mask(self):
mask = self.bulk_solvent_mask()
self.f_mask_array = mask.structure_factors( self.miller_set )
  def r_values(self, table=True, rows=False, d_min=None, d_max=None, again=False,
               free_reflections_per_bin = 140, max_number_of_bins = 20):
    return_rows = rows  # remember the caller's request; "rows" is reused as a row accumulator below
    if rows:
      table=True
additional_selection_w = flex.bool(self.f_obs_w.data().size(), True)
d_w = self.f_obs_w.d_spacings().data()
if d_max is not None:
exclude_low_w = flex.bool(d_w<d_max)
additional_selection_w = additional_selection_w&exclude_low_w
if d_min is not None:
exclude_high_w = flex.bool(d_w>d_min)
additional_selection_w = additional_selection_w&exclude_high_w
additional_selection_f = flex.bool(self.f_obs_f.data().size(), True)
d_f = self.f_obs_f.d_spacings().data()
if d_max is not None:
exclude_low_f = flex.bool(d_f<d_max)
additional_selection_f = additional_selection_f&exclude_low_f
if d_min is not None:
exclude_high_f = flex.bool(d_f>d_min)
additional_selection_f = additional_selection_f&exclude_high_f
r_abs_work_f_overall = self.r_work_object.r_amplitude_abs(
f_obs = self.f_obs_w.data(),
f_model = self.data_core.f_model(),
selection = additional_selection_w,
twin_fraction = self.twin_fraction_object.twin_fraction)
r_abs_free_f_overall = self.r_free_object.r_amplitude_abs(
self.f_obs_f.data(),
self.data_core.f_model(),
additional_selection_f,
self.twin_fraction_object.twin_fraction)
#make a sigmaa object
tmp_sigmaa_object = self.sigmaa_object()
if table:
r_abs_work_f_bin = []
r_abs_free_f_bin = []
bin_low = []
bin_high= []
n_free = []
n_work = []
rows = []
bins = []
n_bins = self.determine_n_bins(
free_reflections_per_bin = free_reflections_per_bin,
max_n_bins = max_number_of_bins)
self.f_obs_f.setup_binner(n_bins = n_bins)
self.f_obs_w.use_binning_of(self.f_obs_f)
completeness = self.f_obs_w.completeness(use_binning=True).data
for i_bin in self.f_obs_f.binner().range_used():
selection = flex.bool( self.f_obs_w.binner().bin_indices() == i_bin )
        selection = selection&additional_selection_w  #combine with the resolution selection
        n_work = selection.count(True)
tmp_work = self.r_work_object.r_amplitude_abs(
f_obs = self.f_obs_w.data(),
f_model = self.data_core.f_model(),
selection = selection,
twin_fraction = self.twin_fraction_object.twin_fraction)
mean_f_obs_w = flex.mean_default( self.f_obs_w.data().select( selection ), None )
selection = flex.bool( self.f_obs_f.binner().bin_indices() == i_bin )
selection = selection&additional_selection_f
n_free = selection.count(True)
tmp_free = self.r_free_object.r_amplitude_abs(
f_obs = self.f_obs_f.data(),
f_model = self.data_core.f_model(),
selection = selection,
twin_fraction = self.twin_fraction_object.twin_fraction)
r_abs_work_f_bin.append(tmp_work)
r_abs_free_f_bin.append(tmp_free)
d_max,d_min = self.f_obs_w.binner().bin_d_range( i_bin )
d_range = self.f_obs_w.binner().bin_legend(
i_bin=i_bin, show_bin_number=False, show_counts=False)
alpha_w, beta_w = self.alpha_beta_w()
alpha_f, beta_f = self.alpha_beta_f()
n_additional_selection_w = flex.bool(alpha_w.data().size(), True)
d_w = alpha_w.d_spacings().data()
if d_max is not None:
exclude_low_w = flex.bool(d_w<d_max)
n_additional_selection_w = n_additional_selection_w&exclude_low_w
if d_min is not None:
exclude_high_w = flex.bool(d_w>d_min)
n_additional_selection_w = n_additional_selection_w&exclude_high_w
n_additional_selection_f = flex.bool(alpha_f.data().size(), True)
d_f = alpha_f.d_spacings().data()
if d_max is not None:
exclude_low_f = flex.bool(d_f<d_max)
n_additional_selection_f = n_additional_selection_f&exclude_low_f
if d_min is not None:
exclude_high_f = flex.bool(d_f>d_min)
n_additional_selection_f = n_additional_selection_f&exclude_high_f
alpha_f = flex.mean_default( alpha_f.select( n_additional_selection_f ).data(), None )
beta_f = flex.mean_default( beta_f.select( n_additional_selection_f ).data(), None )
alpha_w = flex.mean_default( alpha_w.select( n_additional_selection_w ).data(), None )
beta_w = flex.mean_default( beta_w.select( n_additional_selection_w ).data(), None )
phase_error_w = flex.mean_default( self.phase_errors_work().select( n_additional_selection_w), None )
phase_error_f = flex.mean_default( self.phase_errors_test().select( n_additional_selection_f), None )
fom_w = flex.mean_default( self.figures_of_merit_work().select( n_additional_selection_w ), None )
tmp = [ str( "%3i"%(i_bin) ),
str( "%5.2f"%(d_max) ),
str( "%5.2f"%(d_min) ),
str( "%5i"%(n_work) ),
str( "%3.2f"%(tmp_work) ),
str( "%5i"%(n_free) ),
str( "%3.2f"%(tmp_free) ) ]
rows.append( tmp )
bin = resolution_bin(i_bin=i_bin,
d_range=d_range,
completeness=completeness[i_bin],
alpha_work=alpha_w,
beta_work=beta_w,
r_work=tmp_work,
r_free=tmp_free,
target_work=None,
target_free=None,
n_work=n_work,
n_free=n_free,
scale_k1_work= None, # XXX for Peter to fix.
mean_f_obs = mean_f_obs_w,
fom_work = fom_w,
pher_work = phase_error_w,
pher_free = phase_error_f )
bins.append( bin )
      if not return_rows:
header = ("bin", "d_max", "d_min", "n_work", "r_work", "n_free", "r_free")
comments = """
Overall r values
R Work : %4.3f
R Free : %4.3f
R = \sum_h( |Ft-Fo| )/ \sum_h(Fo)
Ft = Sqrt(tf*F1^2 + (1-tf)F2^2)
F1,F2 are twin related model amplitudes.
tf is the twin fraction and Fo is an observed amplitude."""%(r_abs_work_f_overall, r_abs_free_f_overall)
table_txt = table_utils.format( [header]+rows,
comments=comments,
has_header=True,
separate_rows=False,
prefix='| ',
postfix=' |')
print >> self.out, "------------------------ R values ------------------------"
print >> self.out, " twin law : %s"%( sgtbx.change_of_basis_op( self.twin_law ).as_hkl() )
print >> self.out, " twin fraction : %4.3f"%( self.twin_fraction_object.twin_fraction)
print >> self.out, table_txt
print >> self.out, "-----------------------------------------------------------"
print >> self.out
self.r_work_in_lowest_resolution_bin(show=True)
self.r_overall_low_high(show=True)
else:
return bins
else:
return r_abs_work_f_overall, r_abs_free_f_overall
def r_all(self):
selection = flex.bool( self.f_obs().data().size(), True )
overall_r = self.r_all_object.r_amplitude_abs(
f_obs = self.f_obs().data(),
f_model = self.data_core.f_model(),
selection = selection,
twin_fraction = self.twin_fraction_object.twin_fraction)
return overall_r
def r_work_in_lowest_resolution_bin(self, reflections_per_bin=200, show=False):
d_star_sq = self.f_obs_w.d_star_sq().data()
sort_permut = flex.sort_permutation( d_star_sq )
if sort_permut.size() < reflections_per_bin:
reflections_per_bin = sort_permut.size()
    i_select = sort_permut[:reflections_per_bin]
b_select = flex.bool(sort_permut.size(), False )
b_select = b_select.set_selected( i_select, True )
tmp_work = self.r_work_object.r_amplitude_abs(
f_obs = self.f_obs_w.data(),
f_model = self.data_core.f_model(),
selection = b_select,
twin_fraction = self.twin_fraction_object.twin_fraction)
if not show:
return tmp_work
else:
print >> self.out, "-----------------------------------------------------------"
print >> self.out, " R-value for the %i lowest resolution reflections:"%(reflections_per_bin)
print >> self.out, " %4.3f" %(self.r_work_in_lowest_resolution_bin(reflections_per_bin))
print >> self.out, "-----------------------------------------------------------"
def r_overall_low_high(self, d = 6.0, show=False):
r_work = self.r_work()
d_max, d_min = self.f_obs_w.d_max_min()
if(d_max < d): d = d_max
if(d_min > d): d = d_min
n_low = self.f_obs_w.resolution_filter(d_min = d, d_max = 999.9).data().size()
if(n_low > 0):
r_work_l = self.r_values(d_min = d, d_max = 999.9, table=False )[0]
else:
r_work_l = None
n_high = self.f_obs_w.resolution_filter(d_min = 0.0, d_max = d).data().size()
if(n_high > 0):
r_work_h = self.r_values(d_min = 0.0, d_max = d,table=False)[0]
else:
r_work_h = None
    if(r_work_l is None):
      r_work_l = 0.0
    if(r_work_h is None):
      r_work_h = 0.0
if not show:
return r_work, r_work_l, r_work_h, n_low, n_high
else:
print >> self.out, "----------------------------------------------------------"
print >> self.out, "Overall, low and high resolution R-work values"
print >> self.out
print >> self.out, "Limits: Overall: %6.2f -- %6.2f"%(d_max,d_min)
print >> self.out, " Low : %6.2f -- %6.2f"%(d_max,d)
print >> self.out, " High : %6.2f -- %6.2f"%(d,d_min)
print >> self.out
print >> self.out, "R values : Overall low high"
print >> self.out, " %6.3f %6.3f %6.3f"%(r_work,r_work_l,r_work_h)
print >> self.out, "Contributors:%7i %7i %7i"%(n_low+n_high, n_low,n_high)
print >> self.out, "----------------------------------------------------------"
def twin_fraction_scan(self, n=10):
print >> self.out
print >> self.out
print >> self.out, "------------------------ Twin fraction scan ----------------------"
print >> self.out
print >> self.out, " R-values and target values for various twin fractions are listed."
print >> self.out
current_twin_fraction = twin_fraction_object(self.twin_fraction_object.twin_fraction)
    trial_twin_fractions = list( flex.double( range(n+1) )/(2.0*n) )
rows = []
    for tf in trial_twin_fractions:
tmp_twin_fraction = twin_fraction_object( tf )
      # update_solvent_and_scale() takes no twin-fraction argument (see its
      # signature above), so install the trial fraction via update_core() first
      self.update_core( twin_fraction = tmp_twin_fraction.twin_fraction )
      self.update_solvent_and_scale()
rw,rf = self.r_values(table=False)
ttw,ttf = self.target(print_it=False)
tmp = [ "%4.3f"%(tf),
"%4.3f"%(rw),
"%4.3f"%(rf),
"%5.4e"%(ttw),
"%5.4e"%(ttf)
]
rows.append( tmp )
legend = ( "Twin fraction", "R-work", "R-free", "Target-work", "Target-free" )
table_txt = table_utils.format( [legend]+rows,
comments=None,
has_header=True,
separate_rows=False,
prefix='| ',
postfix=' |')
print >> self.out, table_txt
print >> self.out
print >> self.out, "------------------------------------------------------------------"
print >> self.out
print >> self.out
    # restore the twin fraction that was in effect before the scan
    self.update_core( twin_fraction = current_twin_fraction.twin_fraction )
    self.update_solvent_and_scale()
def target(self, print_it=True):
tmp_w=self.target_evaluator.target( self.data_core.f_model() )/self.norma_sum_f_sq_w
if(self.norma_sum_f_sq_f == 0):
tmp_f = tmp_w
else:
tmp_f=self.free_target_evaluator.target( self.data_core.f_model() )/self.norma_sum_f_sq_f
if print_it:
print >> self.out
print >> self.out, "----------------- Target values -----------------"
print >> self.out, " working set : %8.6e "%(tmp_w)
print >> self.out, " free set : %8.6e "%(tmp_f)
print >> self.out, "-------------------------------------------------"
else:
return(tmp_w,tmp_f)
def target_functor(self, alpha_beta=None): #XXX fake
return target_functor(manager=self)
def target_f(self):
return self.target_t()
def detwin_data(self, mode=None):
#determine how to detwin
if mode is None:
mode = "auto"
if mode == "auto":
      if self.twin_fraction_object.twin_fraction > self.detwin_switch_twin_fraction:
        mode = "proportional"
      else:
        mode = "algebraic"
      if self.twin_fraction_object.twin_fraction > 1.0 - self.detwin_switch_twin_fraction:
        mode = "algebraic"
if mode == "algebraic":
if abs(self.twin_fraction_object.twin_fraction-0.5)<1e-3:
print >> self.out, "Automatic adjustment: detwinning mode set to proportional"
# FIXME this seems appropriate but was not implemented - bug?
#mode = "proportional"
assert mode in self.possible_detwin_modes
assert mode != "auto"
    #detwinning should be done against intensities derived from the original F_obs
tmp_i_obs = self.f_obs().deep_copy().f_as_f_sq()
untouched = self.f_obs().deep_copy().f_as_f_sq()
dt_f_obs = None
tmp_free = self.r_free_flags().deep_copy()
# now please detwin the data
if mode == "proportional":
sigmas = tmp_i_obs.sigmas()
if (sigmas is None):
sigmas = flex.double(tmp_i_obs.data().size(), 1.0)
dt_iobs, dt_isigma = self.full_detwinner.detwin_with_model_data(
tmp_i_obs.data(),
sigmas,
self.data_core.f_model(),
self.twin_fraction_object.twin_fraction )
tmp_i_obs = tmp_i_obs.customized_copy(
data = dt_iobs, sigmas = dt_isigma).set_observation_type( tmp_i_obs )
dt_f_obs = tmp_i_obs.f_sq_as_f()
if mode == "algebraic":
sigmas = tmp_i_obs.sigmas()
if (sigmas is None):
sigmas = flex.double(tmp_i_obs.data().size(), 1.0)
dt_iobs, dt_isigma = self.full_detwinner.detwin_with_twin_fraction(
tmp_i_obs.data(),
sigmas,
self.twin_fraction_object.twin_fraction )
# find out which intensities are zero or negative, they will be eliminated later on
zeros = flex.bool( dt_iobs <= 0 )
x = dt_iobs.select( ~zeros )
y = tmp_i_obs.data().select( ~zeros )
tmp_i_obs = tmp_i_obs.customized_copy(
data = dt_iobs, sigmas = dt_isigma).set_observation_type( tmp_i_obs )
dt_f_obs = tmp_i_obs.select( ~zeros ).f_sq_as_f()
tmp_free = tmp_free.select( ~zeros )
untouched = untouched.select( ~zeros )
    #we can now quickly scale the two and see what happens
fmodel = self.f_model()
# XXX Pavel: avoid floating-point crashes
re = flex.abs(flex.real(fmodel.data()))
im = flex.abs(flex.imag(fmodel.data()))
sel = re > 1.e+50
sel |= im > 1.e+50
d = fmodel.data().set_selected(sel, 0+0j)
fmodel = fmodel.array(data = d)
#
abs_tmp_f_model = abs( fmodel ).common_set(
dt_f_obs ).set_observation_type( dt_f_obs )
tmp_f_model = fmodel.common_set( dt_f_obs )
scaler = relative_scaling.ls_rel_scale_driver(
miller_native = abs_tmp_f_model,
miller_derivative = dt_f_obs,
use_intensities = False,
scale_weight = False,
use_weights = False )
if dt_f_obs.data().size()==0:
if mode == "algebraic":
raise Sorry("Algebraic detwinning of data resulted in a dataset without data! \n Please try to use detwinning using proportionality rules (detwin.mode=proportional) ")
else:
raise Sorry("This should never have happend. Please contact authors")
return dt_f_obs, tmp_f_model, tmp_free
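  # Sketch of the algebra assumed behind detwin_with_twin_fraction: for twin
  # fraction a, the observations mix the true intensities of twin-related
  # reflections h1, h2 as
  #   Iobs(h1) = (1-a)*I(h1) + a*I(h2)
  #   Iobs(h2) = a*I(h1) + (1-a)*I(h2)
  # so algebraic detwinning inverts the 2x2 system,
  #   I(h1) = ((1-a)*Iobs(h1) - a*Iobs(h2)) / (1 - 2a),
  # which is singular at a = 0.5 -- hence the proportional fallback above.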
def sigmaa_object(self, detwinned_data=None, f_model_data=None, tmp_free=None, forced_update=False):
if self.sigmaa_object_cache is None:
forced_update = True
assert ( [detwinned_data,f_model_data] ).count(None) != 1
if tmp_free is None:
      tmp_free = self.r_free_flags()
if (detwinned_data is None):
if forced_update or self.update_sigmaa_object is True:
self.update_sigmaa_object = True
detwinned_data,f_model_data,tmp_free = self.detwin_data(mode=self.detwin_mode)
    else:
      # detwinned data supplied by the caller: build the estimator from it
      tmp_sigmaa_object = sigmaa_estimation.sigmaa_estimator(
        miller_obs = detwinned_data,
        miller_calc = f_model_data,
        r_free_flags = tmp_free,
        kernel_width_free_reflections=200,
        )
      if not forced_update:
        return tmp_sigmaa_object
      else:
        self.sigmaa_object_cache = tmp_sigmaa_object
        return self.sigmaa_object_cache
if forced_update:
self.update_sigmaa_object = True
detwinned_data,f_model_data,tmp_free = self.detwin_data(mode=self.detwin_mode)
if self.update_sigmaa_object:
self.update_sigmaa_object = False
if(tmp_free.data().count(True) == 0):
tmp_free = tmp_free.array(data = ~tmp_free.data())
self.sigmaa_object_cache = sigmaa_estimation.sigmaa_estimator(
miller_obs = detwinned_data,
miller_calc = f_model_data,
r_free_flags = tmp_free,
kernel_width_free_reflections=200,
)
return self.sigmaa_object_cache
def model_error_ml(self):
return None # XXX
def alpha_beta(self, external_sigmaa_object=None):
sigmaa_object = external_sigmaa_object
if sigmaa_object is None:
sigmaa_object = self.sigmaa_object()
return sigmaa_object.alpha_beta()
def alpha_beta_w(self, external_sigmaa_object=None, only_if_required_by_target=False):
a,b = self.alpha_beta(external_sigmaa_object=external_sigmaa_object)
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
a = a.select( ~tmp_free.data() )
b = b.select( ~tmp_free.data() )
return a,b
def alpha_beta_f(self,external_sigmaa_object=None,only_if_required_by_target=False):
a,b = self.alpha_beta(external_sigmaa_object=external_sigmaa_object)
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
a = a.select( tmp_free.data() )
b = b.select( tmp_free.data() )
return a,b
def figures_of_merit(self):
sigmaa_object = self.sigmaa_object()
return sigmaa_object.fom().data()
def figures_of_merit_work(self):
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
fom = self.figures_of_merit().select(
~tmp_free.data())
return fom
def figures_of_merit_t(self):
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
fom = self.figures_of_merit().select(
tmp_free.data())
return fom
def phase_errors(self):
sigmaa_object = self.sigmaa_object()
return sigmaa_object.phase_errors().data()
def phase_errors_work(self):
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
pher = self.phase_errors().select(~tmp_free.data())
return pher
def phase_errors_test(self):
tmp_sigmaa = self.sigmaa_object()
tmp_free = tmp_sigmaa.r_free_flags
pher = self.phase_errors().select(tmp_free.data())
return pher
def w_star(self):
t_o, t_c, t_free = self.detwin_data(mode='proportional')
t_sigmaa_object = self.sigmaa_object(t_o,t_c,t_free)
a,b = self.alpha_beta( t_sigmaa_object )
obj = max_lik.f_star_w_star_mu_nu(
f_obs = t_o.data(),
f_model = flex.abs(t_c.data()),
alpha = a.data(),
beta = b.data(),
space_group = self.f_obs_.space_group(),
miller_indices = t_o.indices())
w_star_o = miller.array(miller_set = t_o,
data = obj.w_star())
self.sigmaa_object(forced_update=True)
return w_star_o
def set_pseudo_ml_weights(self):
weights = self.w_star().data()
completion = xray.twin_completion( self.f_obs_.indices(),
self.xs.space_group(),
self.f_obs_.anomalous_flag(),
self.twin_law.as_double_array()[0:9] )
twinned_weights = completion.twin_sum(weights, self.twin_fraction_object.twin_fraction)
self.target_evaluator.set_weights( twinned_weights )
return None
def determine_n_bins(self,
free_reflections_per_bin,
max_n_bins=None,
min_n_bins=1,
min_refl_per_bin=100):
assert free_reflections_per_bin > 0
n_refl = self.r_free_flags().data().size()
n_free = self.r_free_flags().data().count(True)
n_refl_per_bin = free_reflections_per_bin
if (n_free != 0):
n_refl_per_bin *= n_refl / n_free
n_refl_per_bin = min(n_refl, iround(n_refl_per_bin))
result = max(1, iround(n_refl / max(1, n_refl_per_bin)))
if (min_n_bins is not None):
result = max(result, min(min_n_bins, iround(n_refl / min_refl_per_bin)))
if (max_n_bins is not None):
result = min(max_n_bins, result)
return result
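  # Worked example (illustrative numbers): with 10000 reflections of which
  # 1000 are free and free_reflections_per_bin = 140, each bin targets
  # 140 * (10000/1000) = 1400 reflections, giving roughly 10000/1400 ~ 7 bins
  # (before the min/max clamps are applied).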
def _map_coeff(self, f_obs, f_model, f_obs_scale, f_model_scale):
d_obs = miller.array(miller_set = f_model,
data = f_obs.data()*f_obs_scale
).phase_transfer(phase_source = f_model)
return miller.array(miller_set = f_model,
data = d_obs.data()-f_model.data()*f_model_scale)
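  # _map_coeff returns (f_obs_scale*|Fobs|)*exp(i*phi_model) - f_model_scale*Fmodel;
  # e.g. an mFo-DFc style difference map uses f_obs_scale = m (figure of merit)
  # and f_model_scale = D, as wired up in compute_map_coefficients below.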
def map_coefficients (self, **kwds) :
emap = self.electron_density_map()
return emap.map_coefficients(**kwds)
def _get_real_map (self, **kwds) :
map_coeffs = self.map_coefficients(**kwds)
return map_coeffs.fft_map(
resolution_factor=0.25).apply_sigma_scaling().real_map_unpadded()
def two_fofc_map (self, **kwds) :
kwds['map_type'] = "2mFo-DFc"
return self._get_real_map(**kwds)
def fofc_map (self, **kwds) :
kwds['map_type'] = "mFo-DFc"
return self._get_real_map(**kwds)
def anomalous_map (self, **kwds) :
if (not self.f_obs().anomalous_flag()) : return None
kwds['map_type'] = "anom"
return self._get_real_map(**kwds)
def compute_map_coefficients(self,
map_type = None,
k = None,
n = None,
w1 = None,
w2 = None,
isotropize=None,
ncs_average = None,
):
if (map_type == "Fmodel") :
map_type = "Fc"
elif (map_type == "DFmodel") :
map_type = "DFc"
if (map_type.lower().startswith("anom")) :
map_type = "anom"
supported_types = ("Fo-Fc", "Fobs-Fmodel",
"2mFo-DFc", "2mFobs-DFmodel",
"mFo-DFc", "mFobs-DFmodel",
"gradient",
"m_gradient",
"mFo",
"Fc",
"DFc",
"anom",
)
    if map_type not in supported_types :
raise Sorry(("Map type '%s' not supported for twinned structures. "+
"Allowed types: %s.") % (map_type, ", ".join(supported_types)))
# this is to modify default behavior of phenix.refine
if (map_type == "mFo-DFc") or (map_type == "mFobs-DFmodel") :
if self.map_types.fofc == "gradient":
map_type = "gradient"
if self.map_types.fofc == "m_gradient":
map_type = "m_gradient"
if self.map_types.fofc == "m_dtfo_d_fc":
map_type = "m_dtfo_d_fc"
if (map_type == "2mFo-DFc") or (map_type=="2mFobs-DFmodel") :
if self.map_types.twofofc == "two_m_dtfo_d_fc":
map_type = "two_m_dtfo_d_fc"
if self.map_types.twofofc == "two_dtfo_fc":
map_type = "two_dtfo_fc"
#detwin
dt_f_obs, tmp_f_model, tmp_free = self.detwin_data(mode=self.detwin_mode)
#for aniso correction
aniso_scale = 1.0/self.data_core.overall_scale() # anisotropy correction
aniso_scale = self.f_atoms.customized_copy(
data = aniso_scale ).common_set( dt_f_obs )
aniso_scale = aniso_scale.data()
if (map_type == "anom") :
if (not dt_f_obs.anomalous_flag()) :
return None
anom_diff = dt_f_obs.anomalous_differences()
tmp_f_model = tmp_f_model.average_bijvoet_mates()
tmp_f_model, anom_diff = tmp_f_model.common_sets(other=anom_diff)
anom_diff = anom_diff.phase_transfer(phase_source=tmp_f_model)
result = miller.array(miller_set=anom_diff,
data=anom_diff.data()/(2j))
return result
elif map_type not in ["gradient","m_gradient"]:
result = None
if (map_type == "Fc") :
result = tmp_f_model
elif (map_type == "DFc") :
sigmaa_object = self.sigmaa_object(
detwinned_data=dt_f_obs,
f_model_data=tmp_f_model,
tmp_free=tmp_free,
forced_update=True)
m, dt_f_obs = sigmaa_object.fom().common_sets( dt_f_obs )
d, dt_f_obs = sigmaa_object.alpha_beta()[0].common_sets( dt_f_obs )
dt_f_obs, tmp_f_model = dt_f_obs.common_sets( tmp_f_model )
result = tmp_f_model.customized_copy(
data=tmp_f_model.data()*d.data())
elif (map_type == "mFo") :
sigmaa_object = self.sigmaa_object(
detwinned_data=dt_f_obs,
f_model_data=tmp_f_model,
tmp_free=tmp_free,
forced_update=True)
m, dt_f_obs = sigmaa_object.fom().common_sets( dt_f_obs )
dt_f_obs, tmp_f_model = dt_f_obs.common_sets( tmp_f_model )
result = dt_f_obs.customized_copy(
data=dt_f_obs.data()*m.data()).phase_transfer(
phase_source=tmp_f_model)
elif (map_type in ["Fo-Fc", "Fobs-Fmodel"]) :
if ([k,n]).count(None) > 0:
raise Sorry("Map coefficient multipliers (k and n) must be provided to generate detwinned maps")
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = k,
f_model_scale = n )
assert result is not None
else:
sigmaa_object = self.sigmaa_object(
detwinned_data=dt_f_obs,
f_model_data=tmp_f_model,
tmp_free=tmp_free,
forced_update=True)
dt_f_obs, tmp_f_model = dt_f_obs.common_sets( tmp_f_model )
m, dt_f_obs = sigmaa_object.fom().common_sets( dt_f_obs )
d, dt_f_obs = sigmaa_object.alpha_beta()[0].common_sets( dt_f_obs )
m = m.data()
d = d.data()
dt_f_obs, tmp_f_model = dt_f_obs.common_sets( tmp_f_model )
if map_type == "m_dtfo_d_fc":
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = m ,
f_model_scale =d )
if map_type == "dtfo_fc":
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = 1.0,
f_model_scale =1.0 )
if map_type == "two_m_dtfo_d_fc":
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = 2*m,
f_model_scale = d )
if map_type == "two_dtfo_fc":
result = self._map_coeff( f_obs = dt_f_obs,
f_model = tmp_f_model,
f_obs_scale = 2,
f_model_scale = 1 )
assert result is not None
      assert result is not None
if self.map_types.aniso_correct:
result = result.customized_copy( data = result.data()*aniso_scale )
return result
else:
# get coefficients for a gradient map please
gradients = self.target_evaluator.d_target_d_fmodel(
self.data_core.f_model() )
gradients = self.f_atoms.customized_copy(
data = -gradients).common_set( self.f_obs() )
if map_type == "m_gradient":
# get the FOMs please
m = self.sigmaa_object().fom().common_set(self.f_obs()).data()
gradients = gradients.customized_copy(
data = gradients.data()*m )
if self.map_types.aniso_correct:
gradients = gradients.customized_copy( data = gradients.data()*aniso_scale )
return gradients
def k_part(self): return 0 # XXX to be compatible with non-twin fmodel
def b_part(self): return 0 # XXX to be compatible with non-twin fmodel
def electron_density_map(self, k = 1,
n = 1,
w1 = None,
w2 = None,
resolution_factor = 1/3.,
fill_missing_f_obs = True, # XXX not used since not available for twin case.
symmetry_flags = None,
fill_mode = None,
reverse_scale = True # XXX Dummy parameter, not used here.
): # XXX Added for compatibility.
# XXX work-around to support new developments in non-twin fmodel. PA.
class result(object):
def __init__(self, resolution_factor, symmetry_flags, fmodel, reverse_scale):
self.resolution_factor = resolution_factor
self.symmetry_flags = symmetry_flags
self.fmodel = fmodel
self.mch = None # XXX prevent crash in mmtbx.maps
# XXX: added extra keywords passed by mmtbx.maps, which will simply be
# ignored here. -nat
def map_coefficients(self,
map_type=None,
acentrics_scale=None,
centrics_pre_scale=None,
ncs_average=None,
isotropize=None,
exclude_free_r_reflections=False,
post_processing_callback=None,
fill_missing=None,
sharp=None,
pdb_hierarchy=None,
merge_anomalous=None): # FIXME ignored
if (map_type in ["gradient", "m_gradient", "anom"]) :
return self.fmodel.compute_map_coefficients(map_type=map_type)
else :
map_name_manager = mmtbx.map_names(map_name_string = map_type)
k = map_name_manager.k
n = map_name_manager.n
return self.fmodel.compute_map_coefficients(
map_type = map_type,
k = k,
n = n,
w1 = w1,
w2 = w2)
def fft_map(self, resolution_factor = None,
symmetry_flags = None,
map_coefficients = None,
other_fft_map = None,
use_all_data = False,
map_type = None):
if(resolution_factor is None):
resolution_factor = self.resolution_factor
if(symmetry_flags is None):
symmetry_flags = self.symmetry_flags
if (map_type in ["gradient", "m_gradient", "anom"]) :
map_coefficients = self.fmodel.compute_map_coefficients(
map_type=map_type)
else :
map_name_manager = mmtbx.map_names(map_name_string = map_type)
k = map_name_manager.k
n = map_name_manager.n
map_coefficients = self.fmodel.compute_map_coefficients(
map_type = map_type,
k = k,
n = n,
w1 = w1,
w2 = w2)
return map_coefficients.fft_map(
resolution_factor = resolution_factor,
symmetry_flags = symmetry_flags)
return result(resolution_factor= resolution_factor,
symmetry_flags = symmetry_flags,
fmodel = self,
reverse_scale = reverse_scale)
def u_star(self):
return self.data_core.ustar()
def u_cart(self):
tmp = self.u_star()
tmp = adptbx.u_star_as_u_cart(self.xs.unit_cell(),tmp)
return tmp
def b_cart(self):
b_cart = adptbx.u_as_b( self.u_cart() )
return b_cart
def b_iso(self):
b_cart = self.b_cart()
return (b_cart[0]+b_cart[1]+b_cart[2])/3.0
def u_iso(self):
u_cart = self.u_cart()
return (u_cart[0]+u_cart[1]+u_cart[2])/3.0
def u_iso_as_u_cart(self):
ui = self.u_iso()
return [ui,ui,ui,0.0,0.0,0.0]
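  # B and U displacement parameters are related by B = 8*pi**2*<u**2>, so the
  # adptbx.u_as_b / b_as_u conversions used throughout simply multiply or
  # divide by 8*pi**2.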
def k_sol(self):
return self.data_core.ksol()
def k_sols(self):
# This is a dummy, making twin_f_model compatible with f_model
# Radial shell mask model is not implemented for twin_f_model
return [self.k_sol()]
def u_sol(self):
return self.data_core.usol()
def b_sol(self):
return adptbx.u_as_b( self.u_sol() )
def k_sol_b_sol(self):
return self.k_sol(), self.b_sol()
def k_sol_u_sol(self):
return self.k_sol(), self.u_sol()
def f_mask(self):
return self.f_mask_array
def f_mask_w(self):
return self.f_mask().select(~self.free_flags_for_f_atoms )
def f_mask_t(self):
return self.f_mask().select( self.free_flags_for_f_atoms )
def f_bulk(self):
tmp = self.data_core.f_bulk()
tmp = self.f_mask_array.customized_copy(
data = tmp ).set_observation_type( self.f_mask_array )
return tmp
def f_bulk_t(self):
tmp = self.f_bulk()
return tmp.select( self.free_flags_for_f_atoms )
def f_bulk_w(self):
tmp = self.f_bulk()
return tmp.select(~self.free_flags_for_f_atoms )
def fb_bulk(self):
tmp = self.data_core.f_bulk()
multi = self.data_core.overall_scale()
tmp = self.f_mask_array.customized_copy(
data = tmp*multi ).set_observation_type( self.f_mask_array )
return tmp
def fb_bulk_t(self):
    tmp = self.fb_bulk()
return tmp.select( self.free_flags_for_f_atoms )
def fb_bulk_w(self):
    tmp = self.fb_bulk()
return tmp.select(~self.free_flags_for_f_atoms )
def scale_k1_w(self):
return self.data_core.koverall()
def scale_k1_t(self):
return self.data_core.koverall()
def scale_k3_t(self):
return self.data_core.koverall()
def scale_k3_w(self):
return self.data_core.koverall()
def hl_coeffs(self): return None
def fft_vs_direct(self, reflections_per_bin = 250,
n_bins = 0,
out = None):
print >> self.out, "Direct vs FFT comparison not yet implemented. "
def show_k_sol_b_sol_b_cart_target(self, header=None,target=None,out=None):
if(out is None): out = self.out
p = " "
if(header is None): header = ""
line_len = len("|-"+"|"+header)
fill_len = 80-line_len-1
print >> out, "|-"+header+"-"*(fill_len)+"|"
k_sol = self.k_sol()
b_sol = self.b_sol()
u0,u1,u2,u3,u4,u5 = self.b_cart()
target_w=self.target_w()
alpha, beta = self.alpha_beta_w()
alpha_d = alpha.data()
a_mean = flex.mean(alpha_d)
a_zero = (alpha_d <= 0.0).count(True)
r_work = self.r_work()
u_isos = self.xray_structure.extract_u_iso_or_u_equiv()
b_iso_mean = flex.mean(u_isos * math.pi**2*8)
print >> out, "| k_sol=%5.2f b_sol=%7.2f target_w =%20.6f r_work=%7.4f" % \
(k_sol, b_sol, target_w, r_work) + 5*p+"|"
print >> out, "| B(11,22,33,12,13,23)=%9.4f%9.4f%9.4f%9.4f%9.4f%9.4f |" % \
(u0,u1,u2,u3,u4,u5)
print >> out, "| trace(B) = (B11 + B22 + B33)/3 = %-10.3f |"%self.u_iso()
print >> out, "| mean alpha:%8.4f number of alpha <= 0.0:%7d" % \
(a_mean, a_zero)+25*p+"|"
print >> out, "|"+"-"*77+"|"
out.flush()
def show_essential(self, header = None, out=None):
if(out is None): out = self.out
out.flush()
p = " "
if(header is None): header = ""
d_max, d_min = self.f_obs().d_max_min()
line1 = "---(resolution: "
line2 = n_as_s("%6.2f",d_min)
line3 = n_as_s("%6.2f",d_max)
line4 = " - "
line5 = " A)"
tl = header+line1+line2+line4+line3+line5
line_len = len("|-"+"|"+tl)
fill_len = 80-line_len-1
print >> out, "|-"+tl+"-"*(fill_len)+"|"
print >> out, "| "+" "*38+"|"
r_work = n_as_s("%6.4f",self.r_work() )
r_free = n_as_s("%6.4f",self.r_free() )
scale = n_as_s("%6.3f",self.scale_k1_w())
k_sol = n_as_s("%4.2f",self.k_sol())
b_sol = n_as_s("%6.2f",self.b_sol())
b0,b1,b2,b3,b4,b5 = n_as_s("%7.2f",self.b_cart())
b_iso = n_as_s("%7.2f",self.b_iso())
#XXXX Model error analyses required
#err = n_as_s("%6.2f",self.model_error_ml())
err=" None "
try: target_work = n_as_s("%.4g",self.target_w())
except Exception: target_work = str(None)
line = "| r_work= "+r_work+" r_free= "+r_free+" ksol= "+k_sol+\
" Bsol= "+b_sol+" scale= "+scale
np = 79 - (len(line) + 1)
if(np < 0): np = 0
print >> out, line + p*np + "|"
print >> out, "| "+" "*38+"|"
print >> out, "| overall anisotropic scale matrix (Cartesian basis): "\
" |"
c = ","
line4 = "| (B11,B22,B33,B12,B13,B23)= ("+b0+c+b1+c+b2+c+b3+c+b4+c+b5+")"
np = 79 - (len(line4) + 1)
line4 = line4 + " "*np + "|"
print >> out, line4
line5 = "| (B11+B22+B33)/3 = "+b_iso
np = 79 - (len(line5) + 1)
line5 = line5 + " "*np + "|"
print >> out, line5
print >> out, "| "+" "*38+"|"
line5_and_a_half = "| Twin law : %s Twin fraction: %4.3f"%(self.twin_law.r().as_hkl(),self.twin_fraction_object.twin_fraction)
np = 79 - (len(line5_and_a_half) + 1)
line5_and_a_half = line5_and_a_half + " "*np + "|"
print >> out, line5_and_a_half
print >> out, "| "+" "*38+"|"
line6="| Target ("+self.target_name+")= "+target_work+\
" | ML estimate for coordinates error: "+err+" A"
np = 79 - (len(line6) + 1)
line6 = line6 + " "*np + "|"
print >> out, line6
print >> out, "|"+"-"*77+"|"
out.flush()
def show_comprehensive(self,
header = "",
free_reflections_per_bin = 140,
max_number_of_bins = 20,
out=None):
self.r_values(table=True)
self.twin_fraction_scan(n=15)
self.sigmaa_object().show(out=self.out)
def statistics_in_resolution_bins(self,
free_reflections_per_bin = 140,
max_number_of_bins = 20,
out=None):
results = self.r_values(table=True, rows=True,
free_reflections_per_bin = free_reflections_per_bin, max_number_of_bins =
max_number_of_bins)
return results
def r_factors_in_resolution_bins(self,
free_reflections_per_bin = 140,
max_number_of_bins = 20,
out=None):
results = self.r_values(table=True, rows=True)
return results
def r_work_scale_k1_completeness_in_bins(self, reflections_per_bin = 500,
n_bins = 0,
prefix = "",
out = None):
#actively ignoring input
self.r_values(table=True)
def show_fom_phase_error_alpha_beta_in_bins(self,
free_reflections_per_bin = 140,
max_number_of_bins = 20,
out=None):
self.sigmaa_object().show(out=self.out)
def export(self, out=None, format="mtz"):
assert format in ["mtz", "cns"]
file_name = None
if (out is None):
out = sys.stdout
elif (hasattr(out, "name")):
file_name = libtbx.path.canonical_path(file_name=out.name)
warning = [
"DO NOT USE THIS FILE AS INPUT FOR REFINEMENT!",
"Resolution and sigma cutoffs may have been applied to FOBS."]
width = max([len(line) for line in warning])
warning.insert(0, "*" * width)
warning.append(warning[0])
if (format == "cns"):
for line in warning:
print >> out, "{ %s%s }" % (line, " "*(width-len(line)))
print >> out, "{ %s }" % date_and_time()
if (file_name is not None):
print >> out, "{ file name: %s }" % os.path.basename(file_name)
print >> out, "{ directory: %s }" % os.path.dirname(file_name)
self.explain_members(out=out, prefix="{ ", suffix=" }")
crystal_symmetry_as_cns_comments(
crystal_symmetry=self.f_obs_, out=out)
print >> out, "NREFlection=%d" % self.f_obs_.indices().size()
print >> out, "ANOMalous=%s" % {0: "FALSE"}.get(
int(self.f_obs_.anomalous_flag()), "TRUE")
have_sigmas = self.f_obs_.sigmas() is not None
for n_t in [("FOBS", "REAL"),
("SIGFOBS", "REAL"),
("R_FREE_FLAGS", "INTEGER"),
("FMODEL", "COMPLEX"),
("FCALC", "COMPLEX"),
("FMASK", "COMPLEX"),
("FBULK", "COMPLEX"),
("FOM", "REAL"),
("ALPHA", "REAL"),
("BETA", "REAL")]:
if (not have_sigmas and n_t[0] == "SIGFOBS"): continue
print >> out, "DECLare NAME=%s DOMAin=RECIprocal TYPE=%s END" % n_t
f_model = self.f_model_scaled_with_k1()
f_model_amplitudes = f_model.amplitudes().data()
f_model_phases = f_model.phases(deg=True).data()
f_calc_amplitudes = self.f_calc().amplitudes().data()
f_calc_phases = self.f_calc().phases(deg=True).data()
f_mask_amplitudes = self.f_mask().amplitudes().data()
f_mask_phases = self.f_mask().phases(deg=True).data()
f_bulk_amplitudes = self.f_bulk().amplitudes().data()
f_bulk_phases = self.f_bulk().phases(deg=True).data()
alpha, beta = [item.data() for item in self.alpha_beta()]
arrays = [
self.f_obs_.indices(), self.f_obs_.data(), self.f_obs_.sigmas(),
      self.r_free_flags().data(),
f_model_amplitudes, f_model_phases,
f_calc_amplitudes, f_calc_phases,
f_mask_amplitudes, f_mask_phases,
f_bulk_amplitudes, f_bulk_phases,
self.figures_of_merit(),
alpha, beta]
if (not have_sigmas):
del arrays[2]
i_r_free_flags = 2
else:
i_r_free_flags = 3
for values in zip(*arrays):
print >> out, "INDE %d %d %d" % values[0]
print >> out, " FOBS= %.6g" % values[1],
if (have_sigmas):
print >> out, " SIGFOBS= %.6g" % values[2],
      print >> out, \
        " R_FREE_FLAGS= %d FMODEL= %.6g %.6g\n" \
        " FCALC= %.6g %.6g FMASK= %.6g %.6g FBULK= %.6g %.6g\n" \
        " FOM= %.6g ALPHA= %.6g BETA= %.6g" \
        % values[i_r_free_flags:]
if (file_name is not None):
out.close()
else:
assert file_name is not None
mtz_dataset = self.f_obs_.as_mtz_dataset(column_root_label="FOBS")
mtz_dataset.add_miller_array(
miller_array=self.r_free_flags(), column_root_label="R_FREE_FLAGS")
mtz_dataset.add_miller_array(
miller_array=self.f_model(), column_root_label="FMODEL")
mtz_dataset.add_miller_array(
miller_array=self.f_calc(), column_root_label="FCALC")
mtz_dataset.add_miller_array(
miller_array=self.f_mask(), column_root_label="FMASK")
mtz_dataset.add_miller_array(
miller_array=self.f_bulk(), column_root_label="FBULK")
mtz_dataset.add_miller_array(
miller_array= self.sigmaa_object().fom(),
column_root_label="FOM", column_types="W")
alpha, beta = self.alpha_beta()
mtz_dataset.add_miller_array(
miller_array=alpha, column_root_label="ALPHA", column_types="W")
mtz_dataset.add_miller_array(
miller_array=beta, column_root_label="BETA", column_types="W")
mtz_history_buffer = flex.std_string(warning)
ha = mtz_history_buffer.append
ha(date_and_time())
ha("file name: %s" % os.path.basename(file_name))
ha("directory: %s" % os.path.dirname(file_name))
s = StringIO()
self.explain_members(out=s)
for line in s.getvalue().splitlines():
ha(line)
mtz_object = mtz_dataset.mtz_object()
mtz_object.add_history(lines=mtz_history_buffer)
out.close()
mtz_object.write(file_name=file_name)
def explain_members(self, out=None, prefix="", suffix=""):
if (out is None): out = sys.stdout
def zero_if_almost_zero(v, eps=1.e-6):
if (abs(v) < eps): return 0
return v
for line in [
"Fmodel = scale_k1 * fb_cart * (Fcalc + Fbulk)",
"Fcalc = structure factors calculated from atomic model",
"Fbulk = k_sol * exp(-b_sol*s**2/4) * Fmask",
"A = orthogonalization matrix",
"k_sol = %.6g" % self.k_sol(),
"b_sol = %.6g" % zero_if_almost_zero(self.b_sol()),
"B_cart = (B11, B22, B33, B12, B13, B23)",
" = (%s)" % ", ".join(
["%.6g" % zero_if_almost_zero(v) for v in self.b_cart()])]:
print >> out, prefix + line + suffix
def export_f_obs_flags_as_mtz (self,
file_name,
merge_anomalous=False,
include_hendrickson_lattman=True) :
f_obs = self.f_obs()
flags = self.r_free_flags()
hl_coeffs = self.hl_coeffs()
if (merge_anomalous) :
f_obs = f_obs.average_bijvoet_mates()
flags = flags.average_bijvoet_mates()
if (hl_coeffs is not None) :
hl_coeffs = hl_coeffs.average_bijvoet_mates()
mtz_dataset = f_obs.as_mtz_dataset(column_root_label="F")
if (hl_coeffs is not None) and (include_hendrickson_lattman) :
mtz_dataset.add_miller_array(hl_coeffs, column_root_label="HL")
mtz_dataset.add_miller_array(flags, column_root_label="FreeR_flag")
mtz_dataset.mtz_object().write(file_name)
def show_targets(self, out=None, text=""):
if(out is None): out = self.out
part1 = "|-"+text
part2 = "-|"
n = 79 - len(part1+part2)
print >> out, part1 + "-"*n + part2
part3 = "| target_work(%s"%self.target_name+") = %.6e r_work = %6.4f r_free = %6.4f"%\
(self.target_w(), self.r_work(), self.r_free())
n = 78 - len(str(part3)+"|")
print >> out, part3, " "*n +"|"
print >> out, "|" +"-"*77+"|"
out.flush()
def _header_resolutions_nreflections(self, header, out):
out.flush()
if(header is None): header = ""
d_max, d_min = self.f_obs_.d_max_min()
line1 = "(resolution: "
line2 = n_as_s("%6.2f",d_min)
line3 = n_as_s("%6.2f",d_max)
line4 = " - "
line5 = " A; n_refl. = "
line6 = n_as_s("%d",self.f_obs_.data().size())
tl = header+"-"+line1+line2+line4+line3+line5+line6
line_len = len("|-"+"|"+tl)
fill_len = 80-line_len-1
print >> out, "|-"+tl+"-"*(fill_len)+"|"
out.flush()
def _rfactors_and_bulk_solvent_and_scale_params(self, out):
out.flush()
r_work = n_as_s("%6.4f",self.r_work() )
r_free = n_as_s("%6.4f",self.r_free() )
scale = n_as_s("%6.3f",self.scale_k1_w())
k_sol = n_as_s("%4.2f",self.k_sol())
b_sol = n_as_s("%6.2f",self.b_sol())
b0,b1,b2,b3,b4,b5 = n_as_s("%7.2f",self.b_cart())
b_iso = n_as_s("%7.2f",self.b_iso())
line = "| r_work= "+r_work+" r_free= "+r_free+" ksol= "+k_sol+\
" Bsol= "+b_sol+" scale= "+scale
np = 79 - (len(line) + 1)
if(np < 0): np = 0
print >> out, line + " "*np + "|"
print >> out, "| "+" "*38+"|"
print >> out, "| overall anisotropic scale matrix (Cartesian basis; B11,B22,B33,B12,B13,B23):|"
c = ","
line4 = "| ("+b0+c+b1+c+b2+c+b3+c+b4+c+b5+"); trace/3= "+b_iso
np = 79 - (len(line4) + 1)
line4 = line4 + " "*np + "|"
print >> out, line4
out.flush()
def ls_ff_weights(f_obs, atom, B):
d_star_sq_data = f_obs.d_star_sq().data()
table = wk1995(atom).fetch()
ff = table.at_d_star_sq(d_star_sq_data) * flex.exp(-B/4.0*d_star_sq_data)
weights = 1.0/flex.pow2(ff)
return weights
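# ls_ff_weights: least-squares weights w = 1/f**2, where f is the
# Waasmaier-Kirfel form factor of `atom` smeared by an overall B-factor
# (exp(-B*s**2/4), with s**2 = d*^2).  A sketch of the expected call,
# assuming wk1995 is imported from cctbx.eltbx.xray_scattering:
#   weights = ls_ff_weights(f_obs, "N", 15.0)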
class target_functor(object):
def __init__(self, manager):
self.manager = manager
def prepare_for_minimization(self):
pass
def target_function_is_invariant_under_allowed_origin_shifts(self):
return True
def __call__(self, compute_gradients=False):
return target_result(manager=self.manager)
class target_result(mmtbx.refinement.targets.target_result_mixin):
def __init__(self, manager):
self.manager = manager
def target_work(self):
return self.manager.target(False)[0]
def target_test(self):
return self.manager.target(False)[1]
def d_target_d_f_model_work(self):
manager = self.manager
return manager.miller_set.array(
data=manager.target_evaluator.d_target_d_fmodel(
manager.data_core.f_model()))
def d_target_d_f_calc_work(self):
manager = self.manager
d_t_d_f_m = self.d_target_d_f_model_work()
return d_t_d_f_m.array(
data=d_t_d_f_m.data() * manager.data_core.d_f_model_core_data_d_f_atoms()
      / manager.norma_sum_f_sq_w)
def ls_sigma_weights(f_obs):
if(f_obs.sigmas() is not None):
sigmas_squared = flex.pow2(f_obs.sigmas())
else:
sigmas_squared = flex.double(f_obs.data().size(), 1.0)
assert sigmas_squared.all_gt(0)
weights = 1 / sigmas_squared
return weights
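# ls_sigma_weights: standard sigma weighting, w = 1/sigma**2; unit weights
# are used when no sigmas are present.  E.g. a reflection with sigma(F) = 2.0
# gets weight 0.25.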
def kb_range(x_max, x_min, step):
x_range = []
x = x_min
while x <= x_max + 0.0001:
x_range.append(x)
x += step
return x_range
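# kb_range builds an inclusive grid, e.g. kb_range(1.0, 0.0, 0.25) returns
# [0.0, 0.25, 0.5, 0.75, 1.0]; the +0.0001 guards against floating-point
# round-off at the upper bound.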
class resolution_bin(object):
def __init__(self,
i_bin = None,
d_range = None,
completeness = None,
alpha_work = None,
beta_work = None,
r_work = None,
r_free = None,
target_work = None,
target_free = None,
n_work = None,
n_free = None,
mean_f_obs = None,
fom_work = None,
scale_k1_work= None,
pher_work = None,
pher_free = None,
cc_work = None,
cc_free = None):
adopt_init_args(self, locals())
def n_as_s(format, value):
if value == "none":
return "None"
if value == "None":
return "None"
if ( value is None ):
return format_value(format=format, value=value)
if (isinstance(value, (int, float))):
return (format % value).strip()
return [(format % v).strip() for v in value]
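# n_as_s formats scalars or sequences, e.g. n_as_s("%6.2f", 3.14159) -> "3.14"
# and n_as_s("%7.2f", (1.0, 2.5)) -> ["1.00", "2.50"]; a None value is
# delegated to format_value (assumed imported from libtbx.str_utils).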

# --- src/sentry/utils/distutils/commands/build_assets.py (learninto/sentry, BSD-3-Clause) ---
from __future__ import absolute_import
import json
import datetime
import os
import os.path
import sys
import traceback
from distutils import log
from .base import BaseBuildCommand
class BuildAssetsCommand(BaseBuildCommand):
user_options = BaseBuildCommand.user_options + [
(
"asset-json-path=",
None,
"Relative path for JSON manifest. Defaults to {dist_name}/assets.json",
),
(
"inplace",
"i",
"ignore build-lib and put compiled javascript files into the source "
+ "directory alongside your pure Python modules",
),
(
"force",
"f",
"Force rebuilding of static content. Defaults to rebuilding on version "
"change detection.",
),
]
description = "build static media assets"
def initialize_options(self):
self.asset_json_path = u"{}/assets.json".format(self.distribution.get_name())
BaseBuildCommand.initialize_options(self)
def get_dist_paths(self):
return ["src/sentry/static/sentry/dist"]
def get_manifest_additions(self):
return ("src/" + self.asset_json_path,)
def _get_package_version(self):
"""
Attempt to get the most correct current version of Sentry.
"""
pkg_path = os.path.join(self.work_path, "src")
sys.path.insert(0, pkg_path)
try:
import sentry
except Exception:
version = None
build = None
else:
log.info(u"pulled version information from 'sentry' module".format(sentry.__file__))
version = self.distribution.get_version()
build = sentry.__build__
finally:
sys.path.pop(0)
if not (version and build):
json_path = self.get_asset_json_path()
try:
with open(json_path) as fp:
data = json.loads(fp.read())
except Exception:
pass
else:
log.info(u"pulled version information from '{}'".format(json_path))
version, build = data["version"], data["build"]
return {"version": version, "build": build}
def _needs_static(self, version_info):
json_path = self.get_asset_json_path()
if not os.path.exists(json_path):
return True
with open(json_path) as fp:
data = json.load(fp)
if data.get("version") != version_info.get("version"):
return True
if data.get("build") != version_info.get("build"):
return True
return False
def _needs_built(self):
if BaseBuildCommand._needs_built(self):
return True
version_info = self._get_package_version()
return self._needs_static(version_info)
def _build(self):
version_info = self._get_package_version()
log.info(
u"building assets for {} v{} (build {})".format(
self.distribution.get_name(),
version_info["version"] or "UNKNOWN",
version_info["build"] or "UNKNOWN",
)
)
if not version_info["version"] or not version_info["build"]:
log.fatal("Could not determine sentry version or build")
sys.exit(1)
try:
self._build_static()
except Exception:
traceback.print_exc()
log.fatal("unable to build Sentry's static assets!")
sys.exit(1)
log.info("writing version manifest")
manifest = self._write_version_file(version_info)
log.info(u"recorded manifest\n{}".format(json.dumps(manifest, indent=2)))
def _build_static(self):
# By setting NODE_ENV=production, a few things happen
# * React optimizes out certain code paths
# * Webpack will add version strings to built/referenced assets
env = dict(os.environ)
env["SENTRY_STATIC_DIST_PATH"] = self.sentry_static_dist_path
env["NODE_ENV"] = "production"
self._run_yarn_command(["webpack", "--bail"], env=env)
def _write_version_file(self, version_info):
manifest = {
"createdAt": datetime.datetime.utcnow().isoformat() + "Z",
"version": version_info["version"],
"build": version_info["build"],
}
with open(self.get_asset_json_path(), "w") as fp:
json.dump(manifest, fp)
return manifest
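    # The manifest written above looks like this (values are illustrative):
    #   {"createdAt": "2020-01-01T00:00:00Z",
    #    "version": "10.0.0",
    #    "build": "abc123"}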
@property
def sentry_static_dist_path(self):
return os.path.abspath(os.path.join(self.build_lib, "sentry/static/sentry/dist"))
def get_asset_json_path(self):
return os.path.abspath(os.path.join(self.build_lib, self.asset_json_path))
79084f131a2a1af3dcb9660c0d56edb7174ed380
| 910
|
py
|
Python
|
app/http/controllers/QuoteController.py
|
code-weather/capstone_backend_tailwindCSS
|
57c02c5301c14a5b18fbf062bba6934386df32af
|
[
"MIT"
] | null | null | null |
app/http/controllers/QuoteController.py
|
code-weather/capstone_backend_tailwindCSS
|
57c02c5301c14a5b18fbf062bba6934386df32af
|
[
"MIT"
] | null | null | null |
app/http/controllers/QuoteController.py
|
code-weather/capstone_backend_tailwindCSS
|
57c02c5301c14a5b18fbf062bba6934386df32af
|
[
"MIT"
] | null | null | null |
""" A QuoteController Module """
from masonite.controllers import Controller
from masonite.request import Request
from app.Quote import Quote
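# CRUD controller for the Quote model: show/index read records, while
# create/update/destroy write through Masonite's active-record style query
# methods (the matching route file is not part of this record).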
class QuoteController(Controller):
def __init__(self, request: Request):
self.request = request
def show(self):
id = self.request.param("id")
return Quote.find(id)
def index(self):
return Quote.all()
def create(self):
subject = self.request.input("subject")
quote = Quote.create({"subject": subject})
return quote
def update(self):
subject = self.request.input("subject")
id = self.request.param("id")
Quote.where("id", id).update({"subject": subject})
return Quote.where("id", id).get()
def destroy(self):
id = self.request.param("id")
quote = Quote.where("id", id).get()
Quote.where("id", id).delete()
return quote
| 26.764706
| 58
| 0.613187
|
from masonite.controllers import Controller
from masonite.request import Request
from app.Quote import Quote
class QuoteController(Controller):
def __init__(self, request: Request):
self.request = request
def show(self):
id = self.request.param("id")
return Quote.find(id)
def index(self):
return Quote.all()
def create(self):
subject = self.request.input("subject")
quote = Quote.create({"subject": subject})
return quote
def update(self):
subject = self.request.input("subject")
id = self.request.param("id")
Quote.where("id", id).update({"subject": subject})
return Quote.where("id", id).get()
def destroy(self):
id = self.request.param("id")
quote = Quote.where("id", id).get()
Quote.where("id", id).delete()
return quote
| true
| true
|
7908504b14297053b56a66d358d5e0e8e487d6ba
| 1,886
|
py
|
Python
|
sky/engine/build/scripts/make_element_factory.py
|
gitFreeByte/sky_engine
|
05c9048930f8a0d39c2f6385ba691eccbbdabb20
|
[
"BSD-3-Clause"
] | 1
|
2021-06-12T00:47:11.000Z
|
2021-06-12T00:47:11.000Z
|
sky/engine/build/scripts/make_element_factory.py
|
gitFreeByte/sky_engine
|
05c9048930f8a0d39c2f6385ba691eccbbdabb20
|
[
"BSD-3-Clause"
] | null | null | null |
sky/engine/build/scripts/make_element_factory.py
|
gitFreeByte/sky_engine
|
05c9048930f8a0d39c2f6385ba691eccbbdabb20
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from collections import defaultdict
import in_generator
import template_expander
import name_utilities
from make_qualified_names import MakeQualifiedNamesWriter
class MakeElementFactoryWriter(MakeQualifiedNamesWriter):
pass
if __name__ == "__main__":
in_generator.Maker(MakeElementFactoryWriter).main(sys.argv)
| 42.863636
| 72
| 0.792683
|
import sys
from collections import defaultdict
import in_generator
import template_expander
import name_utilities
from make_qualified_names import MakeQualifiedNamesWriter
class MakeElementFactoryWriter(MakeQualifiedNamesWriter):
pass
if __name__ == "__main__":
in_generator.Maker(MakeElementFactoryWriter).main(sys.argv)
| true
| true
|
790850e2acd33fd1d6014089093e4ebb5705db43
| 2,543
|
py
|
Python
|
test/functional/wallet_startup.py
|
ludirium/ludirium
|
d2c6d7855ed98c62b6c9431e695d9b1a791255a6
|
[
"MIT"
] | null | null | null |
test/functional/wallet_startup.py
|
ludirium/ludirium
|
d2c6d7855ed98c62b6c9431e695d9b1a791255a6
|
[
"MIT"
] | null | null | null |
test/functional/wallet_startup.py
|
ludirium/ludirium
|
d2c6d7855ed98c62b6c9431e695d9b1a791255a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Ludirium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet load on startup.
Verify that a ludiriumd node can maintain list of wallets loading on startup
"""
from test_framework.test_framework import LudiriumTestFramework
from test_framework.util import (
assert_equal,
)
class WalletStartupTest(LudiriumTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_nodes(self):
self.add_nodes(self.num_nodes)
self.start_nodes()
def run_test(self):
self.log.info('Should start without any wallets')
assert_equal(self.nodes[0].listwallets(), [])
assert_equal(self.nodes[0].listwalletdir(), {'wallets': []})
self.log.info('New default wallet should load by default when there are no other wallets')
self.nodes[0].createwallet(wallet_name='', load_on_startup=False)
self.restart_node(0)
assert_equal(self.nodes[0].listwallets(), [''])
self.log.info('Test load on startup behavior')
self.nodes[0].createwallet(wallet_name='w0', load_on_startup=True)
self.nodes[0].createwallet(wallet_name='w1', load_on_startup=False)
self.nodes[0].createwallet(wallet_name='w2', load_on_startup=True)
self.nodes[0].createwallet(wallet_name='w3', load_on_startup=False)
self.nodes[0].createwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w0', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].loadwallet(filename='w4', load_on_startup=True)
assert_equal(set(self.nodes[0].listwallets()), set(('', 'w1', 'w2', 'w3', 'w4')))
self.restart_node(0)
assert_equal(set(self.nodes[0].listwallets()), set(('', 'w2', 'w4')))
self.nodes[0].unloadwallet(wallet_name='', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].loadwallet(filename='w3', load_on_startup=True)
self.nodes[0].loadwallet(filename='')
self.restart_node(0)
assert_equal(set(self.nodes[0].listwallets()), set(('w2', 'w3')))
if __name__ == '__main__':
WalletStartupTest().main()
| 43.101695
| 98
| 0.69013
|
from test_framework.test_framework import LudiriumTestFramework
from test_framework.util import (
assert_equal,
)
class WalletStartupTest(LudiriumTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_nodes(self):
self.add_nodes(self.num_nodes)
self.start_nodes()
def run_test(self):
self.log.info('Should start without any wallets')
assert_equal(self.nodes[0].listwallets(), [])
assert_equal(self.nodes[0].listwalletdir(), {'wallets': []})
self.log.info('New default wallet should load by default when there are no other wallets')
self.nodes[0].createwallet(wallet_name='', load_on_startup=False)
self.restart_node(0)
assert_equal(self.nodes[0].listwallets(), [''])
self.log.info('Test load on startup behavior')
self.nodes[0].createwallet(wallet_name='w0', load_on_startup=True)
self.nodes[0].createwallet(wallet_name='w1', load_on_startup=False)
self.nodes[0].createwallet(wallet_name='w2', load_on_startup=True)
self.nodes[0].createwallet(wallet_name='w3', load_on_startup=False)
self.nodes[0].createwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w0', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].loadwallet(filename='w4', load_on_startup=True)
assert_equal(set(self.nodes[0].listwallets()), set(('', 'w1', 'w2', 'w3', 'w4')))
self.restart_node(0)
assert_equal(set(self.nodes[0].listwallets()), set(('', 'w2', 'w4')))
self.nodes[0].unloadwallet(wallet_name='', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].loadwallet(filename='w3', load_on_startup=True)
self.nodes[0].loadwallet(filename='')
self.restart_node(0)
assert_equal(set(self.nodes[0].listwallets()), set(('w2', 'w3')))
if __name__ == '__main__':
WalletStartupTest().main()
| true
| true
|
79085161c84a2e62516e76faf0cf380d40a1d3b6
| 256
|
py
|
Python
|
claims/admin.py
|
joinrpg/joinrpg
|
90d00edcdc7035d31c9ffc7598158bac61e65dbd
|
[
"Apache-2.0"
] | 2
|
2016-11-02T11:15:42.000Z
|
2017-10-03T08:28:18.000Z
|
claims/admin.py
|
joinrpg/joinrpg-unused
|
90d00edcdc7035d31c9ffc7598158bac61e65dbd
|
[
"Apache-2.0"
] | 33
|
2016-03-22T16:11:37.000Z
|
2016-03-22T16:12:17.000Z
|
claims/admin.py
|
joinrpg/joinrpg
|
90d00edcdc7035d31c9ffc7598158bac61e65dbd
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from claims import models
# Register your models here.
admin.site.register(models.AddressCountry)
admin.site.register(models.AddressRegion)
admin.site.register(models.AddressCity)
admin.site.register(models.ProjectStatus)
| 28.444444
| 42
| 0.839844
|
from django.contrib import admin
from claims import models
admin.site.register(models.AddressCountry)
admin.site.register(models.AddressRegion)
admin.site.register(models.AddressCity)
admin.site.register(models.ProjectStatus)
| true
| true
|
790852d587fd06e0938160c8b2f6cd3d9398de78
| 576
|
py
|
Python
|
scripts/secex_monthly/_pci_wld_eci.py
|
DataViva/dataviva-scripts
|
1e36f11e2849c33b8118cefe1755d312b19c0ecd
|
[
"MIT"
] | 10
|
2015-05-20T14:41:23.000Z
|
2020-05-27T22:36:19.000Z
|
scripts/secex_monthly/_pci_wld_eci.py
|
DataViva/dataviva-scripts
|
1e36f11e2849c33b8118cefe1755d312b19c0ecd
|
[
"MIT"
] | 11
|
2018-05-17T14:30:58.000Z
|
2018-09-06T21:20:34.000Z
|
scripts/secex_monthly/_pci_wld_eci.py
|
DataViva/dataviva-scripts
|
1e36f11e2849c33b8118cefe1755d312b19c0ecd
|
[
"MIT"
] | 12
|
2015-07-14T13:46:41.000Z
|
2019-09-20T00:47:10.000Z
|
import sys
import pandas as pd
def pci_wld_eci(eci_file_path, pci_file_path, ymp, ymw, year):
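    # Merge yearly complexity indices into the monthly tables: PCI (product
    # complexity) is keyed on (year, month, hs_id) and ECI (economic
    # complexity) on (year, month, wld_id); month "00" marks the
    # full-year aggregate row, so the index-aligned assignments below attach
    # each index to the matching rows of ymp and ymw.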
pcis = pd.read_csv(pci_file_path, sep="\t", compression="bz2", converters={"hs_id": str})
pcis["year"] = int(year)
pcis["month"] = "00"
pcis = pcis.set_index(["year", "month", "hs_id"])
ecis = pd.read_csv(eci_file_path, sep="\t", compression="bz2")
ecis["year"] = int(year)
ecis["month"] = "00"
ecis = ecis.set_index(["year", "month", "wld_id"])
ymp["pci"] = pcis["pci"]
ymw["eci"] = ecis["eci"]
return [ymp, ymw]
| 28.8
| 93
| 0.583333
|
import sys
import pandas as pd
def pci_wld_eci(eci_file_path, pci_file_path, ymp, ymw, year):
pcis = pd.read_csv(pci_file_path, sep="\t", compression="bz2", converters={"hs_id": str})
pcis["year"] = int(year)
pcis["month"] = "00"
pcis = pcis.set_index(["year", "month", "hs_id"])
ecis = pd.read_csv(eci_file_path, sep="\t", compression="bz2")
ecis["year"] = int(year)
ecis["month"] = "00"
ecis = ecis.set_index(["year", "month", "wld_id"])
ymp["pci"] = pcis["pci"]
ymw["eci"] = ecis["eci"]
return [ymp, ymw]
| true
| true
|
7908534f4477b84eacc349451f3f5f5efa3271cb
| 2,603
|
py
|
Python
|
django_splitdate/tests/runtests.py
|
Mactory/django-splitdate
|
58a48f048a5deeb2fd7b46f9c403bb6c2f34309a
|
[
"MIT"
] | 2
|
2015-02-19T16:26:05.000Z
|
2016-07-18T23:20:35.000Z
|
django_splitdate/tests/runtests.py
|
Mactory/django-splitdate
|
58a48f048a5deeb2fd7b46f9c403bb6c2f34309a
|
[
"MIT"
] | 1
|
2018-03-04T20:56:56.000Z
|
2018-03-04T20:56:56.000Z
|
django_splitdate/tests/runtests.py
|
Mactory/django-splitdate
|
58a48f048a5deeb2fd7b46f9c403bb6c2f34309a
|
[
"MIT"
] | 1
|
2018-03-04T20:54:39.000Z
|
2018-03-04T20:54:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
__author__ = 'Tim Schneider <tim.schneider@northbridge-development.de>'
__copyright__ = "Copyright 2015, Northbridge Development Konrad & Schneider GbR"
__credits__ = ["Tim Schneider", ]
__maintainer__ = "Tim Schneider"
__email__ = "mail@northbridge-development.de"
__status__ = "Development"
logger = logging.getLogger(__name__)
import glob
import os
import sys
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
print BASE_DIR
sys.path.insert(0, os.path.abspath(BASE_DIR))
try:
import coverage # Import coverage if available
cov = coverage.coverage(
cover_pylib=False,
config_file=os.path.join(os.path.dirname(__file__), 'coverage.conf'),
include='%s/*' % BASE_DIR,
)
cov.start()
sys.stdout.write('Using coverage\n')
except ImportError:
cov = None
sys.stdout.write('Coverage not available. To evaluate the coverage, please install coverage.\n')
import django
from django.conf import settings
from django.core.management import execute_from_command_line
# Unfortunately, apps can not be installed via ``modify_settings``
# decorator, because it would miss the database setup.
INSTALLED_APPS = (
'django_splitdate',
)
settings.configure(
SECRET_KEY="django_tests_secret_key",
DEBUG=False,
TEMPLATE_DEBUG=False,
ALLOWED_HOSTS=[],
INSTALLED_APPS=INSTALLED_APPS,
MIDDLEWARE_CLASSES=[],
ROOT_URLCONF='tests.urls',
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
LANGUAGE_CODE='en-us',
TIME_ZONE='UTC',
USE_I18N=True,
USE_L10N=True,
USE_TZ=True,
STATIC_URL='/static/',
# Use a fast hasher to speed up tests.
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
FIXTURE_DIRS=glob.glob(BASE_DIR + '/' + '*/fixtures/')
)
django.setup()
args = [sys.argv[0], 'test']
# Current module (``tests``) and its submodules.
test_cases = '.'
# Allow accessing test options from the command line.
offset = 1
try:
sys.argv[1]
except IndexError:
pass
else:
option = sys.argv[1].startswith('-')
if not option:
test_cases = sys.argv[1]
offset = 2
args.append(test_cases)
# ``verbosity`` can be overwritten from command line.
#args.append('--verbosity=2')
args.extend(sys.argv[offset:])
execute_from_command_line(args)
if cov is not None:
sys.stdout.write('Evaluating Coverage\n')
cov.stop()
cov.save()
sys.stdout.write('Generating HTML Report\n')
cov.html_report()
| 25.271845
| 100
| 0.688436
|
import logging
__author__ = 'Tim Schneider <tim.schneider@northbridge-development.de>'
__copyright__ = "Copyright 2015, Northbridge Development Konrad & Schneider GbR"
__credits__ = ["Tim Schneider", ]
__maintainer__ = "Tim Schneider"
__email__ = "mail@northbridge-development.de"
__status__ = "Development"
logger = logging.getLogger(__name__)
import glob
import os
import sys
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
print BASE_DIR
sys.path.insert(0, os.path.abspath(BASE_DIR))
try:
import coverage
cov = coverage.coverage(
cover_pylib=False,
config_file=os.path.join(os.path.dirname(__file__), 'coverage.conf'),
include='%s/*' % BASE_DIR,
)
cov.start()
sys.stdout.write('Using coverage\n')
except ImportError:
cov = None
sys.stdout.write('Coverage not available. To evaluate the coverage, please install coverage.\n')
import django
from django.conf import settings
from django.core.management import execute_from_command_line
INSTALLED_APPS = (
'django_splitdate',
)
settings.configure(
SECRET_KEY="django_tests_secret_key",
DEBUG=False,
TEMPLATE_DEBUG=False,
ALLOWED_HOSTS=[],
INSTALLED_APPS=INSTALLED_APPS,
MIDDLEWARE_CLASSES=[],
ROOT_URLCONF='tests.urls',
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
LANGUAGE_CODE='en-us',
TIME_ZONE='UTC',
USE_I18N=True,
USE_L10N=True,
USE_TZ=True,
STATIC_URL='/static/',
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
FIXTURE_DIRS=glob.glob(BASE_DIR + '/' + '*/fixtures/')
)
django.setup()
args = [sys.argv[0], 'test']
test_cases = '.'
offset = 1
try:
sys.argv[1]
except IndexError:
pass
else:
option = sys.argv[1].startswith('-')
if not option:
test_cases = sys.argv[1]
offset = 2
args.append(test_cases)
args.extend(sys.argv[offset:])
execute_from_command_line(args)
if cov is not None:
sys.stdout.write('Evaluating Coverage\n')
cov.stop()
cov.save()
sys.stdout.write('Generating HTML Report\n')
cov.html_report()
| false
| true
|
79085417c55bd1998bdacaf37aa3f5a99211e440
| 4,902
|
py
|
Python
|
site-packages/celery/tests/utils/test_functional.py
|
suntao789/Aclsm
|
2202201c8279391386a4569e69f93d90eca5b96a
|
[
"Apache-2.0"
] | null | null | null |
site-packages/celery/tests/utils/test_functional.py
|
suntao789/Aclsm
|
2202201c8279391386a4569e69f93d90eca5b96a
|
[
"Apache-2.0"
] | null | null | null |
site-packages/celery/tests/utils/test_functional.py
|
suntao789/Aclsm
|
2202201c8279391386a4569e69f93d90eca5b96a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import pickle
from kombu.utils.functional import lazy
from celery.five import THREAD_TIMEOUT_MAX, items, range, nextfun
from celery.utils.functional import (
LRUCache,
firstmethod,
first,
mlazy,
padlist,
maybe_list,
)
from celery.tests.case import Case
class test_LRUCache(Case):
def test_expires(self):
limit = 100
x = LRUCache(limit=limit)
slots = list(range(limit * 2))
for i in slots:
x[i] = i
self.assertListEqual(list(x.keys()), list(slots[limit:]))
self.assertTrue(x.items())
self.assertTrue(x.values())
def test_is_pickleable(self):
x = LRUCache(limit=10)
x.update(luke=1, leia=2)
y = pickle.loads(pickle.dumps(x))
        self.assertEqual(y.limit, x.limit)
self.assertEqual(y, x)
def test_update_expires(self):
limit = 100
x = LRUCache(limit=limit)
slots = list(range(limit * 2))
for i in slots:
x.update({i: i})
self.assertListEqual(list(x.keys()), list(slots[limit:]))
def test_least_recently_used(self):
x = LRUCache(3)
x[1], x[2], x[3] = 1, 2, 3
self.assertEqual(list(x.keys()), [1, 2, 3])
x[4], x[5] = 4, 5
self.assertEqual(list(x.keys()), [3, 4, 5])
# access 3, which makes it the last used key.
x[3]
x[6] = 6
self.assertEqual(list(x.keys()), [5, 3, 6])
x[7] = 7
self.assertEqual(list(x.keys()), [3, 6, 7])
def assertSafeIter(self, method, interval=0.01, size=10000):
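        # Concurrency smoke test: a "Burglar" thread pops items out of the
        # cache while the main thread iterates via the given method name;
        # the iteration must survive concurrent removals without raising.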
from threading import Thread, Event
from time import sleep
x = LRUCache(size)
x.update(zip(range(size), range(size)))
class Burglar(Thread):
def __init__(self, cache):
self.cache = cache
self.__is_shutdown = Event()
self.__is_stopped = Event()
Thread.__init__(self)
def run(self):
while not self.__is_shutdown.isSet():
try:
self.cache.data.popitem(last=False)
except KeyError:
break
self.__is_stopped.set()
def stop(self):
self.__is_shutdown.set()
self.__is_stopped.wait()
self.join(THREAD_TIMEOUT_MAX)
burglar = Burglar(x)
burglar.start()
try:
for _ in getattr(x, method)():
sleep(0.0001)
finally:
burglar.stop()
def test_safe_to_remove_while_iteritems(self):
self.assertSafeIter('iteritems')
def test_safe_to_remove_while_keys(self):
self.assertSafeIter('keys')
def test_safe_to_remove_while_itervalues(self):
self.assertSafeIter('itervalues')
def test_items(self):
c = LRUCache()
c.update(a=1, b=2, c=3)
self.assertTrue(list(items(c)))
class test_utils(Case):
def test_padlist(self):
self.assertListEqual(
padlist(['George', 'Costanza', 'NYC'], 3),
['George', 'Costanza', 'NYC'],
)
self.assertListEqual(
padlist(['George', 'Costanza'], 3),
['George', 'Costanza', None],
)
self.assertListEqual(
padlist(['George', 'Costanza', 'NYC'], 4, default='Earth'),
['George', 'Costanza', 'NYC', 'Earth'],
)
def test_firstmethod_AttributeError(self):
self.assertIsNone(firstmethod('foo')([object()]))
def test_firstmethod_handles_lazy(self):
class A(object):
def __init__(self, value=None):
self.value = value
def m(self):
return self.value
self.assertEqual('four', firstmethod('m')([
A(), A(), A(), A('four'), A('five')]))
self.assertEqual('four', firstmethod('m')([
A(), A(), A(), lazy(lambda: A('four')), A('five')]))
def test_first(self):
iterations = [0]
def predicate(value):
iterations[0] += 1
if value == 5:
return True
return False
self.assertEqual(5, first(predicate, range(10)))
self.assertEqual(iterations[0], 6)
iterations[0] = 0
self.assertIsNone(first(predicate, range(10, 20)))
self.assertEqual(iterations[0], 10)
def test_maybe_list(self):
self.assertEqual(maybe_list(1), [1])
self.assertEqual(maybe_list([1]), [1])
self.assertIsNone(maybe_list(None))
class test_mlazy(Case):
def test_is_memoized(self):
it = iter(range(20, 30))
p = mlazy(nextfun(it))
self.assertEqual(p(), 20)
self.assertTrue(p.evaluated)
self.assertEqual(p(), 20)
self.assertEqual(repr(p), '20')
| 27.082873
| 71
| 0.5459
|
from __future__ import absolute_import
import pickle
from kombu.utils.functional import lazy
from celery.five import THREAD_TIMEOUT_MAX, items, range, nextfun
from celery.utils.functional import (
LRUCache,
firstmethod,
first,
mlazy,
padlist,
maybe_list,
)
from celery.tests.case import Case
class test_LRUCache(Case):
def test_expires(self):
limit = 100
x = LRUCache(limit=limit)
slots = list(range(limit * 2))
for i in slots:
x[i] = i
self.assertListEqual(list(x.keys()), list(slots[limit:]))
self.assertTrue(x.items())
self.assertTrue(x.values())
def test_is_pickleable(self):
x = LRUCache(limit=10)
x.update(luke=1, leia=2)
y = pickle.loads(pickle.dumps(x))
        self.assertEqual(y.limit, x.limit)
self.assertEqual(y, x)
def test_update_expires(self):
limit = 100
x = LRUCache(limit=limit)
slots = list(range(limit * 2))
for i in slots:
x.update({i: i})
self.assertListEqual(list(x.keys()), list(slots[limit:]))
def test_least_recently_used(self):
x = LRUCache(3)
x[1], x[2], x[3] = 1, 2, 3
self.assertEqual(list(x.keys()), [1, 2, 3])
x[4], x[5] = 4, 5
self.assertEqual(list(x.keys()), [3, 4, 5])
x[3]
x[6] = 6
self.assertEqual(list(x.keys()), [5, 3, 6])
x[7] = 7
self.assertEqual(list(x.keys()), [3, 6, 7])
def assertSafeIter(self, method, interval=0.01, size=10000):
from threading import Thread, Event
from time import sleep
x = LRUCache(size)
x.update(zip(range(size), range(size)))
class Burglar(Thread):
def __init__(self, cache):
self.cache = cache
self.__is_shutdown = Event()
self.__is_stopped = Event()
Thread.__init__(self)
def run(self):
while not self.__is_shutdown.isSet():
try:
self.cache.data.popitem(last=False)
except KeyError:
break
self.__is_stopped.set()
def stop(self):
self.__is_shutdown.set()
self.__is_stopped.wait()
self.join(THREAD_TIMEOUT_MAX)
burglar = Burglar(x)
burglar.start()
try:
for _ in getattr(x, method)():
sleep(0.0001)
finally:
burglar.stop()
def test_safe_to_remove_while_iteritems(self):
self.assertSafeIter('iteritems')
def test_safe_to_remove_while_keys(self):
self.assertSafeIter('keys')
def test_safe_to_remove_while_itervalues(self):
self.assertSafeIter('itervalues')
def test_items(self):
c = LRUCache()
c.update(a=1, b=2, c=3)
self.assertTrue(list(items(c)))
class test_utils(Case):
def test_padlist(self):
self.assertListEqual(
padlist(['George', 'Costanza', 'NYC'], 3),
['George', 'Costanza', 'NYC'],
)
self.assertListEqual(
padlist(['George', 'Costanza'], 3),
['George', 'Costanza', None],
)
self.assertListEqual(
padlist(['George', 'Costanza', 'NYC'], 4, default='Earth'),
['George', 'Costanza', 'NYC', 'Earth'],
)
def test_firstmethod_AttributeError(self):
self.assertIsNone(firstmethod('foo')([object()]))
def test_firstmethod_handles_lazy(self):
class A(object):
def __init__(self, value=None):
self.value = value
def m(self):
return self.value
self.assertEqual('four', firstmethod('m')([
A(), A(), A(), A('four'), A('five')]))
self.assertEqual('four', firstmethod('m')([
A(), A(), A(), lazy(lambda: A('four')), A('five')]))
def test_first(self):
iterations = [0]
def predicate(value):
iterations[0] += 1
if value == 5:
return True
return False
self.assertEqual(5, first(predicate, range(10)))
self.assertEqual(iterations[0], 6)
iterations[0] = 0
self.assertIsNone(first(predicate, range(10, 20)))
self.assertEqual(iterations[0], 10)
def test_maybe_list(self):
self.assertEqual(maybe_list(1), [1])
self.assertEqual(maybe_list([1]), [1])
self.assertIsNone(maybe_list(None))
class test_mlazy(Case):
def test_is_memoized(self):
it = iter(range(20, 30))
p = mlazy(nextfun(it))
self.assertEqual(p(), 20)
self.assertTrue(p.evaluated)
self.assertEqual(p(), 20)
self.assertEqual(repr(p), '20')
| true
| true
|
7908546a82ed99e9708152d48a1748c8b6c982c4
| 232
|
py
|
Python
|
Aula 1/aula2.py
|
Carlosouzavalle/Python
|
10bfedb99fec39f679d55e6729751595b6712627
|
[
"MIT"
] | null | null | null |
Aula 1/aula2.py
|
Carlosouzavalle/Python
|
10bfedb99fec39f679d55e6729751595b6712627
|
[
"MIT"
] | null | null | null |
Aula 1/aula2.py
|
Carlosouzavalle/Python
|
10bfedb99fec39f679d55e6729751595b6712627
|
[
"MIT"
] | null | null | null |
# corresponds to video 6 of the course
# First steps
n = input('Digite algo: ')
print(n.isnumeric())  # whether the input is numeric
print(n.isalpha())  # whether it is alphabetic
print(n.isalnum())  # whether it is alphanumeric
print(n.isupper())  # whether it is all uppercase
| 29
| 42
| 0.706897
|
n = input('Digite algo: ')
print(n.isnumeric())
print(n.isalpha())
print(n.isalnum())
print(n.isupper())
| true
| true
|
7908547a3f81245633ce604c81999a78a9c5f532
| 1,970
|
py
|
Python
|
tests/bundles/sqlalchemy/services/test_session_manager.py
|
achiang/flask-unchained
|
12788a6e618904a25ff2b571eb05ff1dc8f1840f
|
[
"MIT"
] | 69
|
2018-10-10T01:59:11.000Z
|
2022-03-29T17:29:30.000Z
|
tests/bundles/sqlalchemy/services/test_session_manager.py
|
achiang/flask-unchained
|
12788a6e618904a25ff2b571eb05ff1dc8f1840f
|
[
"MIT"
] | 18
|
2018-11-17T12:42:02.000Z
|
2021-05-22T18:45:27.000Z
|
tests/bundles/sqlalchemy/services/test_session_manager.py
|
achiang/flask-unchained
|
12788a6e618904a25ff2b571eb05ff1dc8f1840f
|
[
"MIT"
] | 7
|
2018-10-12T16:20:25.000Z
|
2021-10-06T12:18:21.000Z
|
from flask_unchained.bundles.sqlalchemy import SessionManager, SQLAlchemyUnchained
def setup(db: SQLAlchemyUnchained):
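    # Shared fixture helper: defines an eagerly mapped Foo model
    # (lazy_mapped = False), creates the tables, and returns the model class
    # together with a SessionManager bound to this db instance.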
session_manager = SessionManager(db)
class Foo(db.Model):
class Meta:
lazy_mapped = False
name = db.Column(db.String)
db.create_all()
return Foo, session_manager
class TestSessionManager:
def test_save(self, db: SQLAlchemyUnchained):
Foo, session_manager = setup(db)
foo = Foo(name='foo')
session_manager.save(foo)
# check it's added to the session but not committed
assert foo in db.session
with db.session.no_autoflush:
assert Foo.q.get_by(name='foo') is None
# check the commit kwarg works
session_manager.save(foo, commit=True)
assert Foo.q.get_by(name='foo') == foo
def test_save_all(self, db: SQLAlchemyUnchained):
Foo, session_manager = setup(db)
foo1 = Foo(name='one')
foo2 = Foo(name='two')
foo3 = Foo(name='three')
all_ = [foo1, foo2, foo3]
session_manager.save_all(all_)
with db.session.no_autoflush:
for foo in all_:
assert foo in db.session
assert Foo.q.get_by(name=foo.name) is None
session_manager.save_all(all_, commit=True)
for foo in all_:
assert Foo.q.get_by(name=foo.name) == foo
def test_delete(self, db: SQLAlchemyUnchained):
Foo, session_manager = setup(db)
foo1 = Foo(name='one')
foo2 = Foo(name='two')
all_ = [foo1, foo2]
session_manager.save_all(all_, commit=True)
for foo in all_:
assert foo in db.session
assert Foo.q.get_by(name=foo.name) == foo
session_manager.delete(foo1, commit=True)
assert foo1 not in db.session
assert Foo.q.get_by(name='one') is None
assert foo2 in db.session
assert Foo.q.get_by(name='two') == foo2
| 28.970588
| 82
| 0.613198
|
from flask_unchained.bundles.sqlalchemy import SessionManager, SQLAlchemyUnchained
def setup(db: SQLAlchemyUnchained):
session_manager = SessionManager(db)
class Foo(db.Model):
class Meta:
lazy_mapped = False
name = db.Column(db.String)
db.create_all()
return Foo, session_manager
class TestSessionManager:
def test_save(self, db: SQLAlchemyUnchained):
Foo, session_manager = setup(db)
foo = Foo(name='foo')
session_manager.save(foo)
assert foo in db.session
with db.session.no_autoflush:
assert Foo.q.get_by(name='foo') is None
# check the commit kwarg works
session_manager.save(foo, commit=True)
assert Foo.q.get_by(name='foo') == foo
def test_save_all(self, db: SQLAlchemyUnchained):
Foo, session_manager = setup(db)
foo1 = Foo(name='one')
foo2 = Foo(name='two')
foo3 = Foo(name='three')
all_ = [foo1, foo2, foo3]
session_manager.save_all(all_)
with db.session.no_autoflush:
for foo in all_:
assert foo in db.session
assert Foo.q.get_by(name=foo.name) is None
session_manager.save_all(all_, commit=True)
for foo in all_:
assert Foo.q.get_by(name=foo.name) == foo
def test_delete(self, db: SQLAlchemyUnchained):
Foo, session_manager = setup(db)
foo1 = Foo(name='one')
foo2 = Foo(name='two')
all_ = [foo1, foo2]
session_manager.save_all(all_, commit=True)
for foo in all_:
assert foo in db.session
assert Foo.q.get_by(name=foo.name) == foo
session_manager.delete(foo1, commit=True)
assert foo1 not in db.session
assert Foo.q.get_by(name='one') is None
assert foo2 in db.session
assert Foo.q.get_by(name='two') == foo2
| true
| true
|
79085487abf137bfca6657ae5c230f19f6f1a642
| 3,605
|
py
|
Python
|
steam-scrapy/steam_scrape/middlewares.py
|
argwood/IndieP
|
938e50ae6c928d9b25732afb7dbfd6e2cd8f85d2
|
[
"MIT"
] | null | null | null |
steam-scrapy/steam_scrape/middlewares.py
|
argwood/IndieP
|
938e50ae6c928d9b25732afb7dbfd6e2cd8f85d2
|
[
"MIT"
] | null | null | null |
steam-scrapy/steam_scrape/middlewares.py
|
argwood/IndieP
|
938e50ae6c928d9b25732afb7dbfd6e2cd8f85d2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SteamScrapeSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class SteamScrapeDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| 34.663462
| 78
| 0.666852
|
from scrapy import signals
class SteamScrapeSpiderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
return None
def process_spider_output(self, response, result, spider):
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
pass
def process_start_requests(self, start_requests, spider):
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class SteamScrapeDownloaderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
return None
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| true
| true
|
79085655a2b2255e6cc4ae45576545c97d3ac894
| 144,292
|
py
|
Python
|
Chuckysb.py
|
sahrukanja/fryant1
|
8bd2ed4b3d3bda930d063a16ad9600bcb8348d68
|
[
"MIT"
] | null | null | null |
Chuckysb.py
|
sahrukanja/fryant1
|
8bd2ed4b3d3bda930d063a16ad9600bcb8348d68
|
[
"MIT"
] | null | null | null |
Chuckysb.py
|
sahrukanja/fryant1
|
8bd2ed4b3d3bda930d063a16ad9600bcb8348d68
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#Chucky_Bot
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
import shutil  # needed by the send*WithURL helpers below, which call shutil.copyfileobj
nadya = LINETCR.LINE()
#nadya.login(qr=True)
nadya.login(token='Eq8HO0fhYMrll5V2r6v3.uFyCY3rEW6udwsHCnFj70W.KD1Mlw3UQ67PLM8N+4pVdjTi1joYo3zu7hlhQV6XWuo=')
nadya.loginResult()
print "Nadya-Login Success\n\n=====[Sukses Login]====="
reload(sys)
sys.setdefaultencoding('utf-8')
selfMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT S E L F ☜☆
╠═════════════════════════
╠➩〘Hi〙
╠➩〘Me〙
╠➩〘Mymid〙
╠➩〘Mid @〙
╠➩〘SearchID: (ID LINE)〙
╠➩〘Checkdate (DD/MM/YY)〙
╠➩〘Kalender〙
╠➩〘Steal contact〙
╠➩〘Pp @〙
╠➩〘Cover @〙
╠➩〘Auto like〙
╠➩〘Scbc Text〙
╠➩〘Cbc Text〙
╠➩〘Gbc Text〙
╠➩〘Getbio @〙
╠➩〘Getinfo @〙
╠➩〘Getname @〙
╠➩〘Getprofile @〙
╠➩〘Getcontact @〙
╠➩〘Getvid @〙
╠➩〘Friendlist〙
╠➩〘Micadd @〙
╠➩〘Micdel @〙
╠➩〘Miclist〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
botMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT B O T ☜☆
╠═════════════════════════
╠➩〘Absen〙
╠➩〘Respon〙
╠➩〘Runtime〙
╠➩〘Mycopy @〙
╠➩〘Copycontact〙
╠➩〘Mybackup〙
╠➩〘Mybio (Text)〙
╠➩〘Myname (Text)〙
╠➩〘@bye〙
╠➩〘Bot on/off〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
mediaMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT M E D I A ☜☆
╠═════════════════════════
╠➩〘Gift〙
╠➩〘Gift1 @ s/d Gift10 @〙
╠➩〘Giftbycontact〙
╠➩〘Gif gore〙
╠➩〘Google: (Text)〙
╠➩〘Playstore NamaApp〙
╠➩〘Fancytext: Text〙
╠➩〘/musik Judul-Penyanyi〙
╠➩〘/lirik Judul-Penyanyi〙
╠➩〘/musrik Judul-Penyanyi〙
╠➩〘/ig UrsnameInstagram〙
╠➩〘Checkig UrsnameInstagram〙
╠➩〘/apakah Text (Kerang Ajaib)〙
╠➩〘/kapan Text (Kerang Ajaib)〙
╠➩〘/hari Text (Kerang Ajaib)〙
╠➩〘/berapa Text (Kerang Ajaib)〙
╠➩〘/berapakah Text〙
╠➩〘Youtubelink: Judul Video〙
╠➩〘Youtubevideo: Judul Video〙
╠➩〘Youtubesearch: Judul Video〙
╠➩〘Image NamaGambar〙
╠➩〘Say-id Text〙
╠➩〘Say-en Text〙
╠➩〘Say-jp Text〙
╠➩〘Image NamaGambar〙
╠➩〘Tr-id Text (Translate En Ke ID〙
╠➩〘Tr-en Text (Translate ID Ke En〙
╠➩〘Tr-th Text (Translate ID Ke Th〙
╠➩〘Id@en Text (Translate ID Ke En〙
╠➩〘Id@th Text (Translate ID Ke TH〙
╠➩〘En@id Text (Translate En Ke ID〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
groupMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT G R O U P ☜☆
╠═════════════════════════
╠➩〘Welcome〙
╠➩〘Say welcome〙
╠➩〘Invite creator〙
╠➩〘Setview〙
╠➩〘Viewseen〙
╠➩〘Gn: (NamaGroup)〙
╠➩〘Tag all〙
╠➩〘Recover〙
╠➩〘Cancel〙
╠➩〘Cancelall〙
╠➩〘Gcreator〙
╠➩〘Ginfo〙
╠➩〘Gurl〙
╠➩〘List group〙
╠➩〘Pict group: (NamaGroup)〙
╠➩〘Spam: (Text)〙
╠➩〘Add all〙
╠➩〘Kick: (Mid)〙
╠➩〘Invite: (Mid)〙
╠➩〘Invite〙
╠➩〘Memlist〙
╠➩〘Getgroup image〙
╠➩〘Urlgroup Image〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
tjia="u9f09cfcb17d037e2936b751bd9d40ead"
setMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT S E T ☜☆
╠═════════════════════════
╠➩〘Sambutan on/off〙
╠➩〘Mimic on/off〙
╠➩〘Url on/off〙
╠➩〘Alwaysread on/off〙
╠➩〘Sider on/off〙
╠➩〘Contact on/off〙
╠➩〘Sticker on〙
╠➩〘Simisimi on/off〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
creatorMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT C R E A T O R ☜☆
╠═════════════════════════
╠➩〘Crash〙
╠➩〘Kickall〙
╠➩〘Bc: (Text)〙
╠➩〘Join group: (NamaGroup〙
╠➩〘Leave group: (NamaGroup〙
╠➩〘Leave all group〙
╠➩〘Tag on/off〙
╠➩〘Bot restart〙
╠➩〘Turn off〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
adminMessage ="""
╔═════════════════════════
║ ☆☞ A D M I N ☜☆
╠═════════════════════════
╠➩〘Allprotect on/off〙
╠➩〘Ban〙
╠➩〘Unban〙
╠➩〘Ban @〙
╠➩〘Unban @〙
╠➩〘Ban list〙
╠➩〘Clear ban〙
╠➩〘Kill〙
╠➩〘Kick @〙
╠➩〘Set member: (Jumblah)〙
╠➩〘Ban group: (NamaGroup〙
╠➩〘Del ban: (NamaGroup〙
╠➩〘List ban〙
╠➩〘Kill ban〙
╠➩〘Glist〙
╠➩〘Glistmid〙
╠➩〘Details group: (Gid)〙
╠➩〘Cancel invite: (Gid)〙
╠➩〘Invitemeto: (Gid)〙
╠➩〘Acc invite〙
╠➩〘Removechat〙
╠➩〘Qr on/off〙
╠➩〘Autokick on/off〙
╠➩〘Autocancel on/off〙
╠➩〘Invitepro on/off〙
╠➩〘Join on/off〙
╠➩〘Joincancel on/off〙
╠➩〘Respon1 on/off〙
╠➩〘Respon2 on/off〙
╠➩〘Respon3 on/off〙
╠➩〘Responkick on/off〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
helpMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT H E L P ☜☆
╠═════════════════════════
╠➩〘Help self〙
╠➩〘Help bot〙
╠➩〘Help group〙
╠➩〘Help set〙
╠➩〘Help media〙
╠➩〘Help admin〙
╠➩〘Help creator〙
╠➩〘Owner〙
╠➩〘Pap owner〙
╠➩〘Speed〙
╠➩〘Speed test〙
╠➩〘Status〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
KAC=[nadya]
mid = nadya.getProfile().mid
Bots=[mid]
Creator=["u51f61ccb745ec3a50359285c35f27bd3"]
admin=["u51f61ccb745ec3a50359285c35f27bd3"]
contact = nadya.getProfile()
backup1 = nadya.getProfile()
backup1.displayName = contact.displayName
backup1.statusMessage = contact.statusMessage
backup1.pictureStatus = contact.pictureStatus
responsename = nadya.getProfile().displayName
wait = {
"LeaveRoom":True,
"Bot":True,
"AutoJoin":False,
"AutoJoinCancel":False,
"memberscancel":30,
"Members":1,
"AutoCancel":False,
"AutoKick":False,
'pap':{},
'invite':{},
'steal':{},
'gift':{},
'copy':{},
'likeOn':{},
'detectMention':False,
'detectMention2':False,
'detectMention3':True,
'kickMention':False,
'sticker':False,
'timeline':True,
"Timeline":True,
"comment":"Bot Auto Like ©By : Nadya\nContact Me : 👉 line.me/ti/p/~sepriche.",
"commentOn":True,
"commentBlack":{},
"message":"Thx For Add Me (^_^)\nInvite Me To Your Group ヘ(^_^)ヘ",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Qr":False,
"Contact":False,
"Sambutan":True,
"inviteprotect":False,
"alwaysRead":False,
"Sider":{},
"Simi":{},
"lang":"JP",
"BlGroup":{}
}
settings = {
"simiSimi":{}
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
def download_page(url):
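    # Fetch raw HTML with a browser User-Agent, choosing the urllib flavour
    # by interpreter version (urllib.request on Python 3, urllib2 on Python 2).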
version = (3,0)
cur_version = sys.version_info
if cur_version >= version:
        import urllib.request
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers = headers)
            resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else:
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
def _images_get_next_item(s):
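    # Scrape the next image URL (the "ou" field) out of a Google Images
    # result page by scanning its rg_meta JSON blobs; returns the link and
    # the offset where the scan should resume.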
start_line = s.find('rg_di')
if start_line == -1:
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item)
time.sleep(0.1)
page = page[end_content:]
return items
def waktu(secs):
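    # Format a duration in seconds as "HH Jam MM Menit SS Detik"
    # (Indonesian for hours/minutes/seconds).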
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"]
for texX in tex:
for command in commands:
            if string == command:
return True
return False
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
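    # NOTE: relies on module-level `album` and `image_path` globals and an
    # imgur-style client; neither global is defined anywhere in this file.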
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
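# --- Most of the helpers below take `self` yet are defined at module level;
# they appear to be copied out of the LINETCR client class and are never
# bound to an instance, so nothing in this script actually calls them. ---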
def sendAudio(self, to_, path):
M = Message()
M.text = None
M.to = to_
M.contentMetadata = None
M.contentPreview = None
M.contentType = 3
M_id = self._client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def sendImage(self, to_, path):
M = Message(to=to_, text=None, contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M2 = self._client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImageWithURL(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except:
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def sendAudioWithURL(self, to_, url):
path = self.downloadFileWithURL(url)
try:
self.sendAudio(to_, path)
except Exception as e:
raise Exception(e)
def sendAudioWithUrl(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True, verify=False)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
raise e
def downloadFileWithURL(self, fileUrl):
    saveAs = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = self.get_content(fileUrl)
if r.status_code == 200:
with open(saveAs, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return saveAs
else:
raise Exception('Download file failure.')
def summon(to, nama):
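    # Build a LINE text message that @-mentions every mid in `nama` by
    # filling the MENTION contentMetadata with character start/end offsets.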
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
nadya.sendMessage(msg)
except Exception as error:
print error
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def bot(op):
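    # Main operation dispatcher: handles LINE op types such as 5 (friend
    # add), 55 (read receipt, used for the "sider" watcher), 21/22 (room
    # leave), 13 (group invite), 19 (kick), 11 (QR link change), 17/15
    # (member join/leave) and 25/26 (sent/received messages).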
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
nadya.findAndAddContactsByMid(op.param1)
if(wait["message"]in[""," ","\n",None]):
pass
else:
nadya.sendText(op.param1,str(wait["message"]))
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = nadya.getContact(op.param2).displayName
# Name = summon(op.param2)
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
nadya.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nNgintip Aja Niih. . .\nChat Kek Idiih (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
nadya.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nBetah Banget Jadi Penonton. . .\nChat Napa (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
nadya.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nNgapain Kak Ngintip Aja???\nSini Gabung Chat... ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 22:
nadya.leaveRoom(op.param1)
if op.type == 21:
nadya.leaveRoom(op.param1)
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in Creator:
nadya.acceptGroupInvitation(op.param1)
if mid in op.param3:
if wait["AutoJoinCancel"] == True:
G = nadya.getGroup(op.param1)
if len(G.members) <= wait["memberscancel"]:
nadya.acceptGroupInvitation(op.param1)
nadya.sendText(op.param1,"Maaf " + nadya.getContact(op.param2).displayName + "\nMember Kurang Dari 30 Orang\nUntuk Info, Silahkan Chat Owner Kami!")
nadya.leaveGroup(op.param1)
else:
nadya.acceptGroupInvitation(op.param1)
nadya.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
if mid in op.param3:
if wait["AutoJoin"] == True:
G = nadya.getGroup(op.param1)
if len(G.members) <= wait["Members"]:
nadya.rejectGroupInvitation(op.param1)
else:
nadya.acceptGroupInvitation(op.param1)
nadya.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
else:
if wait["AutoCancel"] == True:
if op.param3 in Bots:
pass
else:
nadya.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
nadya.cancelGroupInvitation(op.param1, [op.param3])
nadya.sendText(op.param1, "Blacklist Detected")
else:
pass
if op.type == 13:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
nadya.cancelGroupInvitation(op.param1,[op.param3])
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Creator:
if op.param3 in admin:
if op.param3 in Bots:
pass
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
nadya.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
nadya.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
if mid in op.param3:
if op.param2 in Creator:
if op.param2 in Bots:
pass
try:
nadya.kickoutFromGroup(op.param1,[op.param2])
nadya.kickoutFromGroup(op.param1,[op.param2])
except:
try:
nadya.kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Creator in op.param3:
if admin in op.param3:
if op.param2 in Bots:
pass
try:
nadya.kickoutFromGroup(op.param1,[op.param2])
nadya.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param2 not in Bots:
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
nadya.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
nadya.inviteIntoGroup(op.param1,[op.param3])
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 11:
if wait["Qr"] == True:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
nadya.kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in Creator:
return
ginfo = nadya.getGroup(op.param1)
contact = nadya.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
nadya.sendText(op.param1,"Hallo " + nadya.getContact(op.param2).displayName + "\nWelcome To ☞ " + str(ginfo.name) + " ☜" + "\nBudayakan Cek Note\nDan Semoga Betah Disini ^_^")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
nadya.sendMessage(c)
nadya.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "13269548",
"STKPKGID": "1329191",
"STKVER": "1" }
nadya.sendMessage(d)
print "MEMBER JOIN TO GROUP"
if op.type == 15:
if wait["Sambutan"] == True:
if op.param2 in Creator:
return
nadya.sendText(op.param1,"Good Bye " + nadya.getContact(op.param2).displayName + "\nSee You Next Time . . . (p′︵‵。) 🤗")
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "13269542",
"STKPKGID": "1329191",
"STKVER": "1" }
nadya.sendMessage(d)
print "MEMBER HAS LEFT THE GROUP"
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
nadya.sendText(msg.to,text)
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
nadya.sendText(msg.to,data['result']['response'].encode('utf-8'))
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["kickMention"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["Aku Bilang Jangan Ngetag Lagi " + cName + "\nAku Kick Kamu! Sorry, Byee!!!"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
nadya.kickoutFromGroup(msg.to,[msg.from_])
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["Sekali tag, berarti naksir aim😅",cName + " Follow ya id smuleku @Fryant_BSS1",cName + " Iya sayank, I love you too muacchhh😘","aih, org ganteng, ditag mulu🙄", cName + " kaka mau nikung aku yah??🙄","kalau mau didesahin\npm aja kak😂 " + cName, "kangen ya sayank??🙄 " + cName, "Follow id smule ku ya ka @Fryant_BSS1 " + cName + "😘😘😘", "Kaka mau nikung aku yah " + cName + "😰","orang ganteng " + cName + " pasti ditag mulu 😆"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention2"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["kenapa sayank,, kangen yah??","jangan tag kalau ga mau aku hamilin","jangan tag " + cName + " tuan muda lagi meeting"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "157",
"STKPKGID": "2",
"STKVER": "100" }
nadya.sendMessage(msg)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention3"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["Iya sayank " + cName + ", Syg kangen ya...aku lg kerja buat menata masa depan kita"]
balas1 = "Supaya aq dan kamu, bahagia selalu😘😘😘"
ret_ = random.choice(balas)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
nadya.sendText(msg.to,balas1)
nadya.sendImageWithURL(msg.to,image)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "11764508",
"STKPKGID": "6641",
"STKVER": "1" }
nadya.sendMessage(msg)
break
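# op.type 25: message sent from this account -- "Bot on" is handled even while
# the bot is off, then the main command dispatcher below runs when wait["Bot"] is True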
if op.type == 25:
msg = op.message
if msg.text in ["Bot on"]:
wait["Bot"] = True
nadya.sendText(msg.to,"Bot Sudah On Kembali.")
if op.type == 25:
if wait["Bot"] == True:
msg = op.message
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
filler = "『 Sticker Check 』\nSTKID : %s\nSTKPKGID : %s\nSTKVER : %s\n『 Link 』\nline://shop/detail/%s" % (stk_id,pkg_id,stk_ver,pkg_id)
nadya.sendText(msg.to, filler)
wait["sticker"] = False
else:
pass
if wait["alwaysRead"] == True:
if msg.toType == 0:
nadya.sendChatChecked(msg.from_,msg.id)
else:
nadya.sendChatChecked(msg.to,msg.id)
if msg.contentType == 16:
if wait['likeOn'] == True:
url = msg.contentMetadata["postEndUrl"]
nadya.like(url[25:58], url[66:], likeType=1005)
nadya.comment(url[25:58], url[66:], wait["comment"])
nadya.sendText(msg.to,"Like Success")
wait['likeOn'] = False
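# contentType 13 (contact) messages feed the ban/unban/info/steal/gift/copy/invite modes armed by commands further below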
if msg.contentType == 13:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] not in admin:
if msg.contentMetadata["mid"] in wait["blacklist"]:
nadya.sendText(msg.to,"Sudah")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
nadya.sendText(msg.to,"Ditambahkan")
else:
nadya.sendText(msg.to,"Admin Detected~")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
nadya.sendText(msg.to,"Terhapus")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
nadya.sendText(msg.to,"Tidak Ada Black List")
elif wait["Contact"] == True:
msg.contentType = 0
nadya.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = nadya.getContact(msg.contentMetadata["mid"])
try:
cu = nadya.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
nadya.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
else:
contact = nadya.getContact(msg.contentMetadata["mid"])
try:
cu = nadya.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
nadya.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = nadya.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
nadya.sendText(msg.to,"[Group name]\n" + str(ginfo.name) + "\n\n[Gid]\n" + msg.to + "\n\n[Group creator]\n" + gCreator + "\n\n[Profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "members\nPending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
nadya.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Can not be used outside the group")
else:
nadya.sendText(msg.to,"Not for use less than group")
elif msg.text is None:
return
elif msg.text in ["Creator","Owner"]:
msg.contentType = 13
msg.contentMetadata = {'mid': tjia}
nadya.sendMessage(msg)
nadya.sendText(msg.to,"Itu Majikan Kami (^_^)")
elif msg.text in ["Group creator","Gcreator","gcreator"]:
ginfo = nadya.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
nadya.sendMessage(msg)
nadya.sendText(msg.to,"Itu Yang Buat Grup Ini")
elif msg.contentType == 16:
if wait["Timeline"] == True:
msg.contentType = 0
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
nadya.sendText(msg.to,msg.text)
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
nadya.findAndAddContactsByMid(target)
contact = nadya.getContact(target)
cu = nadya.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
nadya.sendText(msg.to,"Profile Picture " + contact.displayName)
nadya.sendImageWithURL(msg.to,image)
nadya.sendText(msg.to,"Cover " + contact.displayName)
nadya.sendImageWithURL(msg.to,path)
wait["steal"] = False
break
except:
pass
if msg.contentType == 13:
if wait["gift"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Gift"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,"Gift Sudah Terkirim!")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
wait['gift'] = False
break
except:
msg.contentMetadata = {'mid': target}
wait["gift"] = False
break
if msg.contentType == 13:
if wait["copy"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Copy"
break
else:
targets.append(copy)
if targets == []:
nadya.sendText(msg.to, "Not Found...")
pass
else:
for target in targets:
try:
nadya.CloneContactProfile(target)
nadya.sendText(msg.to, "Copied (^_^)")
wait['copy'] = False
break
except:
msg.contentMetadata = {'mid': target}
wait["copy"] = False
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
nadya.sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
nadya.findAndAddContactsByMid(target)
nadya.inviteIntoGroup(msg.to,[target])
nadya.sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
nadya.sendText(msg.to,"Limit Invite")
wait['invite'] = False
break
elif msg.text in ["Key creator","help creator","Fryant 1"]:
nadya.sendText(msg.to,creatorMessage)
elif msg.text in ["Key group","help group","Fryant 2"]:
nadya.sendText(msg.to,groupMessage)
elif msg.text in ["Key","Fryant","Help"]:
nadya.sendText(msg.to,helpMessage)
elif msg.text in ["Key self","help self","Fryant 3"]:
nadya.sendText(msg.to,selfMessage)
elif msg.text in ["Key bot","help bot","Fryant 4"]:
nadya.sendText(msg.to,botMessage)
elif msg.text in ["Key set","help set","Fryant 5"]:
nadya.sendText(msg.to,setMessage)
elif msg.text in ["Key media","help media","Fryant 6"]:
nadya.sendText(msg.to,mediaMessage)
elif msg.text in ["Key admin","help admin","Fryant 7"]:
nadya.sendText(msg.to,adminMessage)
elif msg.text in ["Fryant group"]:
gid = nadya.getGroupIdsJoined()
h = ""
jml = 0
for i in gid:
gn = nadya.getGroup(i).name
h += "♦【%s】\n" % (gn)
jml += 1
nadya.sendText(msg.to,"=======[List Group]=======\n"+ h +"\nTotal Group: "+str(jml))
elif "Ban group: " in msg.text:
grp = msg.text.replace("Ban group: ","")
gid = nadya.getGroupIdsJoined()
if msg.from_ in admin:
for i in gid:
h = nadya.getGroup(i).name
if h == grp:
wait["BlGroup"][i]=True
nadya.sendText(msg.to, "Success Ban Group : "+grp)
else:
pass
else:
nadya.sendText(msg.to, "Khusus Nadya")
elif msg.text in ["List ban","List ban group"]:
if msg.from_ in admin:
if wait["BlGroup"] == {}:
nadya.sendText(msg.to,"Tidak Ada")
else:
mc = ""
for gid in wait["BlGroup"]:
mc += "-> " +nadya.getGroup(gid).name + "\n"
nadya.sendText(msg.to,"===[Ban Group]===\n"+mc)
else:
nadya.sendText(msg.to, "Khusus Admin")
elif msg.text in ["Del ban: "]:
if msg.from_ in admin:
ng = msg.text.replace("Del ban: ","")
for gid in wait["BlGroup"]:
if nadya.getGroup(gid).name == ng:
del wait["BlGroup"][gid]
nadya.sendText(msg.to, "Success del ban "+ng)
else:
pass
else:
nadya.sendText(msg.to, "Khusus Nadya")
elif "Join group: " in msg.text:
ng = msg.text.replace("Join group: ","")
gid = nadya.getGroupIdsJoined()
try:
if msg.from_ in Creator:
for i in gid:
h = nadya.getGroup(i).name
if h == ng:
nadya.inviteIntoGroup(i,[msg.from_])
nadya.sendText(msg.to,"Success Join To ["+ h +"] Group")
else:
pass
else:
nadya.sendText(msg.to,"Khusus Nadya")
except Exception as e:
nadya.sendText(msg.to, str(e))
elif "Leave group: " in msg.text:
ng = msg.text.replace("Leave group: ","")
gid = nadya.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = nadya.getGroup(i).name
if h == ng:
nadya.sendText(i,"Bot Di Paksa Keluar Oleh Owner!")
nadya.leaveGroup(i)
nadya.sendText(msg.to,"Success Left ["+ h +"] group")
else:
pass
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Leave all group" == msg.text:
gid = nadya.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
nadya.sendText(i,"Bot Di Paksa Keluar Oleh Owner!")
nadya.leaveGroup(i)
nadya.sendText(msg.to,"Success Leave All Group")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Pict group: " in msg.text:
saya = msg.text.replace('Pict group: ','')
gid = nadya.getGroupIdsJoined()
for i in gid:
h = nadya.getGroup(i).name
gna = nadya.getGroup(i)
if h == saya:
nadya.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["cancelall","Cancelall"]:
if msg.toType == 2:
X = nadya.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
nadya.cancelGroupInvitation(msg.to, gInviMids)
else:
nadya.sendText(msg.to,"Tidak Ada Yang Pending")
else:
nadya.sendText(msg.to,"Tidak Bisa Digunakan Diluar Group")
elif msg.text in ["Ourl","Url on"]:
if msg.toType == 2:
X = nadya.getGroup(msg.to)
X.preventJoinByTicket = False
nadya.updateGroup(X)
nadya.sendText(msg.to,"Url Sudah Aktif")
else:
nadya.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Curl","Url off"]:
if msg.toType == 2:
X = nadya.getGroup(msg.to)
X.preventJoinByTicket = True
nadya.updateGroup(X)
nadya.sendText(msg.to,"Url Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Join on","Autojoin on"]:
if msg.from_ in admin:
wait["AutoJoin"] = True
wait["AutoJoinCancel"] = False
nadya.sendText(msg.to,"Auto Join Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Join off","Autojoin off"]:
if msg.from_ in admin:
wait["AutoJoin"] = False
nadya.sendText(msg.to,"Auto Join Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Joincancel on","Autojoincancel on"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = True
wait["AutoJoin"] = False
nadya.sendText(msg.to,"Auto Join Cancel Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Joincancel off","Autojoincancel off"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = False
nadya.sendText(msg.to,"Auto Join Cancel Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon1 on"]:
if msg.from_ in admin:
wait["detectMention"] = True
wait["detectMention2"] = False
wait["detectMention3"] = False
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon1 Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon1 off"]:
if msg.from_ in admin:
wait["detectMention"] = False
nadya.sendText(msg.to,"Auto Respon1 Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon2 on"]:
if msg.from_ in admin:
wait["detectMention"] = False
wait["detectMention2"] = True
wait["detectMention3"] = False
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon2 Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon2 off"]:
if msg.from_ in admin:
wait["detectMention2"] = False
nadya.sendText(msg.to,"Auto Respon2 Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon3 on"]:
if msg.from_ in admin:
wait["detectMention"] = False
wait["detectMention2"] = False
wait["detectMention3"] = True
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon3 Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon3 off"]:
if msg.from_ in admin:
wait["detectMention3"] = False
nadya.sendText(msg.to,"Auto Respon3 Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Responkick on"]:
if msg.from_ in admin:
wait["kickMention"] = True
wait["detectMention"] = False
wait["detectMention2"] = False
wait["detectMention3"] = False
nadya.sendText(msg.to,"Auto Respon Kick Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Responkick off"]:
if msg.from_ in admin:
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon Kick Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Autocancel on"]:
if msg.from_ in admin:
wait["AutoCancel"] = True
nadya.sendText(msg.to,"Auto Cancel Sudah Aktif")
print wait["AutoCancel"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Autocancel off"]:
if msg.from_ in admin:
wait["AutoCancel"] = False
nadya.sendText(msg.to,"Auto Cancel Sudah Di Nonaktifkan")
print wait["AutoCancel"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Invitepro on"]:
if msg.from_ in admin:
wait["inviteprotect"] = True
nadya.sendText(msg.to,"Invite Protect Sudah Aktif")
print wait["inviteprotect"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Invitepro off"]:
if msg.from_ in admin:
wait["inviteprotect"] = False
nadya.sendText(msg.to,"Invite Protect Sudah Di Nonaktifkan")
print wait["inviteprotect"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Qr on" in msg.text:
if msg.from_ in admin:
wait["Qr"] = True
nadya.sendText(msg.to,"QR Protect Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Qr off" in msg.text:
if msg.from_ in admin:
wait["Qr"] = False
nadya.sendText(msg.to,"Qr Protect Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Autokick on" in msg.text:
if msg.from_ in admin:
wait["AutoKick"] = True
nadya.sendText(msg.to,"Auto Kick Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Autokick off" in msg.text:
if msg.from_ in admin:
wait["AutoKick"] = False
nadya.sendText(msg.to,"Auto Kick Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Allprotect on"]:
if msg.from_ in admin:
wait["AutoCancel"] = True
wait["inviteprotect"] = True
wait["AutoKick"] = True
wait["Qr"] = True
nadya.sendText(msg.to,"All Protect Sudah Aktif Semua")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Allprotect off"]:
if msg.from_ in admin:
wait["AutoCancel"] = False
wait["inviteprotect"] = False
wait["AutoKick"] = False
wait["Qr"] = False
nadya.sendText(msg.to,"All Protect Sudah Di Nonaktifkan Semua")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["K on","Contact on"]:
wait["Contact"] = True
nadya.sendText(msg.to,"Contact Sudah Aktif")
elif msg.text in ["K off","Contact off"]:
wait["Contact"] = False
nadya.sendText(msg.to,"Contact Sudah Di Nonaktifkan")
elif msg.text in ["Alwaysread on"]:
wait["alwaysRead"] = True
nadya.sendText(msg.to,"Always Read Sudah Aktif")
elif msg.text in ["Alwaysread off"]:
wait["alwaysRead"] = False
nadya.sendText(msg.to,"Always Read Sudah Di Nonaktifkan")
elif msg.text in ["Sambutan on"]:
if wait["Sambutan"] == True:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sambutan Di Aktifkanヾ(*´∀`*)ノ")
else:
wait["Sambutan"] = True
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sudah Onヽ(´▽`)/")
elif msg.text in ["Sambutan off"]:
if wait["Sambutan"] == False:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sambutan Di Nonaktifkan( ^∇^)")
else:
wait["Sambutan"] = False
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sudah Off(p′︵‵。)")
elif "Sider on" in msg.text:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
nadya.sendText(msg.to,"Siap On Cek Sider")
elif "Sider off" in msg.text:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
nadya.sendText(msg.to, "Cek Sider Off")
else:
nadya.sendText(msg.to, "Heh Belom Di Set")
elif msg.text in ["Status"]:
md = ""
if wait["Sambutan"] == True: md+="╠➩✔️ Sambutan : On\n"
else:md+="╠➩❌ Sambutan : Off\n"
if wait["AutoJoin"] == True: md+="╠➩✔️ Auto Join : On\n"
else: md +="╠➩❌ Auto Join : Off\n"
if wait["AutoJoinCancel"] == True: md+="╠➩✔️ Auto Join Cancel : On\n"
else: md +="╠➩❌ Auto Join Cancel : Off\n"
if wait["Contact"] == True: md+="╠➩✔️ Info Contact : On\n"
else: md+="╠➩❌ Info Contact : Off\n"
if wait["AutoCancel"] == True:md+="╠➩✔️ Auto Cancel : On\n"
else: md+= "╠➩❌ Auto Cancel : Off\n"
if wait["inviteprotect"] == True:md+="╠➩✔️ Invite Protect : On\n"
else: md+= "╠➩❌ Invite Protect : Off\n"
if wait["Qr"] == True: md+="╠➩✔️ Qr Protect : On\n"
else:md+="╠➩❌ Qr Protect : Off\n"
if wait["AutoKick"] == True: md+="╠➩✔️ Auto Kick : On\n"
else:md+="╠➩❌ Auto Kick : Off\n"
if wait["alwaysRead"] == True: md+="╠➩✔️ Always Read : On\n"
else:md+="╠➩❌ Always Read: Off\n"
if wait["detectMention"] == True: md+="╠➩✔️ Auto Respon1 : On\n"
else:md+="╠➩❌ Auto Respon1 : Off\n"
if wait["detectMention2"] == True: md+="╠➩✔️ Auto Respon2 : On\n"
else:md+="╠➩❌ Auto Respon2 : Off\n"
if wait["detectMention3"] == True: md+="╠➩✔️ Auto Respon3 : On\n"
else:md+="╠➩❌ Auto Respon3 : Off\n"
if wait["kickMention"] == True: md+="╠➩✔️ Auto Respon Kick : On\n"
else:md+="╠➩❌ Auto Respon Kick : Off\n"
if wait["Sider"] == True: md+="╠➩✔️ Auto Sider : On\n"
else:md+="╠➩❌ Auto Sider: Off\n"
if wait["Simi"] == True: md+="╠➩✔️ Simisimi : On\n"
else:md+="╠➩❌ Simisimi: Off\n"
nadya.sendText(msg.to,"╔════════════════════\n""║ ☆☞ F R Y A N T S T A T U S ☜☆\n""╠════════════════════\n"+md+"╚════════════════════")
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
nadya.sendMessage(msg)
elif "Gift1 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift1 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift2 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift2 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '1360738'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift3 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift3 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '1395389'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift4 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift4 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1329191'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift5 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift5 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '9057'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift6 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift6 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '9167'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift7 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift7 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '7334'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift8 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift8 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift9 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift9 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1405277'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift10 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift10 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
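# keyword-triggered sticker replies, all from the built-in sticker package (STKPKGID 1)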
elif msg.text.lower() in ["wkwkwk","wkwk","hahaha","haha"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '100',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["hehehe","hehe"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '10',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["galau"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '9',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["you","kau","kamu"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '7',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["marah","hadeuh","hadeh"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '6',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["please","pliss","mohon","tolong"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '4',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["haa","haaa","kaget"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '3',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["lucu","ngakak","lol"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '110',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["hmm","hmmm"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '101',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["tidur"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '1',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["gemes"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '2',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["cantik","imut"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '5',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["nyanyi","lalala"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '11',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["gugup"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '8',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["ok","oke","okay","oce","okee","sip","siph"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '13',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["mantab","mantap","nice","keren"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '14',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["ngejek"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '15',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["nangis","sedih"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '16',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["woi","kampret"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '102',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["huft"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '104',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif "tag all" == msg.text.lower():
group = nadya.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, len(nama)-1):
nm3 += [nama[k]]
summon(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, len(nama)-1):
nm4 += [nama[l]]
summon(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "Jumlah:\n" + str(jml) + " Members"
cnt.to = msg.to
nadya.sendMessage(cnt)
elif "tagall" == msg.text.lower():
group = nadya.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, len(nama)-1):
nm3 += [nama[k]]
summon(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, len(nama)-1):
nm4 += [nama[l]]
summon(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "Jumlah:\n" + str(jml) + " Members"
cnt.to = msg.to
nadya.sendMessage(cnt)
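# "Setview" resets the per-group reader log in dataSeen/<gid>.txt; "Viewseen" reports who read since the checkpoint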
elif msg.text in ["Setview","Setpoint","Cctv"]:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
nadya.sendText(msg.to, "☆Checkpoint Checked☆")
print "Setview"
elif msg.text in ["Viewseen","Check","Ciduk","Cyduk"]:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
# the original appended to an undefined 'conName' here, which raised NameError; skip the entry instead
pass
contactId = nadya.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "╔═════════════════════════\n║ ☆☞ LIST VIEWERS ☜☆\n╠═════════════════════════\n╠➩"
grp = '\n╠➩ '.join(str(f) for f in dataResult)
total = '\n╠═════════════════════════\n╠➩ Total %i Viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S')) + "\n╚═════════════════════════"
nadya.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
nadya.sendText(msg.to, "☆Auto Checkpoint☆")
else:
nadya.sendText(msg.to, "☆Belum Ada Viewers☆")
print "Viewseen"
elif "Kick " in msg.text:
if msg.from_ in admin:
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
nadya.kickoutFromGroup(msg.to,[mention['M']])
elif "Set member: " in msg.text:
if msg.from_ in admin:
jml = msg.text.replace("Set member: ","")
wait["Members"] = int(jml)
nadya.sendText(msg.to, "Jumlah minimal member telah di set : "+jml)
elif "Add all" in msg.text:
thisgroup = nadya.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
nadya.findAndAddContactsByMids(mi_d)
nadya.sendText(msg.to,"Success Add all")
elif msg.text in ["Invite"]:
wait["invite"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Auto like"]:
wait["likeOn"] = True
nadya.sendText(msg.to,"Shere Post Kamu Yang Mau Di Like!")
elif msg.text in ["Steal contact"]:
wait["steal"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Giftbycontact"]:
wait["gift"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Copycontact"]:
wait["copy"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Sticker on"]:
wait["sticker"] = True
nadya.sendText(msg.to,"Sticker ID Detect Already On.")
elif msg.text in ["Bot off"]:
wait["Bot"] = False
nadya.sendText(msg.to,"Bot Sudah Di Nonaktifkan.")
elif "Recover" in msg.text:
thisgroup = nadya.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
nadya.createGroup("Recover", mi_d)
nadya.sendText(msg.to,"Success recover")
elif ("Gn: " in msg.text):
if msg.toType == 2:
X = nadya.getGroup(msg.to)
X.name = msg.text.replace("Gn: ","")
nadya.updateGroup(X)
else:
nadya.sendText(msg.to,"It can't be used besides the group.")
elif "Kick: " in msg.text:
midd = msg.text.replace("Kick: ","")
if midd not in admin:
nadya.kickoutFromGroup(msg.to,[midd])
else:
nadya.sendText(msg.to,"Admin Detected")
elif "Invite: " in msg.text:
midd = msg.text.replace("Invite: ","")
nadya.findAndAddContactsByMid(midd)
nadya.inviteIntoGroup(msg.to,[midd])
elif "Invite creator" in msg.text:
midd = "u14f64e139a3817afaabe27d237afb36b"
nadya.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Welcome","welcome","Welkam","welkam","Wc","wc"]:
gs = nadya.getGroup(msg.to)
nadya.sendText(msg.to,"Selamat Datang Di "+ gs.name)
msg.contentType = 7
msg.contentMetadata={'STKID': '247',
'STKPKGID': '3',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif "Bc: " in msg.text:
bc = msg.text.replace("Bc: ","")
gid = nadya.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
nadya.sendText(i,"=======[BROADCAST]=======\n\n"+bc+"\n\nContact Me : line.me/ti/p/~nad_nad.")
nadya.sendText(msg.to,"Success BC BosQ")
else:
nadya.sendText(msg.to,"Khusus Admin")
elif msg.text in ["Cancel"]:
gid = nadya.getGroupIdsInvited()
for i in gid:
nadya.rejectGroupInvitation(i)
nadya.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = nadya.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
nadya.updateGroup(x)
gurl = nadya.reissueGroupTicket(msg.to)
nadya.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Can't be used outside the group")
else:
nadya.sendText(msg.to,"Not for use less than group")
elif msg.text in ["timeline"]:
try:
url = nadya.activity(limit=5)
nadya.sendText(msg.to,url['result']['posts'][0]['postInfo']['postId'])
except Exception as E:
print E
elif msg.text in ["@bye","@Bye"]:
nadya.leaveGroup(msg.to)
elif msg.text in ["Absen"]:
nadya.sendText(msg.to,"Hadir!!")
elif msg.text.lower() in ["respon"]:
nadya.sendText(msg.to,responsename)
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
print("Speed")
elapsed_time = time.time() - start
nadya.sendText(msg.to, "Progress...")
nadya.sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text in ["Speed test"]:
start = time.time()
nadya.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
nadya.sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
nadya.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
nadya.sendText(msg.to,"send contact")
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "@Ban by mention"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in admin:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendText(msg.to,"Succes BosQ")
except:
nadya.sendText(msg.to,"Error")
else:
nadya.sendText(msg.to,"Admin Detected~")
elif msg.text in ["Banlist","Ban list"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
nadya.sendText(msg.to,"Tidak Ada")
else:
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +nadya.getContact(mi_d).displayName + "\n"
nadya.sendText(msg.to,"===[Blacklist User]===\n"+mc)
elif "Unban @" in msg.text:
if msg.toType == 2:
print "@Unban by mention"
if msg.from_ in admin:
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendText(msg.to,"Succes BosQ")
except:
nadya.sendText(msg.to,"Succes BosQ")
elif msg.text.lower() == 'clear ban':
if msg.from_ in admin:
wait["blacklist"] = {}
nadya.sendText(msg.to,"ヽ( ^ω^)ノ└ ❉Unbanned All Success❉ ┐")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = nadya.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
nadya.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
nadya.kickoutFromGroup(msg.to,[jj])
nadya.sendText(msg.to,"Blacklist emang pantas tuk di usir")
else:
nadya.sendText(msg.to, "Khusus creator")
elif msg.text in ["Kill"]:
if msg.toType == 2:
if msg.from_ in admin:
group = nadya.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
nadya.sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
nadya.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Kickall" == msg.text:
if msg.from_ in Creator:
if msg.toType == 2:
print "Kick all member"
_name = msg.text.replace("Kickall","")
gs = nadya.getGroup(msg.to)
nadya.sendText(msg.to,"Dadaaah~")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found.")
else:
for target in targets:
if target not in admin:
try:
nadya.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except Exception as e:
nadya.sendText(msg.to,str(e))
nadya.inviteIntoGroup(msg.to, targets)
elif msg.text in ["Bot restart","Reboot"]:
if msg.from_ in Creator:
nadya.sendText(msg.to, "Bot Has Been Restarted...")
restart_program()
print "@Restart"
else:
nadya.sendText(msg.to, "No Access")
elif msg.text in ["Turn off"]:
if msg.from_ in Creator:
try:
import sys
sys.exit()
except:
pass
elif 'Crash' in msg.text:
if msg.from_ in Creator:
msg.contentType = 13
msg.contentMetadata = {'mid': "NADYA,'"}
nadya.sendMessage(msg)
elif "Mycopy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
nadya.CloneContactProfile(target)
nadya.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif msg.text in ["Mybackup"]:
try:
nadya.updateDisplayPicture(backup1.pictureStatus)
nadya.updateProfile(backup1)
nadya.sendText(msg.to, "Done (^_^)")
except Exception as e:
nadya.sendText(msg.to, str(e))
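# music commands below query a joox proxy API for song info, lyrics, and downloadable audio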
elif "/musik " in msg.text:
songname = msg.text.replace("/musik ","")
params = {"songname": songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
abc = song[3].replace('https://','http://')
nadya.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4])
nadya.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ")
nadya.sendAudioWithURL(msg.to,abc)
nadya.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0])
elif '/lirik ' in msg.text.lower():
try:
songname = msg.text.lower().replace('/lirik ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
nadya.sendText(msg.to, hasil)
except Exception as wak:
nadya.sendText(msg.to, str(wak))
elif "/musrik " in msg.text:
songname = msg.text.replace("/musrik ","")
params = {"songname": songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
abc = song[3].replace('https://','http://')
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
nadya.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ")
nadya.sendAudioWithURL(msg.to,abc)
nadya.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4] +"\n\n" + hasil)
nadya.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0])
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
nadya.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "cover @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("cover @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif "Cover @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Cover @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif "Cpp" in msg.text:
if msg.from_ in admin:
path = "nadya.jpg"
nadya.sendText(msg.to,"Update PP :")
nadya.sendImage(msg.to,path)
nadya.updateProfilePicture(path)
elif "pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("pp @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.getContact(target)
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif "Pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Pp @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.getContact(target)
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif msg.text.lower() in ["pap owner","pap creator"]:
link = ["http://dl.profile.line-cdn.net/0hNPsZWL9WEX9OIz0lhyFuKHJmHxI5DRc3NkJaETwkRklqGwQoJkNbTGklHRo2G1B7cxFXH2NxSU03"]
pilih = random.choice(link)
nadya.sendImageWithURL(msg.to,pilih)
elif "Spam: " in msg.text:
bctxt = msg.text.replace("Spam: ", "")
t = 10
while(t):
nadya.sendText(msg.to, (bctxt))
t-=1
elif "Scbc " in msg.text:
bctxt = msg.text.replace("Scbc ", "")
orang = nadya.getAllContactIds()
t = 20
for manusia in orang:
while(t):
nadya.sendText(manusia, (bctxt))
t-=1
elif "Cbc " in msg.text:
broadcasttxt = msg.text.replace("Cbc ", "")
orang = nadya.getAllContactIds()
for manusia in orang:
nadya.sendText(manusia, (broadcasttxt))
elif '/ig ' in msg.text.lower():
try:
instagram = msg.text.lower().replace("/ig ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html.parser')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
tj = text1[0].replace("s150x150/","")
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "========INSTAGRAM INFO ========\n"
details = "\n========INSTAGRAM INFO ========"
nadya.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
nadya.sendImageWithURL(msg.to, tj)
except Exception as njer:
nadya.sendText(msg.to, str(njer))
elif "Checkig " in msg.text:
separate = msg.text.split(" ")
user = msg.text.replace(separate[0] + " ","")
if user.startswith("@"):
user = user.replace("@","")
profile = "https://www.instagram.com/" + user
with requests.session() as x:
x.headers['user-agent'] = 'Mozilla/5.0'
end_cursor = ''
for count in range(1, 999):
print('PAGE: ', count)
r = x.get(profile, params={'max_id': end_cursor})
data = re.search(r'window._sharedData = (\{.+?});</script>', r.text).group(1)
j = json.loads(data)
for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']:
if node['is_video']:
page = 'https://www.instagram.com/p/' + node['code']
r = x.get(page)
url = re.search(r'"video_url": "([^"]+)"', r.text).group(1)
print(url)
nadya.sendVideoWithURL(msg.to,url)
else:
print (node['display_src'])
nadya.sendImageWithURL(msg.to,node['display_src'])
end_cursor = re.search(r'"end_cursor": "([^"]+)"', r.text).group(1)
elif 'Youtubelink: ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtube ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
nadya.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
nadya.sendText(msg.to,"Could not find it")
elif 'Youtubevideo: ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtubevideo: ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class': 'yt-uix-tile-link'})
nadya.sendVideoWithURL(msg.to,'https://www.youtube.com' + results['href'])
except:
nadya.sendText(msg.to, "Could not find it")
elif "Say-id " in msg.text:
say = msg.text.replace("Say-id ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
say = msg.text.replace("Say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
say = msg.text.replace("Say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "Say welcome" in msg.text:
gs = nadya.getGroup(msg.to)
say = msg.text.replace("Say welcome","Selamat Datang Di "+ gs.name)
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower() in ["hi","hai","halo","hallo"]:
beb = "Hi Sayang 😘 " +nadya.getContact(msg.from_).displayName + " starry heart"
nadya.sendText(msg.to,beb)
elif "playstore " in msg.text.lower():
tob = msg.text.lower().replace("playstore ","")
nadya.sendText(msg.to,"Sedang Mencari...")
nadya.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLink : https://play.google.com/store/search?q=" + tob)
nadya.sendText(msg.to,"Tuh Linknya Kak (^_^)")
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
nadya.sendText(msg.to, g.mid)
else:
pass
elif "Mybio " in msg.text:
string = msg.text.replace("Mybio ","")
if len(string.decode('utf-8')) <= 500:
profile = nadya.getProfile()
profile.statusMessage = string
nadya.updateProfile(profile)
nadya.sendText(msg.to,"Done")
elif "Myname " in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("Myname ","")
if len(string.decode('utf-8')) <= 5000:
profile = nadya.getProfile()
profile.displayName = string
nadya.updateProfile(profile)
nadya.sendText(msg.to,"Done")
elif msg.text.lower() in ["mymid","myid"]:
middd = "Name : " +nadya.getContact(msg.from_).displayName + "\nMid : " +msg.from_
nadya.sendText(msg.to,middd)
elif msg.text.lower() in ["me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
nadya.sendMessage(msg)
elif "/apakah " in msg.text:
apk = msg.text.replace("/apakah ","")
rnd = ["Ya","Tidak","Bisa Jadi","Mungkin"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/hari " in msg.text:
apk = msg.text.replace("/hari ","")
rnd = ["Senin","Selasa","Rabu","Kamis","Jumat","Sabtu","Minggu"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/berapa " in msg.text:
apk = msg.text.replace("/berapa ","")
rnd = ['10%','20%','30%','40%','50%','60%','70%','80%','90%','100%','0%']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/berapakah " in msg.text:
apk = msg.text.replace("/berapakah ","")
rnd = ['1','2','3','4','5','6','7','8','9','10','Tidak Ada']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/kapan " in msg.text:
apk = msg.text.replace("/kapan ","")
rnd = ["kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi","Tidak Akan Pernah"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
wait["Simi"] = True
nadya.sendText(msg.to," Simisimi Di Aktifkan")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
wait["Simi"] = False
nadya.sendText(msg.to,"Simisimi Di Nonaktifkan")
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
nadya.sendImageWithURL(msg.to,path)
except:
pass
elif "Youtubesearch: " in msg.text:
query = msg.text.replace("Youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html.parser')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
nadya.sendText(msg.to,hasil)
print '[Command] Youtube Search'
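# translation commands: Tr-* use the googletrans Translator; the X@Y variants scrape translate.google.com directly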
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
nadya.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
nadya.sendText(msg.to, A)
elif "Tr-th " in msg.text:
isi = msg.text.replace("Tr-th ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
A = A.encode('utf-8')
nadya.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result)
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result)
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Th@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif msg.text in ["Friendlist"]:
contactlist = nadya.getAllContactIds()
kontak = nadya.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
nadya.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = nadya.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═════════"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
nadya.sendText(msg.to, msgs)
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = nadya.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
nadya.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getgroup image" in msg.text:
group = nadya.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
nadya.sendImageWithURL(msg.to,path)
elif "Urlgroup image" in msg.text:
group = nadya.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
nadya.sendText(msg.to,path)
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
try:
nadya.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
nadya.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
nadya.sendText(msg.to,"Profile Picture " + contact.displayName)
nadya.sendImageWithURL(msg.to,image)
nadya.sendText(msg.to,"Cover " + contact.displayName)
nadya.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = nadya.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
nadya.sendMessage(msg)
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
try:
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
try:
nadya.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
nadya.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot Sudah Berjalan Selama :\n"+waktu(eltime)
nadya.sendText(msg.to,van)
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
nadya.sendText(msg.to,"========== I N F O R M A S I ==========\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n========== I N F O R M A S I ==========")
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
nadya.sendText(msg.to, rst)
elif "SearchID: " in msg.text:
userid = msg.text.replace("SearchID: ","")
contact = nadya.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
nadya.sendMessage(msg)
elif "Searchid: " in msg.text:
userid = msg.text.replace("Searchid: ","")
contact = nadya.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
nadya.sendMessage(msg)
elif "removechat" in msg.text.lower():
if msg.from_ in admin:
try:
nadya.removeAllMessages(op.param2)
print "[Command] Remove Chat"
nadya.sendText(msg.to,"Done")
except Exception as error:
print error
nadya.sendText(msg.to,"Error")
elif "Invitemeto: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Invitemeto: ","")
if gid == "":
nadya.sendText(msg.to,"Invalid group id")
else:
try:
nadya.findAndAddContactsByMid(msg.from_)
nadya.inviteIntoGroup(gid,[msg.from_])
except:
nadya.sendText(msg.to,"Mungkin Saya Tidak Di Dalaam Grup Itu")
elif msg.text in ["Glist"]:
nadya.sendText(msg.to, "Tunggu Sebentar. . .")
gid = nadya.getGroupIdsJoined()
h = ""
for i in gid:
h += "╠➩" + "%s\n" % (nadya.getGroup(i).name +" ~> ["+str(len(nadya.getGroup(i).members))+"]")
nadya.sendText(msg.to,"╔═════════════════════════\n║ ☆☞ LIST GROUPS☜☆\n╠═════════════════════════\n" + h + "╠═════════════════════════" + "\n║ Total Groups =" +" ["+str(len(gid))+"]\n╚═════════════════════════")
elif msg.text in ["Glistmid"]:
gruplist = nadya.getGroupIdsJoined()
kontak = nadya.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
nadya.sendText(msg.to, msgs)
elif "Google: " in msg.text:
a = msg.text.replace("Google: ","")
b = urllib.quote(a)
nadya.sendText(msg.to,"Sedang Mencari...")
nadya.sendText(msg.to, "https://www.google.com/" + b)
nadya.sendText(msg.to,"Itu Dia Linknya. . .")
elif "Details group: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Details group: ","")
if gid in [""," "]:
nadya.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = nadya.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
nadya.sendText(msg.to,h)
except Exception as error:
nadya.sendText(msg.to,(error))
elif "Cancel invite: " in msg.text:
if msg.from_ in admin:
gids = msg.text.replace("Cancel invite: ","")
gid = nadya.getGroup(gids)
if gid is not None:
try:
nadya.rejectGroupInvitation(gids)
nadya.sendText(msg.to,"Berhasil tolak undangan dari grup " + gid.name)
except:
nadya.sendText(msg.to,"Error!")
else:
nadya.sendText(msg.to,"Grup tidak ditemukan")
elif msg.text in ["Acc invite"]:
if msg.from_ in admin:
gid = nadya.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = nadya.getGroup(i)
_list += gids.name
nadya.acceptGroupInvitation(i)
else:
break
if gid is not None:
nadya.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
nadya.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif "Gif gore" in msg.text:
gif = ("https://media.giphy.com/media/l2JHVsQiOZrNMGzYs/giphy.gif","https://media.giphy.com/media/OgltQ2hbilzJS/200w.gif")
gore = random.choice(gif)
nadya.sendGifWithURL(msg.to,gore)
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
nadya.sendText(msg.to,"Target ditambahkan!")
break
except:
nadya.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
nadya.sendText(msg.to,"Target dihapuskan!")
break
except:
nadya.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
nadya.sendText(msg.to,"Nothing")
else:
mc = "Target Mimic User:\n"
for mi_d in mimic["target"]:
mc += "• "+nadya.getContact(mi_d).displayName + "\n"
nadya.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
nadya.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
nadya.sendText(msg.to,"Mimic change to target")
else:
nadya.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
nadya.sendText(msg.to,"Reply Message on")
else:
nadya.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
nadya.sendText(msg.to,"Reply Message off")
else:
nadya.sendText(msg.to,"Sudah off")
if op.type == 59:
print op
except Exception as error:
print error
while True:
try:
Ops = nadya.fetchOps(nadya.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(nadya.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
nadya.Poll.rev = max(nadya.Poll.rev, Op.revision)
bot(Op)
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
nadya = LINETCR.LINE()
nadya.login(token='Eq8HO0fhYMrll5V2r6v3.uFyCY3rEW6udwsHCnFj70W.KD1Mlw3UQ67PLM8N+4pVdjTi1joYo3zu7hlhQV6XWuo=')
nadya.loginResult()
print "Nadya-Login Success\n\n=====[Sukses Login]====="
reload(sys)
sys.setdefaultencoding('utf-8')
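# Help-menu text blocks sent back by the "Help ..." / "Key ..." commands below.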
selfMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT S E L F ☜☆
╠═════════════════════════
╠➩〘Hi〙
╠➩〘Me〙
╠➩〘Mymid〙
╠➩〘Mid @〙
╠➩〘SearchID: (ID LINE)〙
╠➩〘Checkdate (DD/MM/YY)〙
╠➩〘Kalender〙
╠➩〘Steal contact〙
╠➩〘Pp @〙
╠➩〘Cover @〙
╠➩〘Auto like〙
╠➩〘Scbc Text〙
╠➩〘Cbc Text〙
╠➩〘Gbc Text〙
╠➩〘Getbio @〙
╠➩〘Getinfo @〙
╠➩〘Getname @〙
╠➩〘Getprofile @〙
╠➩〘Getcontact @〙
╠➩〘Getvid @〙
╠➩〘Friendlist〙
╠➩〘Micadd @〙
╠➩〘Micdel @〙
╠➩〘Miclist〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
botMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT B O T ☜☆
╠═════════════════════════
╠➩〘Absen〙
╠➩〘Respon〙
╠➩〘Runtime〙
╠➩〘Mycopy @〙
╠➩〘Copycontact〙
╠➩〘Mybackup〙
╠➩〘Mybio (Text)〙
╠➩〘Myname (Text)〙
╠➩〘@bye〙
╠➩〘Bot on/off〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
mediaMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT M E D I A ☜☆
╠═════════════════════════
╠➩〘Gift〙
╠➩〘Gift1 @ s/d Gift10 @〙
╠➩〘Giftbycontact〙
╠➩〘Gif gore〙
╠➩〘Google: (Text)〙
╠➩〘Playstore NamaApp〙
╠➩〘Fancytext: Text〙
╠➩〘/musik Judul-Penyanyi〙
╠➩〘/lirik Judul-Penyanyi〙
╠➩〘/musrik Judul-Penyanyi〙
╠➩〘/ig UrsnameInstagram〙
╠➩〘Checkig UrsnameInstagram〙
╠➩〘/apakah Text (Kerang Ajaib)〙
╠➩〘/kapan Text (Kerang Ajaib)〙
╠➩〘/hari Text (Kerang Ajaib)〙
╠➩〘/berapa Text (Kerang Ajaib)〙
╠➩〘/berapakah Text〙
╠➩〘Youtubelink: Judul Video〙
╠➩〘Youtubevideo: Judul Video〙
╠➩〘Youtubesearch: Judul Video〙
╠➩〘Image NamaGambar〙
╠➩〘Say-id Text〙
╠➩〘Say-en Text〙
╠➩〘Say-jp Text〙
╠➩〘Tr-id Text (Translate En Ke ID〙
╠➩〘Tr-en Text (Translate ID Ke En〙
╠➩〘Tr-th Text (Translate ID Ke Th〙
╠➩〘Id@en Text (Translate ID Ke En〙
╠➩〘Id@th Text (Translate ID Ke TH〙
╠➩〘En@id Text (Translate En Ke ID〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
groupMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT G R O U P ☜☆
╠═════════════════════════
╠➩〘Welcome〙
╠➩〘Say welcome〙
╠➩〘Invite creator〙
╠➩〘Setview〙
╠➩〘Viewseen〙
╠➩〘Gn: (NamaGroup)〙
╠➩〘Tag all〙
╠➩〘Recover〙
╠➩〘Cancel〙
╠➩〘Cancelall〙
╠➩〘Gcreator〙
╠➩〘Ginfo〙
╠➩〘Gurl〙
╠➩〘List group〙
╠➩〘Pict group: (NamaGroup)〙
╠➩〘Spam: (Text)〙
╠➩〘Add all〙
╠➩〘Kick: (Mid)〙
╠➩〘Invite: (Mid)〙
╠➩〘Invite〙
╠➩〘Memlist〙
╠➩〘Getgroup image〙
╠➩〘Urlgroup Image〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
tjia="u9f09cfcb17d037e2936b751bd9d40ead"
setMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT S E T ☜☆
╠═════════════════════════
╠➩〘Sambutan on/off〙
╠➩〘Mimic on/off〙
╠➩〘Url on/off〙
╠➩〘Alwaysread on/off〙
╠➩〘Sider on/off〙
╠➩〘Contact on/off〙
╠➩〘Sticker on〙
╠➩〘Simisimi on/off〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
creatorMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT C R E A T O R ☜☆
╠═════════════════════════
╠➩〘Crash〙
╠➩〘Kickall〙
╠➩〘Bc: (Text)〙
╠➩〘Join group: (NamaGroup〙
╠➩〘Leave group: (NamaGroup〙
╠➩〘Leave all group〙
╠➩〘Tag on/off〙
╠➩〘Bot restart〙
╠➩〘Turn off〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
adminMessage ="""
╔═════════════════════════
║ ☆☞ A D M I N ☜☆
╠═════════════════════════
╠➩〘Allprotect on/off〙
╠➩〘Ban〙
╠➩〘Unban〙
╠➩〘Ban @〙
╠➩〘Unban @〙
╠➩〘Ban list〙
╠➩〘Clear ban〙
╠➩〘Kill〙
╠➩〘Kick @〙
╠➩〘Set member: (Jumblah)〙
╠➩〘Ban group: (NamaGroup〙
╠➩〘Del ban: (NamaGroup〙
╠➩〘List ban〙
╠➩〘Kill ban〙
╠➩〘Glist〙
╠➩〘Glistmid〙
╠➩〘Details group: (Gid)〙
╠➩〘Cancel invite: (Gid)〙
╠➩〘Invitemeto: (Gid)〙
╠➩〘Acc invite〙
╠➩〘Removechat〙
╠➩〘Qr on/off〙
╠➩〘Autokick on/off〙
╠➩〘Autocancel on/off〙
╠➩〘Invitepro on/off〙
╠➩〘Join on/off〙
╠➩〘Joincancel on/off〙
╠➩〘Respon1 on/off〙
╠➩〘Respon2 on/off〙
╠➩〘Respon3 on/off〙
╠➩〘Responkick on/off〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
helpMessage ="""
╔═════════════════════════
║ ☆☞ FRYANT H E L P ☜☆
╠═════════════════════════
╠➩〘Help self〙
╠➩〘Help bot〙
╠➩〘Help group〙
╠➩〘Help set〙
╠➩〘Help media〙
╠➩〘Help admin〙
╠➩〘Help creator〙
╠➩〘Owner〙
╠➩〘Pap owner〙
╠➩〘Speed〙
╠➩〘Speed test〙
╠➩〘Status〙
╠═════════════════════════
║ ༄ིৡ❍ᶜʰᵉ+Sepri࿐ৡ
SelfBot Versi 124V
╚═════════════════════════
"""
KAC=[nadya]
mid = nadya.getProfile().mid
Bots=[mid]
Creator=["u51f61ccb745ec3a50359285c35f27bd3"]
admin=["u51f61ccb745ec3a50359285c35f27bd3"]
contact = nadya.getProfile()
backup1 = nadya.getProfile()
backup1.displayName = contact.displayName
backup1.statusMessage = contact.statusMessage
backup1.pictureStatus = contact.pictureStatus
responsename = nadya.getProfile().displayName
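# Global toggle/state dictionary consulted by the command handlers in bot().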
wait = {
"LeaveRoom":True,
"Bot":True,
"AutoJoin":False,
"AutoJoinCancel":False,
"memberscancel":30,
"Members":1,
"AutoCancel":False,
"AutoKick":False,
'pap':{},
'invite':{},
'steal':{},
'gift':{},
'copy':{},
'likeOn':{},
'detectMention':False,
'detectMention2':False,
'detectMention3':True,
'kickMention':False,
'sticker':False,
'timeline':True,
"Timeline":True,
"comment":"Bot Auto Like ©By : Nadya\nContact Me : 👉 line.me/ti/p/~sepriche.",
"commentOn":True,
"commentBlack":{},
"message":"Thx For Add Me (^_^)\nInvite Me To Your Group ヘ(^_^)ヘ",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Qr":False,
"Contact":False,
"Sambutan":True,
"inviteprotect":False,
"alwaysRead":False,
"Sider":{},
"Simi":{},
"lang":"JP",
"BlGroup":{}
}
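# Additional per-feature state: simisimi toggles per chat, sider (lurker)
# tracking, read-point bookkeeping, and mimic targets.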
settings = {
"simiSimi":{}
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
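# Fetch a page's raw HTML, picking the urllib flavor by Python major version.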
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version:
import urllib.request
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else:
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1:
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item)
time.sleep(0.1)
page = page[end_content:]
return items
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"]
for texX in tex:
for command in commands:
if string == texX + command:
return True
return False
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
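# The following send*/download* helpers take `self` and mirror the LINE
# client's media upload/download methods; as written they are module-level
# excerpts and are never bound to the `nadya` client instance.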
def sendAudio(self, to_, path):
M = Message()
M.text = None
M.to = to_
M.contentMetadata = None
M.contentPreview = None
M.contentType = 3
M_id = self._client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def sendImage(self, to_, path):
M = Message(to=to_, text=None, contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M2 = self._client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except:
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def sendAudioWithURL(self, to_, url):
path = self.downloadFileWithURL(url)
try:
self.sendAudio(to_, path)
except Exception as e:
raise Exception(e)
def sendAudioWithUrl(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True, verify=False)
if r.status_code == 200:
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
raise e
def downloadFileWithURL(self, fileUrl):
saveAs = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = self.get_content(fileUrl)
if r.status_code == 200:
with open(saveAs, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return saveAs
else:
raise Exception('Download file failure.')
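# Build and send a message that @-mentions every mid in `nama` by filling
# the MENTION contentMetadata by hand.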
def summon(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
nadya.sendMessage(msg)
except Exception as error:
print error
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
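# Main operation dispatcher. op.type codes as used in this script:
# 5 friend added, 11 group settings updated (QR protect), 13 group invite,
# 15 member left, 17 member joined, 19 member kicked, 21/22 room invite,
# 25 message sent by this account, 26 message received, 55 read receipt.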
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
nadya.findAndAddContactsByMid(op.param1)
if(wait["message"]in[""," ","\n",None]):
pass
else:
nadya.sendText(op.param1,str(wait["message"]))
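# op.type 55: read receipts — record reader ids for the sider (lurker) check.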
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = nadya.getContact(op.param2).displayName
# Name = summon(op.param2)
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
nadya.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nNgintip Aja Niih. . .\nChat Kek Idiih (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
nadya.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nBetah Banget Jadi Penonton. . .\nChat Napa (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
nadya.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nNgapain Kak Ngintip Aja???\nSini Gabung Chat... ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 22:
nadya.leaveRoom(op.param1)
if op.type == 21:
nadya.leaveRoom(op.param1)
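# op.type 13: group invitations — auto-accept/reject, member-count gate,
# invite protection, and blacklist cancellation.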
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in Creator:
nadya.acceptGroupInvitation(op.param1)
if mid in op.param3:
if wait["AutoJoinCancel"] == True:
G = nadya.getGroup(op.param1)
if len(G.members) <= wait["memberscancel"]:
nadya.acceptGroupInvitation(op.param1)
nadya.sendText(op.param1,"Maaf " + nadya.getContact(op.param2).displayName + "\nMember Kurang Dari 30 Orang\nUntuk Info, Silahkan Chat Owner Kami!")
nadya.leaveGroup(op.param1)
else:
nadya.acceptGroupInvitation(op.param1)
nadya.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
if mid in op.param3:
if wait["AutoJoin"] == True:
G = nadya.getGroup(op.param1)
if len(G.members) <= wait["Members"]:
nadya.rejectGroupInvitation(op.param1)
else:
nadya.acceptGroupInvitation(op.param1)
nadya.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
else:
if wait["AutoCancel"] == True:
if op.param3 in Bots:
pass
else:
nadya.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
nadya.cancelGroupInvitation(op.param1, [op.param3])
nadya.sendText(op.param1, "Blacklist Detected")
else:
pass
if op.type == 13:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
nadya.cancelGroupInvitation(op.param1,[op.param3])
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
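# op.type 19: member kicked — with AutoKick on, kick the kicker back,
# re-invite the victim, and blacklist the offender.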
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Creator:
if op.param3 in admin:
if op.param3 in Bots:
pass
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
nadya.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
nadya.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
if mid in op.param3:
if op.param2 in Creator:
if op.param2 in Bots:
pass
try:
nadya.kickoutFromGroup(op.param1,[op.param2])
nadya.kickoutFromGroup(op.param1,[op.param2])
except:
try:
nadya.kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param3 in Creator:
if op.param3 in admin:
if op.param2 in Bots:
pass
try:
nadya.kickoutFromGroup(op.param1,[op.param2])
nadya.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param2 not in Bots:
nadya.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
nadya.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
nadya.inviteIntoGroup(op.param1,[op.param3])
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 11:
if wait["Qr"] == True:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
nadya.kickoutFromGroup(op.param1,[op.param2])
else:
pass
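# op.type 17: member joined — greet with text, contact card, avatar image,
# and a sticker when Sambutan is on.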
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in Creator:
return
ginfo = nadya.getGroup(op.param1)
contact = nadya.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
nadya.sendText(op.param1,"Hallo " + nadya.getContact(op.param2).displayName + "\nWelcome To ☞ " + str(ginfo.name) + " ☜" + "\nBudayakan Cek Note\nDan Semoga Betah Disini ^_^")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
nadya.sendMessage(c)
nadya.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "13269548",
"STKPKGID": "1329191",
"STKVER": "1" }
nadya.sendMessage(d)
print "MEMBER JOIN TO GROUP"
if op.type == 15:
if wait["Sambutan"] == True:
if op.param2 in Creator:
return
nadya.sendText(op.param1,"Good Bye " + nadya.getContact(op.param2).displayName + "\nSee You Next Time . . . (p′︵‵。) 🤗")
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "13269542",
"STKPKGID": "1329191",
"STKVER": "1" }
nadya.sendMessage(d)
print "MEMBER HAS LEFT THE GROUP"
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
nadya.sendText(msg.to,text)
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
nadya.sendText(msg.to,data['result']['response'].encode('utf-8'))
if 'MENTION' in msg.contentMetadata:
if wait["kickMention"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["Aku Bilang Jangan Ngetag Lagi " + cName + "\nAku Kick Kamu! Sorry, Byee!!!"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
nadya.kickoutFromGroup(msg.to,[msg.from_])
break
if 'MENTION' in msg.contentMetadata:
if wait["detectMention"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["Sekali tag, berarti naksir aim😅",cName + " Follow ya id smuleku @Fryant_BSS1",cName + " Iya sayank, I love you too muacchhh😘","aih, org ganteng, ditag mulu🙄", cName + " kaka mau nikung aku yah??🙄","kalau mau didesahin\npm aja kak😂 " + cName, "kangen ya sayank??🙄 " + cName, "Follow id smule ku ya ka @Fryant_BSS1 " + cName + "😘😘😘", "Kaka mau nikung aku yah " + cName + "😰","orang ganteng " + cName + " pasti ditag mulu 😆"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
break
if 'MENTION' in msg.contentMetadata:
if wait["detectMention2"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["kenapa sayank,, kangen yah??","jangan tag kalau ga mau aku hamilin","jangan tag " + cName + " tuan muda lagi meeting"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "157",
"STKPKGID": "2",
"STKVER": "100" }
nadya.sendMessage(msg)
break
if 'MENTION' in msg.contentMetadata:
if wait["detectMention3"] == True:
contact = nadya.getContact(msg.from_)
cName = contact.displayName
balas = ["Iya sayank " + cName + ", Syg kangen ya...aku lg kerja buat menata masa depan kita"]
balas1 = "Supaya aq dan kamu, bahagia selalu😘😘😘"
ret_ = random.choice(balas)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
nadya.sendText(msg.to,ret_)
nadya.sendText(msg.to,balas1)
nadya.sendImageWithURL(msg.to,image)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "11764508",
"STKPKGID": "6641",
"STKVER": "1" }
nadya.sendMessage(msg)
break
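# op.type 25: message sent from this account — the command interface.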
if op.type == 25:
msg = op.message
if msg.text in ["Bot on"]:
wait["Bot"] = True
nadya.sendText(msg.to,"Bot Sudah On Kembali.")
if op.type == 25:
if wait["Bot"] == True:
msg = op.message
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
filler = "『 Sticker Check 』\nSTKID : %s\nSTKPKGID : %s\nSTKVER : %s\n『 Link 』\nline://shop/detail/%s" % (stk_id,pkg_id,stk_ver,pkg_id)
nadya.sendText(msg.to, filler)
wait["sticker"] = False
else:
pass
if wait["alwaysRead"] == True:
if msg.toType == 0:
nadya.sendChatChecked(msg.from_,msg.id)
else:
nadya.sendChatChecked(msg.to,msg.id)
if msg.contentType == 16:
if wait['likeOn'] == True:
url = msg.contentMetadata["postEndUrl"]
nadya.like(url[25:58], url[66:], likeType=1005)
nadya.comment(url[25:58], url[66:], wait["comment"])
nadya.sendText(msg.to,"Like Success")
wait['likeOn'] = False
if msg.contentType == 13:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] not in admin:
if msg.contentMetadata["mid"] in wait["blacklist"]:
nadya.sendText(msg.to,"Sudah")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
nadya.sendText(msg.to,"Ditambahkan")
else:
nadya.sendText(msg.to,"Admin Detected~")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
nadya.sendText(msg.to,"Terhapus")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
nadya.sendText(msg.to,"Tidak Ada Black List")
elif wait["Contact"] == True:
msg.contentType = 0
nadya.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = nadya.getContact(msg.contentMetadata["mid"])
try:
cu = nadya.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
nadya.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
else:
contact = nadya.getContact(msg.contentMetadata["mid"])
try:
cu = nadya.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
nadya.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = nadya.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
nadya.sendText(msg.to,"[Group name]\n" + str(ginfo.name) + "\n\n[Gid]\n" + msg.to + "\n\n[Group creator]\n" + gCreator + "\n\n[Profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "members\nPending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
nadya.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Can not be used outside the group")
else:
nadya.sendText(msg.to,"Not for use less than group")
elif msg.text is None:
return
elif msg.text in ["Creator","Owner"]:
msg.contentType = 13
msg.contentMetadata = {'mid': tjia}
nadya.sendMessage(msg)
nadya.sendText(msg.to,"Itu Majikan Kami (^_^)")
elif msg.text in ["Group creator","Gcreator","gcreator"]:
ginfo = nadya.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
nadya.sendMessage(msg)
nadya.sendText(msg.to,"Itu Yang Buat Grup Ini")
elif msg.contentType == 16:
if wait["Timeline"] == True:
msg.contentType = 0
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
nadya.sendText(msg.to,msg.text)
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
nadya.findAndAddContactsByMid(target)
contact = nadya.getContact(target)
cu = nadya.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
nadya.sendText(msg.to,"Profile Picture " + contact.displayName)
nadya.sendImageWithURL(msg.to,image)
nadya.sendText(msg.to,"Cover " + contact.displayName)
nadya.sendImageWithURL(msg.to,path)
wait["steal"] = False
break
except:
pass
if msg.contentType == 13:
if wait["gift"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Gift"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,"Gift Sudah Terkirim!")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
wait['gift'] = False
break
except:
msg.contentMetadata = {'mid': target}
wait["gift"] = False
break
if msg.contentType == 13:
if wait["copy"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Copy"
break
else:
targets.append(copy)
if targets == []:
nadya.sendText(msg.to, "Not Found...")
pass
else:
for target in targets:
try:
nadya.CloneContactProfile(target)
nadya.sendText(msg.to, "Copied (^_^)")
wait['copy'] = False
break
except:
msg.contentMetadata = {'mid': target}
wait["copy"] = False
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = nadya.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
nadya.sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
nadya.findAndAddContactsByMid(target)
nadya.inviteIntoGroup(msg.to,[target])
nadya.sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
nadya.sendText(msg.to,"Limit Invite")
wait['invite'] = False
break
elif msg.text in ["Key creator","help creator","Fryant 1"]:
nadya.sendText(msg.to,creatorMessage)
elif msg.text in ["Key group","help group","Fryant 2"]:
nadya.sendText(msg.to,groupMessage)
elif msg.text in ["Key","Fryant","Help"]:
nadya.sendText(msg.to,helpMessage)
elif msg.text in ["Key self","help self","Fryant 3"]:
nadya.sendText(msg.to,selfMessage)
elif msg.text in ["Key bot","help bot","Fryant 4"]:
nadya.sendText(msg.to,botMessage)
elif msg.text in ["Key set","help set","Fryant 5"]:
nadya.sendText(msg.to,setMessage)
elif msg.text in ["Key media","help media","Fryant 6"]:
nadya.sendText(msg.to,mediaMessage)
elif msg.text in ["Key admin","help admin","Fryant 7"]:
nadya.sendText(msg.to,adminMessage)
elif msg.text in ["Fryant group"]:
gid = nadya.getGroupIdsJoined()
h = ""
jml = 0
for i in gid:
gn = nadya.getGroup(i).name
h += "♦【%s】\n" % (gn)
jml += 1
nadya.sendText(msg.to,"=======[List Group]=======\n"+ h +"\nTotal Group: "+str(jml))
elif "Ban group: " in msg.text:
grp = msg.text.replace("Ban group: ","")
gid = nadya.getGroupIdsJoined()
if msg.from_ in admin:
for i in gid:
h = nadya.getGroup(i).name
if h == grp:
wait["BlGroup"][i]=True
nadya.sendText(msg.to, "Success Ban Group : "+grp)
else:
pass
else:
nadya.sendText(msg.to, "Khusus Nadya")
elif msg.text in ["List ban","List ban group"]:
if msg.from_ in admin:
if wait["BlGroup"] == {}:
nadya.sendText(msg.to,"Tidak Ada")
else:
mc = ""
for gid in wait["BlGroup"]:
mc += "-> " +nadya.getGroup(gid).name + "\n"
nadya.sendText(msg.to,"===[Ban Group]===\n"+mc)
else:
nadya.sendText(msg.to, "Khusus Admin")
elif msg.text in ["Del ban: "]:
if msg.from_ in admin:
ng = msg.text.replace("Del ban: ","")
for gid in wait["BlGroup"]:
if nadya.getGroup(gid).name == ng:
del wait["BlGroup"][gid]
nadya.sendText(msg.to, "Success del ban "+ng)
else:
pass
else:
nadya.sendText(msg.to, "Khusus Nadya")
elif "Join group: " in msg.text:
ng = msg.text.replace("Join group: ","")
gid = nadya.getGroupIdsJoined()
try:
if msg.from_ in Creator:
for i in gid:
h = nadya.getGroup(i).name
if h == ng:
nadya.inviteIntoGroup(i,Creator)
nadya.sendText(msg.to,"Success Join To ["+ h +"] Group")
else:
pass
else:
nadya.sendText(msg.to,"Khusus Nadya")
except Exception as e:
nadya.sendText(msg.to, str(e))
elif "Leave group: " in msg.text:
ng = msg.text.replace("Leave group: ","")
gid = nadya.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = nadya.getGroup(i).name
if h == ng:
nadya.sendText(i,"Bot Di Paksa Keluar Oleh Owner!")
nadya.leaveGroup(i)
nadya.sendText(msg.to,"Success Left ["+ h +"] group")
else:
pass
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Leave all group" == msg.text:
gid = nadya.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
nadya.sendText(i,"Bot Di Paksa Keluar Oleh Owner!")
nadya.leaveGroup(i)
nadya.sendText(msg.to,"Success Leave All Group")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Pict group: " in msg.text:
saya = msg.text.replace('Pict group: ','')
gid = nadya.getGroupIdsJoined()
for i in gid:
h = nadya.getGroup(i).name
gna = nadya.getGroup(i)
if h == saya:
nadya.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["cancelall","Cancelall"]:
if msg.toType == 2:
X = nadya.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
nadya.cancelGroupInvitation(msg.to, gInviMids)
else:
nadya.sendText(msg.to,"Tidak Ada Yang Pending")
else:
nadya.sendText(msg.to,"Tidak Bisa Digunakan Diluar Group")
elif msg.text in ["Ourl","Url on"]:
if msg.toType == 2:
X = nadya.getGroup(msg.to)
X.preventJoinByTicket = False
nadya.updateGroup(X)
nadya.sendText(msg.to,"Url Sudah Aktif")
else:
nadya.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Curl","Url off"]:
if msg.toType == 2:
X = nadya.getGroup(msg.to)
X.preventJoinByTicket = True
nadya.updateGroup(X)
nadya.sendText(msg.to,"Url Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Join on","Autojoin on"]:
if msg.from_ in admin:
wait["AutoJoin"] = True
wait["AutoJoinCancel"] = False
nadya.sendText(msg.to,"Auto Join Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Join off","Autojoin off"]:
if msg.from_ in admin:
wait["AutoJoin"] = False
nadya.sendText(msg.to,"Auto Join Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Joincancel on","Autojoincancel on"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = True
wait["AutoJoin"] = False
nadya.sendText(msg.to,"Auto Join Cancel Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Joincancel off","Autojoincancel off"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = False
nadya.sendText(msg.to,"Auto Join Cancel Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon1 on"]:
if msg.from_ in admin:
wait["detectMention"] = True
wait["detectMention2"] = False
wait["detectMention3"] = False
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon1 Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon1 off"]:
if msg.from_ in admin:
wait["detectMention"] = False
nadya.sendText(msg.to,"Auto Respon1 Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon2 on"]:
if msg.from_ in admin:
wait["detectMention"] = False
wait["detectMention2"] = True
wait["detectMention3"] = False
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon2 Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon2 off"]:
if msg.from_ in admin:
wait["detectMention2"] = False
nadya.sendText(msg.to,"Auto Respon2 Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon3 on"]:
if msg.from_ in admin:
wait["detectMention"] = False
wait["detectMention2"] = False
wait["detectMention3"] = True
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon3 Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Respon3 off"]:
if msg.from_ in admin:
wait["detectMention3"] = False
nadya.sendText(msg.to,"Auto Respon3 Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Responkick on"]:
if msg.from_ in admin:
wait["kickMention"] = True
wait["detectMention"] = False
wait["detectMention2"] = False
wait["detectMention3"] = False
nadya.sendText(msg.to,"Auto Respon Kick Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Responkick off"]:
if msg.from_ in admin:
wait["kickMention"] = False
nadya.sendText(msg.to,"Auto Respon Kick Sudah Off")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Autocancel on"]:
if msg.from_ in admin:
wait["AutoCancel"] = True
nadya.sendText(msg.to,"Auto Cancel Sudah Aktif")
print wait["AutoCancel"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Autocancel off"]:
if msg.from_ in admin:
wait["AutoCancel"] = False
nadya.sendText(msg.to,"Auto Cancel Sudah Di Nonaktifkan")
print wait["AutoCancel"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Invitepro on"]:
if msg.from_ in admin:
wait["inviteprotect"] = True
nadya.sendText(msg.to,"Invite Protect Sudah Aktif")
print wait["inviteprotect"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Invitepro off"]:
if msg.from_ in admin:
wait["inviteprotect"] = False
nadya.sendText(msg.to,"Invite Protect Sudah Di Nonaktifkan")
print wait["inviteprotect"]
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Qr on" in msg.text:
if msg.from_ in admin:
wait["Qr"] = True
nadya.sendText(msg.to,"QR Protect Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Qr off" in msg.text:
if msg.from_ in admin:
wait["Qr"] = False
nadya.sendText(msg.to,"Qr Protect Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Autokick on" in msg.text:
if msg.from_ in admin:
wait["AutoKick"] = True
nadya.sendText(msg.to,"Auto Kick Sudah Aktif")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif "Autokick off" in msg.text:
if msg.from_ in admin:
wait["AutoKick"] = False
nadya.sendText(msg.to,"Auto Kick Sudah Di Nonaktifkan")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Allprotect on"]:
if msg.from_ in admin:
wait["AutoCancel"] = True
wait["inviteprotect"] = True
wait["AutoKick"] = True
wait["Qr"] = True
nadya.sendText(msg.to,"All Protect Sudah Aktif Semua")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["Allprotect off"]:
if msg.from_ in admin:
wait["AutoCancel"] = False
wait["inviteprotect"] = False
wait["AutoKick"] = False
wait["Qr"] = False
nadya.sendText(msg.to,"All Protect Sudah Di Nonaktifkan Semua")
else:
nadya.sendText(msg.to,"Khusus Nadya")
elif msg.text in ["K on","Contact on"]:
wait["Contact"] = True
nadya.sendText(msg.to,"Contact Sudah Aktif")
elif msg.text in ["K off","Contact off"]:
wait["Contact"] = False
nadya.sendText(msg.to,"Contact Sudah Di Nonaktifkan")
elif msg.text in ["Alwaysread on"]:
wait["alwaysRead"] = True
nadya.sendText(msg.to,"Always Read Sudah Aktif")
elif msg.text in ["Alwaysread off"]:
wait["alwaysRead"] = False
nadya.sendText(msg.to,"Always Read Sudah Di Nonaktifkan")
elif msg.text in ["Sambutan on"]:
if wait["Sambutan"] == True:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sambutan Di Aktifkanヾ(*´∀`*)ノ")
else:
wait["Sambutan"] = True
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sudah Onヽ(´▽`)/")
elif msg.text in ["Sambutan off"]:
if wait["Sambutan"] == False:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sambutan Di Nonaktifkan( ^∇^)")
else:
wait["Sambutan"] = False
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Sudah Off(p′︵‵。)")
elif "Sider on" in msg.text:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
nadya.sendText(msg.to,"Siap On Cek Sider")
elif "Sider off" in msg.text:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
nadya.sendText(msg.to, "Cek Sider Off")
else:
nadya.sendText(msg.to, "Heh Belom Di Set")
elif msg.text in ["Status"]:
md = ""
if wait["Sambutan"] == True: md+="╠➩✔️ Sambutan : On\n"
else:md+="╠➩❌ Sambutan : Off\n"
if wait["AutoJoin"] == True: md+="╠➩✔️ Auto Join : On\n"
else: md +="╠➩❌ Auto Join : Off\n"
if wait["AutoJoinCancel"] == True: md+="╠➩✔️ Auto Join Cancel : On\n"
else: md +="╠➩❌ Auto Join Cancel : Off\n"
if wait["Contact"] == True: md+="╠➩✔️ Info Contact : On\n"
else: md+="╠➩❌ Info Contact : Off\n"
if wait["AutoCancel"] == True:md+="╠➩✔️ Auto Cancel : On\n"
else: md+= "╠➩❌ Auto Cancel : Off\n"
if wait["inviteprotect"] == True:md+="╠➩✔️ Invite Protect : On\n"
else: md+= "╠➩❌ Invite Protect : Off\n"
if wait["Qr"] == True: md+="╠➩✔️ Qr Protect : On\n"
else:md+="╠➩❌ Qr Protect : Off\n"
if wait["AutoKick"] == True: md+="╠➩✔️ Auto Kick : On\n"
else:md+="╠➩❌ Auto Kick : Off\n"
if wait["alwaysRead"] == True: md+="╠➩✔️ Always Read : On\n"
else:md+="╠➩❌ Always Read: Off\n"
if wait["detectMention"] == True: md+="╠➩✔️ Auto Respon1 : On\n"
else:md+="╠➩❌ Auto Respon1 : Off\n"
if wait["detectMention2"] == True: md+="╠➩✔️ Auto Respon2 : On\n"
else:md+="╠➩❌ Auto Respon2 : Off\n"
if wait["detectMention3"] == True: md+="╠➩✔️ Auto Respon3 : On\n"
else:md+="╠➩❌ Auto Respon3 : Off\n"
if wait["kickMention"] == True: md+="╠➩✔️ Auto Respon Kick : On\n"
else:md+="╠➩❌ Auto Respon Kick : Off\n"
if wait["Sider"] == True: md+="╠➩✔️ Auto Sider : On\n"
else:md+="╠➩❌ Auto Sider: Off\n"
if wait["Simi"] == True: md+="╠➩✔️ Simisimi : On\n"
else:md+="╠➩❌ Simisimi: Off\n"
nadya.sendText(msg.to,"╔════════════════════\n""║ ☆☞ F R Y A N T S T A T U S ☜☆\n""╠════════════════════\n"+md+"╚════════════════════")
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
nadya.sendMessage(msg)
elif "Gift1 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift1 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift2 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift2 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '1360738'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift3 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift3 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '1395389'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift4 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift4 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1329191'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift5 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift5 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '9057'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift6 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift6 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '9167'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift7 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift7 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '7334'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift8 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift8 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift9 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift9 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1405277'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift10 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift10 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = nadya.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nadya.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
nadya.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
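# ---- Keyword sticker replies: common chat words below are answered with built-in LINE stickers (package id 1) ----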
elif msg.text.lower() in ["wkwkwk","wkwk","hahaha","haha"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '100',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["hehehe","hehe"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '10',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["galau"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '9',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["you","kau","kamu"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '7',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["marah","hadeuh","hadeh"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '6',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["please","pliss","mohon","tolong"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '4',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["haa","haaa","kaget"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '3',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["lucu","ngakak","lol"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '110',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["hmm","hmmm"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '101',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["tidur"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '1',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["gemes"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '2',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["cantik","imut"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '5',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["nyanyi","lalala"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '11',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["gugup"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '8',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["ok","oke","okay","oce","okee","sip","siph"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '13',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["mantab","mantap","nice","keren"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '14',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["ngejek"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '15',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["nangis","sedih"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '16',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["woi","kampret"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '102',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
elif msg.text.lower() in ["huft"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '104',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
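# ---- Mass mention: "tag all" / "tagall" mentions every group member in batches of 100 ----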
elif msg.text.lower() in ["tag all","tagall"]:
    group = nadya.getGroup(msg.to)
    nama = [contact.mid for contact in group.members]
    jml = len(nama)
    if jml > 500:
        print "Terlalu Banyak Men 500+"
    else:
        # Mention members in batches of 100; the old per-range copies
        # skipped every 100th member and the last one (off-by-one ranges).
        for i in range(0, jml, 100):
            summon(msg.to, nama[i:i + 100])
    cnt = Message()
    cnt.text = "Jumlah:\n" + str(jml) + " Members"
    cnt.to = msg.to
    nadya.sendMessage(cnt)
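# ---- Read-receipt tracker: "Setview" resets the per-group dataSeen log; "Viewseen" lists who read since the last checkpoint ----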
elif msg.text in ["Setview","Setpoint","Cctv"]:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
nadya.sendText(msg.to, "☆Checkpoint Checked☆")
print "Setview"
elif msg.text in ["Viewseen","Check","Ciduk","Cyduk"]:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
    # skip entries with no matching timestamp (the original referenced an undefined conName list here)
    pass
contactId = nadya.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "╔═════════════════════════\n║ ☆☞ LIST VIEWERS ☜☆\n╠═════════════════════════\n╠➩"
grp = '\n╠➩ '.join(str(f) for f in dataResult)
total = '\n╠═════════════════════════\n╠➩ Total %i Viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S')) + "\n╚═════════════════════════"
nadya.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
nadya.sendText(msg.to, "☆Auto Checkpoint☆")
else:
nadya.sendText(msg.to, "☆Belum Ada Viewers☆")
print "Viewseen"
elif "Kick " in msg.text:
if msg.from_ in admin:
if 'MENTION' in msg.contentMetadata.keys():
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
nadya.kickoutFromGroup(msg.to,[mention['M']])
elif "Set member: " in msg.text:
if msg.from_ in admin:
jml = msg.text.replace("Set member: ","")
wait["Members"] = int(jml)
nadya.sendText(msg.to, "Jumlah minimal member telah di set : "+jml)
elif "Add all" in msg.text:
thisgroup = nadya.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
nadya.findAndAddContactsByMids(mi_d)
nadya.sendText(msg.to,"Success Add all")
elif msg.text in ["Invite"]:
wait["invite"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Auto like"]:
wait["likeOn"] = True
nadya.sendText(msg.to,"Shere Post Kamu Yang Mau Di Like!")
elif msg.text in ["Steal contact"]:
wait["steal"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Giftbycontact"]:
wait["gift"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Copycontact"]:
wait["copy"] = True
nadya.sendText(msg.to,"Send Contact")
elif msg.text in ["Sticker on"]:
wait["sticker"] = True
nadya.sendText(msg.to,"Sticker ID Detect Already On.")
elif msg.text in ["Bot off"]:
wait["Bot"] = False
nadya.sendText(msg.to,"Bot Sudah Di Nonaktifkan.")
elif "Recover" in msg.text:
thisgroup = nadya.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
nadya.createGroup("Recover", mi_d)
nadya.sendText(msg.to,"Success recover")
elif ("Gn: " in msg.text):
if msg.toType == 2:
X = nadya.getGroup(msg.to)
X.name = msg.text.replace("Gn: ","")
nadya.updateGroup(X)
else:
nadya.sendText(msg.to,"It can't be used besides the group.")
elif "Kick: " in msg.text:
midd = msg.text.replace("Kick: ","")
if midd not in admin:
nadya.kickoutFromGroup(msg.to,[midd])
else:
nadya.sendText(msg.to,"Admin Detected")
elif "Invite: " in msg.text:
midd = msg.text.replace("Invite: ","")
nadya.findAndAddContactsByMid(midd)
nadya.inviteIntoGroup(msg.to,[midd])
elif "Invite creator" in msg.text:
midd = "u14f64e139a3817afaabe27d237afb36b"
nadya.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Welcome","welcome","Welkam","welkam","Wc","wc"]:
gs = nadya.getGroup(msg.to)
nadya.sendText(msg.to,"Selamat Datang Di "+ gs.name)
msg.contentType = 7
msg.contentMetadata={'STKID': '247',
'STKPKGID': '3',
'STKVER': '100'}
msg.text = None
nadya.sendMessage(msg)
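# ---- "Bc: <text>" broadcasts the text to every joined group (creator only) ----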
elif "Bc: " in msg.text:
bc = msg.text.replace("Bc: ","")
gid = nadya.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
nadya.sendText(i,"=======[BROADCAST]=======\n\n"+bc+"\n\nContact Me : line.me/ti/p/~nad_nad.")
nadya.sendText(msg.to,"Success BC BosQ")
else:
nadya.sendText(msg.to,"Khusus Admin")
elif msg.text in ["Cancel"]:
gid = nadya.getGroupIdsInvited()
for i in gid:
nadya.rejectGroupInvitation(i)
nadya.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = nadya.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
nadya.updateGroup(x)
gurl = nadya.reissueGroupTicket(msg.to)
nadya.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
nadya.sendText(msg.to,"Can't be used outside the group")
else:
nadya.sendText(msg.to,"Not for use less than group")
elif msg.text in ["timeline"]:
try:
url = nadya.activity(limit=5)
nadya.sendText(msg.to,url['result']['posts'][0]['postInfo']['postId'])
except Exception as E:
print E
elif msg.text in ["@bye","@Bye"]:
nadya.leaveGroup(msg.to)
elif msg.text in ["Absen"]:
nadya.sendText(msg.to,"Hadir!!")
elif msg.text.lower() in ["respon"]:
nadya.sendText(msg.to,responsename)
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
print("Speed")
elapsed_time = time.time() - start
nadya.sendText(msg.to, "Progress...")
nadya.sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text in ["Speed test"]:
start = time.time()
nadya.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
nadya.sendText(msg.to, "%sseconds" % (elapsed_time))
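# ---- Blacklist management: Ban/Unban by sent contact or @mention; the list persists in st2__b.json ----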
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
nadya.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
nadya.sendText(msg.to,"send contact")
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "@Ban by mention"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in admin:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendText(msg.to,"Succes BosQ")
except:
nadya.sendText(msg.to,"Error")
else:
nadya.sendText(msg.to,"Admin Detected~")
elif msg.text in ["Banlist","Ban list"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
nadya.sendText(msg.to,"Tidak Ada")
else:
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +nadya.getContact(mi_d).displayName + "\n"
nadya.sendText(msg.to,"===[Blacklist User]===\n"+mc)
elif "Unban @" in msg.text:
if msg.toType == 2:
print "@Unban by mention"
if msg.from_ in admin:
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
nadya.sendText(msg.to,"Succes BosQ")
except:
nadya.sendText(msg.to,"Succes BosQ")
elif msg.text.lower() == 'clear ban':
if msg.from_ in admin:
wait["blacklist"] = {}
nadya.sendText(msg.to,"ヽ( ^ω^)ノ└ ❉Unbanned All Success❉ ┐")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = nadya.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list += [mid for mid in gMembMids if mid == tag]
if matched_list == []:
nadya.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
nadya.kickoutFromGroup(msg.to,[jj])
nadya.sendText(msg.to,"Blacklist emang pantas tuk di usir")
else:
nadya.sendText(msg.to, "Khusus creator")
elif msg.text in ["Kill"]:
if msg.toType == 2:
if msg.from_ in admin:
group = nadya.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list += [mid for mid in gMembMids if mid == tag]
if matched_list == []:
nadya.sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
nadya.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Kickall" == msg.text:
if msg.from_ in Creator:
if msg.toType == 2:
print "Kick all member"
_name = msg.text.replace("Kickall","")
gs = nadya.getGroup(msg.to)
nadya.sendText(msg.to,"Dadaaah~")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found.")
else:
for target in targets:
if target not in admin:
try:
nadya.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except Exception as e:
nadya.sendText(msg.to,str(e))
nadya.inviteIntoGroup(msg.to, targets)
elif msg.text in ["Bot restart","Reboot"]:
if msg.from_ in Creator:
nadya.sendText(msg.to, "Bot Has Been Restarted...")
restart_program()
print "@Restart"
else:
nadya.sendText(msg.to, "No Access")
elif msg.text in ["Turn off"]:
if msg.from_ in Creator:
try:
import sys
sys.exit()
except:
pass
elif 'Crash' in msg.text:
if msg.from_ in Creator:
msg.contentType = 13
msg.contentMetadata = {'mid': "NADYA,'"}
nadya.sendMessage(msg)
elif "Mycopy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
nadya.CloneContactProfile(target)
nadya.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif msg.text in ["Mybackup"]:
try:
nadya.updateDisplayPicture(backup1.pictureStatus)
nadya.updateProfile(backup1)
nadya.sendText(msg.to, "Done (^_^)")
except Exception as e:
nadya.sendText(msg.to, str(e))
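# ---- Music commands: /musik, /lirik and /musrik query a third-party joox proxy API for audio and lyrics ----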
elif "/musik " in msg.text:
songname = msg.text.replace("/musik ","")
params = {"songname": songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
abc = song[3].replace('https://','http://')
nadya.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4])
nadya.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ")
nadya.sendAudioWithURL(msg.to,abc)
nadya.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0])
elif '/lirik ' in msg.text.lower():
try:
songname = msg.text.lower().replace('/lirik ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
nadya.sendText(msg.to, hasil)
except Exception as wak:
nadya.sendText(msg.to, str(wak))
elif "/musrik " in msg.text:
songname = msg.text.replace("/musrik ","")
params = {"songname": songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
abc = song[3].replace('https://','http://')
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
nadya.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ")
nadya.sendAudioWithURL(msg.to,abc)
nadya.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4] +"\n\n" + hasil)
nadya.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0])
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
nadya.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "cover @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("cover @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif "Cover @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Cover @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif "Cpp" in msg.text:
if msg.from_ in admin:
path = "nadya.jpg"
nadya.sendText(msg.to,"Update PP :")
nadya.sendImage(msg.to,path)
nadya.updateProfilePicture(path)
elif "pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("pp @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.getContact(target)
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif "Pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Pp @","")
_nametarget = cover.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = nadya.getContact(target)
nadya.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
nadya.sendText(msg.to,"Upload image failed.")
elif msg.text.lower() in ["pap owner","pap creator"]:
link = ["http://dl.profile.line-cdn.net/0hNPsZWL9WEX9OIz0lhyFuKHJmHxI5DRc3NkJaETwkRklqGwQoJkNbTGklHRo2G1B7cxFXH2NxSU03"]
pilih = random.choice(link)
nadya.sendImageWithURL(msg.to,pilih)
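# ---- Spam helpers: "Spam:" repeats text 10x in the chat; "Scbc"/"Cbc" message every contact directly ----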
elif "Spam: " in msg.text:
bctxt = msg.text.replace("Spam: ", "")
t = 10
while(t):
nadya.sendText(msg.to, (bctxt))
t-=1
elif "Scbc " in msg.text:
bctxt = msg.text.replace("Scbc ", "")
orang = nadya.getAllContactIds()
for manusia in orang:
    t = 20  # reset per contact; the original set this once, so only the first contact got messages
    while(t):
        nadya.sendText(manusia, (bctxt))
        t-=1
elif "Cbc " in msg.text:
broadcasttxt = msg.text.replace("Cbc ", "")
orang = nadya.getAllContactIds()
for manusia in orang:
nadya.sendText(manusia, (broadcasttxt))
elif '/ig ' in msg.text.lower():
try:
instagram = msg.text.lower().replace("/ig ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html.parser')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
tj = text1[0].replace("s150x150/","")
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "========INSTAGRAM INFO ========\n"
details = "\n========INSTAGRAM INFO ========"
nadya.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
nadya.sendImageWithURL(msg.to, tj)
except Exception as njer:
nadya.sendText(msg.to, str(njer))
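# ---- Instagram scrapers: /ig reads the og: meta tags of a profile page; Checkig walks window._sharedData for media URLs ----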
elif "Checkig " in msg.text:
separate = msg.text.split(" ")
user = msg.text.replace(separate[0] + " ","")
if user.startswith("@"):
user = user.replace("@","")
profile = "https://www.instagram.com/" + user
with requests.session() as x:
x.headers['user-agent'] = 'Mozilla/5.0'
end_cursor = ''
for count in range(1, 999):
print('PAGE: ', count)
r = x.get(profile, params={'max_id': end_cursor})
data = re.search(r'window._sharedData = (\{.+?});</script>', r.text).group(1)
j = json.loads(data)
for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']:
if node['is_video']:
page = 'https://www.instagram.com/p/' + node['code']
r = x.get(page)
url = re.search(r'"video_url": "([^"]+)"', r.text).group(1)
print(url)
nadya.sendVideoWithURL(msg.to,url)
else:
print (node['display_src'])
nadya.sendImageWithURL(msg.to,node['display_src'])
end_cursor = re.search(r'"end_cursor": "([^"]+)"', r.text).group(1)
elif 'Youtubelink: ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtubelink: ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
nadya.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
nadya.sendText(msg.to,"Could not find it")
elif 'Youtubevideo: ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtubevideo: ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class': 'yt-uix-tile-link'})
nadya.sendVideoWithURL(msg.to,'https://www.youtube.com' + results['href'])
except:
nadya.sendText(msg.to, "Could not find it")
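# ---- Text-to-speech: the Say-* commands render text to hasil.mp3 with gTTS in the requested language ----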
elif "Say-id " in msg.text:
say = msg.text.replace("Say-id ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
say = msg.text.replace("Say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
say = msg.text.replace("Say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "Say welcome" in msg.text:
gs = nadya.getGroup(msg.to)
say = msg.text.replace("Say welcome","Selamat Datang Di "+ gs.name)
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower() in ["hi","hai","halo","hallo"]:
beb = "Hi Sayang 😘 " +nadya.getContact(msg.from_).displayName + " starry heart"
nadya.sendText(msg.to,beb)
elif "playstore " in msg.text.lower():
tob = msg.text.lower().replace("playstore ","")
nadya.sendText(msg.to,"Sedang Mencari...")
nadya.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLink : https://play.google.com/store/search?q=" + tob)
nadya.sendText(msg.to,"Tuh Linknya Kak (^_^)")
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
nadya.sendText(msg.to, g.mid)
else:
pass
elif "Mybio " in msg.text:
string = msg.text.replace("Mybio ","")
if len(string.decode('utf-8')) <= 500:
profile = nadya.getProfile()
profile.statusMessage = string
nadya.updateProfile(profile)
nadya.sendText(msg.to,"Done")
elif "Myname " in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("Myname ","")
if len(string.decode('utf-8')) <= 5000:
profile = nadya.getProfile()
profile.displayName = string
nadya.updateProfile(profile)
nadya.sendText(msg.to,"Done")
elif msg.text.lower() in ["mymid","myid"]:
middd = "Name : " +nadya.getContact(msg.from_).displayName + "\nMid : " +msg.from_
nadya.sendText(msg.to,middd)
elif msg.text.lower() in ["me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
nadya.sendMessage(msg)
elif "/apakah " in msg.text:
apk = msg.text.replace("/apakah ","")
rnd = ["Ya","Tidak","Bisa Jadi","Mungkin"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/hari " in msg.text:
apk = msg.text.replace("/hari ","")
rnd = ["Senin","Selasa","Rabu","Kamis","Jumat","Sabtu","Minggu"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/berapa " in msg.text:
apk = msg.text.replace("/berapa ","")
rnd = ['10%','20%','30%','40%','50%','60%','70%','80%','90%','100%','0%']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/berapakah " in msg.text:
apk = msg.text.replace("/berapakah ","")
rnd = ['1','2','3','4','5','6','7','8','9','10','Tidak Ada']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif "/kapan " in msg.text:
apk = msg.text.replace("/kapan ","")
rnd = ["kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi","Tidak Akan Pernah"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
nadya.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
wait["Simi"] = True
nadya.sendText(msg.to," Simisimi Di Aktifkan")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
wait["Simi"] = False
nadya.sendText(msg.to,"Simisimi Di Nonaktifkan")
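# ---- "Image <query>" scrapes a Google Images result page and sends one random hit ----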
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
nadya.sendImageWithURL(msg.to,path)
except:
pass
elif "Youtubesearch: " in msg.text:
query = msg.text.replace("Youtubesearch: ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html.parser')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
nadya.sendText(msg.to,hasil)
print '[Command] Youtube Search'
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
nadya.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
nadya.sendText(msg.to, A)
elif "Tr-th " in msg.text:
isi = msg.text.replace("Tr-th ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
A = A.encode('utf-8')
nadya.sendText(msg.to, A)
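# ---- Translation pairs (Id@en etc.) scrape the Google Translate mobile page and read the result out of the class="t0" span ----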
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result)
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result)
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Th@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
nadya.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif msg.text in ["Friendlist"]:
contactlist = nadya.getAllContactIds()
kontak = nadya.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
nadya.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = nadya.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═════════"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
nadya.sendText(msg.to, msgs)
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = nadya.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
nadya.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = nadya.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
nadya.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getgroup image" in msg.text:
group = nadya.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
nadya.sendImageWithURL(msg.to,path)
elif "Urlgroup image" in msg.text:
group = nadya.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
nadya.sendText(msg.to,path)
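# ---- Profile lookups: the Get* commands resolve the @mention target mid from the MENTIONEES metadata ----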
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
try:
nadya.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
nadya.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
nadya.sendText(msg.to,"Profile Picture " + contact.displayName)
nadya.sendImageWithURL(msg.to,image)
nadya.sendText(msg.to,"Cover " + contact.displayName)
nadya.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = nadya.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
nadya.sendMessage(msg)
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
try:
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
nadya.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = nadya.getContact(key1)
cu = nadya.channel.getCover(key1)
try:
nadya.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
nadya.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot Sudah Berjalan Selama :\n"+waktu(eltime)
nadya.sendText(msg.to,van)
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
nadya.sendText(msg.to,"========== I N F O R M A S I ==========\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n========== I N F O R M A S I ==========")
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
# %m is zero-padded ('01'..'12'); int() it to index the month list (the old string compare never matched)
bln = bulan[int(bln) - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
nadya.sendText(msg.to, rst)
elif "SearchID: " in msg.text:
userid = msg.text.replace("SearchID: ","")
contact = nadya.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
nadya.sendMessage(msg)
elif "Searchid: " in msg.text:
userid = msg.text.replace("Searchid: ","")
contact = nadya.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
nadya.sendMessage(msg)
elif "removechat" in msg.text.lower():
if msg.from_ in admin:
try:
nadya.removeAllMessages(op.param2)
print "[Command] Remove Chat"
nadya.sendText(msg.to,"Done")
except Exception as error:
print error
nadya.sendText(msg.to,"Error")
elif "Invitemeto: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Invitemeto: ","")
if gid == "":
nadya.sendText(msg.to,"Invalid group id")
else:
try:
nadya.findAndAddContactsByMid(msg.from_)
nadya.inviteIntoGroup(gid,[msg.from_])
except:
nadya.sendText(msg.to,"Mungkin Saya Tidak Di Dalaam Grup Itu")
elif msg.text in ["Glist"]:
nadya.sendText(msg.to, "Tunggu Sebentar. . .")
gid = nadya.getGroupIdsJoined()
h = ""
for i in gid:
h += "╠➩" + "%s\n" % (nadya.getGroup(i).name +" ~> ["+str(len(nadya.getGroup(i).members))+"]")
nadya.sendText(msg.to,"╔═════════════════════════\n║ ☆☞ LIST GROUPS☜☆\n╠═════════════════════════\n" + h + "╠═════════════════════════" + "\n║ Total Groups =" +" ["+str(len(gid))+"]\n╚═════════════════════════")
elif msg.text in ["Glistmid"]:
gruplist = nadya.getGroupIdsJoined()
kontak = nadya.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
nadya.sendText(msg.to, msgs)
elif "Google: " in msg.text:
a = msg.text.replace("Google: ","")
b = urllib.quote(a)
nadya.sendText(msg.to,"Sedang Mencari...")
nadya.sendText(msg.to, "https://www.google.com/search?q=" + b)
nadya.sendText(msg.to,"Itu Dia Linknya. . .")
elif "Details group: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Details group: ","")
if gid in [""," "]:
nadya.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = nadya.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
nadya.sendText(msg.to,h)
except Exception as error:
nadya.sendText(msg.to,(error))
elif "Cancel invite: " in msg.text:
if msg.from_ in admin:
gids = msg.text.replace("Cancel invite: ","")
try:
    # look the group up first so its name can be reported; the old code
    # iterated over the Group object itself, which raised at runtime
    gid = nadya.getGroup(gids)
    nadya.rejectGroupInvitation(gids)
    nadya.sendText(msg.to,"Berhasil tolak undangan dari grup " + gid.name)
except:
    nadya.sendText(msg.to,"Grup tidak ditemukan")
elif msg.text in ["Acc invite"]:
if msg.from_ in admin:
gid = nadya.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = nadya.getGroup(i)
_list += gids.name
nadya.acceptGroupInvitation(i)
else:
break
if gid is not None:
nadya.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
nadya.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif "Gif gore" in msg.text:
gif = ("https://media.giphy.com/media/l2JHVsQiOZrNMGzYs/giphy.gif","https://media.giphy.com/media/OgltQ2hbilzJS/200w.gif")
gore = random.choice(gif)
nadya.sendGifWithURL(msg.to,gore)
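# ---- Mimic mode: Micadd/Micdel manage targets whose messages the bot will copy while mimic is on ----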
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
nadya.sendText(msg.to,"Target ditambahkan!")
break
except:
nadya.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
nadya.sendText(msg.to,"Target dihapuskan!")
break
except:
nadya.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
nadya.sendText(msg.to,"Nothing")
else:
mc = "Target Mimic User:\n"
for mi_d in mimic["target"]:
mc += "?? "+nadya.getContact(mi_d).displayName + "\n"
nadya.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
nadya.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
nadya.sendText(msg.to,"Mimic change to target")
else:
nadya.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
nadya.sendText(msg.to,"Reply Message on")
else:
nadya.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
nadya.sendText(msg.to,"Reply Message off")
else:
nadya.sendText(msg.to,"Sudah off")
if op.type == 59:
print op
except Exception as error:
print error
while True:
try:
Ops = nadya.fetchOps(nadya.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(nadya.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
nadya.Poll.rev = max(nadya.Poll.rev, Op.revision)
bot(Op)
| false
| true
|
7908573ec9d313bf3168d98f4cf7ead29d9e6104
| 1,590
|
py
|
Python
|
tests/test_job_slurm.py
|
boazbk/mle-scheduler
|
4cf83873d9beb75b19b2deb9baf4394931b9624d
|
[
"MIT"
] | 26
|
2021-11-12T15:06:54.000Z
|
2022-03-29T20:42:17.000Z
|
tests/test_job_slurm.py
|
boazbk/mle-scheduler
|
4cf83873d9beb75b19b2deb9baf4394931b9624d
|
[
"MIT"
] | 4
|
2021-12-12T20:37:40.000Z
|
2022-03-01T10:18:14.000Z
|
tests/test_job_slurm.py
|
boazbk/mle-scheduler
|
4cf83873d9beb75b19b2deb9baf4394931b9624d
|
[
"MIT"
] | 1
|
2021-12-13T17:24:02.000Z
|
2021-12-13T17:24:02.000Z
|
from mle_scheduler.cluster.slurm.helpers_launch_slurm import slurm_generate_startup_file
job_arguments = {
"num_logical_cores": 5,
"partition": "standard",
"job_name": "test_job",
"num_gpus": 1,
"gpu_type": "RTX2080",
"env_name": "test_env",
"use_conda_venv": True,
"script": "python run.py",
"memory_per_cpu": 2000,
"time_per_job": "10:05:02",
"modules_to_load": "nvidia/cuda/10.0",
}
job_script = """#!/bin/bash
#SBATCH --job-name=test_job
#SBATCH --output=log.txt
#SBATCH --error=err.err
#SBATCH --partition=standard
#SBATCH --cpus-per-task=5
#SBATCH --gres=gpu:RTX2080:1
#SBATCH --mem-per-cpu=2000
#SBATCH --time=10:05:02
echo "------------------------------------------------------------------------"
source ~/miniconda3/etc/profile.d/conda.sh
echo "------------------------------------------------------------------------"
. ~/.bashrc && conda activate test_env
echo "Successfully activated virtual environment - Ready to start job"
module load nvidia/cuda/10.0
echo "------------------------------------------------------------------------"
echo "Job started on" `date`
echo "------------------------------------------------------------------------"
python run.py
echo "------------------------------------------------------------------------"
echo "Job ended on" `date`
echo "------------------------------------------------------------------------"
conda deactivate
"""
def test_job_slurm():
startup_script = slurm_generate_startup_file(job_arguments).format(**job_arguments)
assert job_script == startup_script
return
| 32.44898
| 88
| 0.510063
|
from mle_scheduler.cluster.slurm.helpers_launch_slurm import slurm_generate_startup_file
job_arguments = {
"num_logical_cores": 5,
"partition": "standard",
"job_name": "test_job",
"num_gpus": 1,
"gpu_type": "RTX2080",
"env_name": "test_env",
"use_conda_venv": True,
"script": "python run.py",
"memory_per_cpu": 2000,
"time_per_job": "10:05:02",
"modules_to_load": "nvidia/cuda/10.0",
}
job_script = """#!/bin/bash
#SBATCH --job-name=test_job
#SBATCH --output=log.txt
#SBATCH --error=err.err
#SBATCH --partition=standard
#SBATCH --cpus-per-task=5
#SBATCH --gres=gpu:RTX2080:1
#SBATCH --mem-per-cpu=2000
#SBATCH --time=10:05:02
echo "------------------------------------------------------------------------"
source ~/miniconda3/etc/profile.d/conda.sh
echo "------------------------------------------------------------------------"
. ~/.bashrc && conda activate test_env
echo "Successfully activated virtual environment - Ready to start job"
module load nvidia/cuda/10.0
echo "------------------------------------------------------------------------"
echo "Job started on" `date`
echo "------------------------------------------------------------------------"
python run.py
echo "------------------------------------------------------------------------"
echo "Job ended on" `date`
echo "------------------------------------------------------------------------"
conda deactivate
"""
def test_job_slurm():
startup_script = slurm_generate_startup_file(job_arguments).format(**job_arguments)
assert job_script == startup_script
return
| true
| true
|
7908577f8c2a2ab188604d2f337300b351bf96eb
| 6,189
|
py
|
Python
|
venv/lib/python3.6/site-packages/maxminddb/decoder.py
|
jyoost/saleordjangooriginal
|
e7d0da1f6f653607580a9cd792acfc4917908649
|
[
"CC-BY-4.0"
] | 2
|
2019-12-06T15:40:14.000Z
|
2020-07-29T21:30:35.000Z
|
venv/lib/python3.6/site-packages/maxminddb/decoder.py
|
jyoost/saleor
|
e7d0da1f6f653607580a9cd792acfc4917908649
|
[
"CC-BY-4.0"
] | 13
|
2020-03-24T17:53:51.000Z
|
2022-02-10T20:01:14.000Z
|
venv/lib/python3.6/site-packages/maxminddb/decoder.py
|
jyoost/saleor
|
e7d0da1f6f653607580a9cd792acfc4917908649
|
[
"CC-BY-4.0"
] | null | null | null |
"""
maxminddb.decoder
~~~~~~~~~~~~~~~~~
This package contains code for decoding the MaxMind DB data section.
"""
from __future__ import unicode_literals
import struct
from maxminddb.compat import byte_from_int, int_from_bytes
from maxminddb.errors import InvalidDatabaseError
class Decoder(object): # pylint: disable=too-few-public-methods
"""Decoder for the data section of the MaxMind DB"""
def __init__(self, database_buffer, pointer_base=0, pointer_test=False):
"""Created a Decoder for a MaxMind DB
Arguments:
database_buffer -- an mmap'd MaxMind DB file.
pointer_base -- the base number to use when decoding a pointer
pointer_test -- used for internal unit testing of pointer code
"""
self._pointer_test = pointer_test
self._buffer = database_buffer
self._pointer_base = pointer_base
def _decode_array(self, size, offset):
array = []
for _ in range(size):
(value, offset) = self.decode(offset)
array.append(value)
return array, offset
def _decode_boolean(self, size, offset):
return size != 0, offset
def _decode_bytes(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset], new_offset
# pylint: disable=no-self-argument
# |-> I am open to better ways of doing this as long as it doesn't involve
# lots of code duplication.
def _decode_packed_type(type_code, type_size, pad=False):
# pylint: disable=protected-access, missing-docstring
def unpack_type(self, size, offset):
if not pad:
self._verify_size(size, type_size)
new_offset = offset + size
packed_bytes = self._buffer[offset:new_offset]
if pad:
packed_bytes = packed_bytes.rjust(type_size, b'\x00')
(value, ) = struct.unpack(type_code, packed_bytes)
return value, new_offset
return unpack_type
def _decode_map(self, size, offset):
container = {}
for _ in range(size):
(key, offset) = self.decode(offset)
(value, offset) = self.decode(offset)
container[key] = value
return container, offset
_pointer_value_offset = {
1: 0,
2: 2048,
3: 526336,
4: 0,
}
def _decode_pointer(self, size, offset):
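# Bits 3-4 of the size value select the pointer width (1-4 bytes);
# for widths 1-3 the low three bits extend the pointer value.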
pointer_size = ((size >> 3) & 0x3) + 1
new_offset = offset + pointer_size
pointer_bytes = self._buffer[offset:new_offset]
packed = pointer_bytes if pointer_size == 4 else struct.pack(
b'!c', byte_from_int(size & 0x7)) + pointer_bytes
unpacked = int_from_bytes(packed)
pointer = unpacked + self._pointer_base + \
self._pointer_value_offset[pointer_size]
if self._pointer_test:
return pointer, new_offset
(value, _) = self.decode(pointer)
return value, new_offset
def _decode_uint(self, size, offset):
new_offset = offset + size
uint_bytes = self._buffer[offset:new_offset]
return int_from_bytes(uint_bytes), new_offset
def _decode_utf8_string(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset].decode('utf-8'), new_offset
_type_decoder = {
1: _decode_pointer,
2: _decode_utf8_string,
3: _decode_packed_type(b'!d', 8), # double,
4: _decode_bytes,
5: _decode_uint, # uint16
6: _decode_uint, # uint32
7: _decode_map,
8: _decode_packed_type(b'!i', 4, pad=True), # int32
9: _decode_uint, # uint64
10: _decode_uint, # uint128
11: _decode_array,
14: _decode_boolean,
15: _decode_packed_type(b'!f', 4), # float,
}
def decode(self, offset):
"""Decode a section of the data section starting at offset
Arguments:
offset -- the location of the data structure to decode
"""
new_offset = offset + 1
(ctrl_byte, ) = struct.unpack(b'!B', self._buffer[offset:new_offset])
type_num = ctrl_byte >> 5
# Extended type
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
try:
decoder = self._type_decoder[type_num]
except KeyError:
raise InvalidDatabaseError('Unexpected type number ({type}) '
'encountered'.format(type=type_num))
(size, new_offset) = self._size_from_ctrl_byte(ctrl_byte, new_offset,
type_num)
return decoder(self, size, new_offset)
def _read_extended(self, offset):
(next_byte, ) = struct.unpack(b'!B', self._buffer[offset:offset + 1])
type_num = next_byte + 7
if type_num < 8:
raise InvalidDatabaseError(
'Something went horribly wrong in the decoder. An '
'extended type resolved to a type number < 8 '
'({type})'.format(type=type_num))
return type_num, offset + 1
def _verify_size(self, expected, actual):
if expected != actual:
raise InvalidDatabaseError(
'The MaxMind DB file\'s data section contains bad data '
'(unknown data type or corrupt data)')
def _size_from_ctrl_byte(self, ctrl_byte, offset, type_num):
size = ctrl_byte & 0x1f
if type_num == 1:
return size, offset
bytes_to_read = 0 if size < 29 else size - 28
new_offset = offset + bytes_to_read
size_bytes = self._buffer[offset:new_offset]
# Using unpack rather than int_from_bytes as it is about 200 lookups
# per second faster here.
if size == 29:
size = 29 + struct.unpack(b'!B', size_bytes)[0]
elif size == 30:
size = 285 + struct.unpack(b'!H', size_bytes)[0]
elif size > 30:
size = struct.unpack(b'!I', size_bytes.rjust(4,
b'\x00'))[0] + 65821
return size, new_offset
| 35.568966
| 78
| 0.598158
|
from __future__ import unicode_literals
import struct
from maxminddb.compat import byte_from_int, int_from_bytes
from maxminddb.errors import InvalidDatabaseError
class Decoder(object):
def __init__(self, database_buffer, pointer_base=0, pointer_test=False):
self._pointer_test = pointer_test
self._buffer = database_buffer
self._pointer_base = pointer_base
def _decode_array(self, size, offset):
array = []
for _ in range(size):
(value, offset) = self.decode(offset)
array.append(value)
return array, offset
def _decode_boolean(self, size, offset):
return size != 0, offset
def _decode_bytes(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset], new_offset
# lots of code duplication.
def _decode_packed_type(type_code, type_size, pad=False):
# pylint: disable=protected-access, missing-docstring
def unpack_type(self, size, offset):
if not pad:
self._verify_size(size, type_size)
new_offset = offset + size
packed_bytes = self._buffer[offset:new_offset]
if pad:
packed_bytes = packed_bytes.rjust(type_size, b'\x00')
(value, ) = struct.unpack(type_code, packed_bytes)
return value, new_offset
return unpack_type
def _decode_map(self, size, offset):
container = {}
for _ in range(size):
(key, offset) = self.decode(offset)
(value, offset) = self.decode(offset)
container[key] = value
return container, offset
_pointer_value_offset = {
1: 0,
2: 2048,
3: 526336,
4: 0,
}
def _decode_pointer(self, size, offset):
pointer_size = ((size >> 3) & 0x3) + 1
new_offset = offset + pointer_size
pointer_bytes = self._buffer[offset:new_offset]
packed = pointer_bytes if pointer_size == 4 else struct.pack(
b'!c', byte_from_int(size & 0x7)) + pointer_bytes
unpacked = int_from_bytes(packed)
pointer = unpacked + self._pointer_base + \
self._pointer_value_offset[pointer_size]
if self._pointer_test:
return pointer, new_offset
(value, _) = self.decode(pointer)
return value, new_offset
def _decode_uint(self, size, offset):
new_offset = offset + size
uint_bytes = self._buffer[offset:new_offset]
return int_from_bytes(uint_bytes), new_offset
def _decode_utf8_string(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset].decode('utf-8'), new_offset
_type_decoder = {
1: _decode_pointer,
2: _decode_utf8_string,
3: _decode_packed_type(b'!d', 8), # double,
4: _decode_bytes,
5: _decode_uint, # uint16
6: _decode_uint, # uint32
7: _decode_map,
8: _decode_packed_type(b'!i', 4, pad=True), # int32
9: _decode_uint, # uint64
10: _decode_uint, # uint128
11: _decode_array,
14: _decode_boolean,
15: _decode_packed_type(b'!f', 4), # float,
}
def decode(self, offset):
new_offset = offset + 1
(ctrl_byte, ) = struct.unpack(b'!B', self._buffer[offset:new_offset])
type_num = ctrl_byte >> 5
# Extended type
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
try:
decoder = self._type_decoder[type_num]
except KeyError:
raise InvalidDatabaseError('Unexpected type number ({type}) '
'encountered'.format(type=type_num))
(size, new_offset) = self._size_from_ctrl_byte(ctrl_byte, new_offset,
type_num)
return decoder(self, size, new_offset)
def _read_extended(self, offset):
(next_byte, ) = struct.unpack(b'!B', self._buffer[offset:offset + 1])
type_num = next_byte + 7
if type_num < 8:
raise InvalidDatabaseError(
'Something went horribly wrong in the decoder. An '
'extended type resolved to a type number < 8 '
'({type})'.format(type=type_num))
return type_num, offset + 1
def _verify_size(self, expected, actual):
if expected != actual:
raise InvalidDatabaseError(
'The MaxMind DB file\'s data section contains bad data '
'(unknown data type or corrupt data)')
def _size_from_ctrl_byte(self, ctrl_byte, offset, type_num):
size = ctrl_byte & 0x1f
if type_num == 1:
return size, offset
bytes_to_read = 0 if size < 29 else size - 28
new_offset = offset + bytes_to_read
size_bytes = self._buffer[offset:new_offset]
if size == 29:
size = 29 + struct.unpack(b'!B', size_bytes)[0]
elif size == 30:
size = 285 + struct.unpack(b'!H', size_bytes)[0]
elif size > 30:
size = struct.unpack(b'!I', size_bytes.rjust(4,
b'\x00'))[0] + 65821
return size, new_offset
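# A minimal standalone sketch (illustrative only, not part of the library)
# of the ctrl-byte layout decoded above: the top three bits select the data
# type, the low five bits hold the payload size.
if __name__ == '__main__':
    ctrl_byte = 0x44            # 0b010_00100: type 2 (UTF-8 string), size 4
    print(ctrl_byte >> 5)       # type number -> 2
    print(ctrl_byte & 0x1f)     # payload size -> 4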
| true
| true
|
7908595b5f84d4426174500ff5e23a1ae3aa1b6a
| 1,787
|
py
|
Python
|
src/evrythng/entities/action_types.py
|
jwpthng/evrythng-python-sdk
|
42d6ea540c1511a7a75a835fe59d9d47a252a534
|
[
"MIT"
] | null | null | null |
src/evrythng/entities/action_types.py
|
jwpthng/evrythng-python-sdk
|
42d6ea540c1511a7a75a835fe59d9d47a252a534
|
[
"MIT"
] | null | null | null |
src/evrythng/entities/action_types.py
|
jwpthng/evrythng-python-sdk
|
42d6ea540c1511a7a75a835fe59d9d47a252a534
|
[
"MIT"
] | null | null | null |
"""
Evrythng Docs
https://dashboard.evrythng.com/documentation/api/actiontypes
"""
from evrythng import assertions, utils
field_specs = {
'datatypes': {
'name': 'str',
'customFields': 'dict',
'tags': 'dict_of_str',
'scopes': 'dict',
},
'required': ('name',),
'readonly': ('id', 'createdAt', 'updatedAt'),
'writable': ('customFields', 'tags', 'scopes'),
}
def create_action_type(name, customFields=None, tags=None, scopes=None,
api_key=None, request_kwargs=None):
"""Create an Action Type"""
kwargs = locals()
del kwargs['request_kwargs']
api_key = kwargs.pop('api_key', None)
assertions.validate_field_specs(kwargs, field_specs)
return utils.request('POST', '/actions', data=kwargs, api_key=api_key,
**(request_kwargs or {}))
def delete_action_type(name, api_key=None, request_kwargs=None):
"""Delete an Action Type"""
assertions.datatype_str('name', name)
url = '/actions/{}'.format(name)
return utils.request('DELETE', url, api_key=api_key, **(request_kwargs or {}))
def update_action_type(name, customFields=None, tags=None, scopes=None,
api_key=None, request_kwargs=None):
"""Update an Action Type"""
kwargs = locals()
del kwargs['request_kwargs']
api_key = kwargs.pop('api_key', None)
assertions.validate_field_specs(kwargs, field_specs)
url = '/actions/{}'.format(name)
    return utils.request('PUT', url, data=kwargs, api_key=api_key,
                         **(request_kwargs or {}))
def list_action_types(api_key=None, request_kwargs=None):
"""List Action Types"""
url = '/actions'
return utils.request('GET', url, api_key=api_key, **(request_kwargs or {}))
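# A minimal usage sketch (hypothetical API key; not part of the published
# module), assuming the evrythng package is installed and configured:
if __name__ == '__main__':
    response = create_action_type('_visited', api_key='YOUR-OPERATOR-API-KEY')
    print(response)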
| 32.490909
| 82
| 0.632904
|
| true
| true
|
790859aab3f4aac6392a67ec8ce2261a7d4d198d
| 303
|
py
|
Python
|
derbot/names/apps.py
|
bdunnette/derbot-docker
|
b28fd0bf7f078dac1f72024dbd13c233d657c043
|
[
"MIT"
] | null | null | null |
derbot/names/apps.py
|
bdunnette/derbot-docker
|
b28fd0bf7f078dac1f72024dbd13c233d657c043
|
[
"MIT"
] | 11
|
2022-02-21T05:29:51.000Z
|
2022-03-31T05:33:10.000Z
|
derbot/names/apps.py
|
bdunnette/derbot-docker
|
b28fd0bf7f078dac1f72024dbd13c233d657c043
|
[
"MIT"
] | null | null | null |
from asyncio.log import logger
from django.apps import AppConfig
class NamesConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "derbot.names"
def ready(self):
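        # Importing the signals module here registers its receivers with
        # Django; doing so inside ready() is the documented AppConfig pattern
        # for wiring up signal handlers.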
import derbot.names.signals
logger.info(f"Signals loaded: {derbot.names.signals}")
| 21.642857
| 62
| 0.716172
|
| true
| true
|
790859b59c2b3dbfc8d52a5272d6eed638a2822b
| 2,815
|
py
|
Python
|
components/collector/src/collectors/azure_devops.py
|
Hedde/quality-time
|
bfbdb2c43d7883abaf54999411aa4ac4897e9043
|
[
"Apache-2.0"
] | null | null | null |
components/collector/src/collectors/azure_devops.py
|
Hedde/quality-time
|
bfbdb2c43d7883abaf54999411aa4ac4897e9043
|
[
"Apache-2.0"
] | null | null | null |
components/collector/src/collectors/azure_devops.py
|
Hedde/quality-time
|
bfbdb2c43d7883abaf54999411aa4ac4897e9043
|
[
"Apache-2.0"
] | null | null | null |
"""Azure Devops Server metric collector."""
from typing import List
import requests
from ..collector import Collector
from ..type import Entities, URL, Value
class AzureDevopsBase(Collector):
"""Base class for Azure DevOps collectors."""
def api_url(self) -> URL:
url = super().api_url()
return URL(f"{url}/_apis/wit/wiql?api-version=4.1")
def get_source_responses(self, api_url: URL) -> List[requests.Response]:
"""Override because we need to do a post request and need to separately get the entities."""
auth = self.basic_auth_credentials()
response = requests.post(
api_url, timeout=self.TIMEOUT, auth=auth, json=dict(query=self.parameters.get("wiql", "")))
ids = ",".join([str(work_item["id"]) for work_item in response.json().get("workItems", [])])
if not ids:
return [response]
work_items_url = URL(f"{super().api_url()}/_apis/wit/workitems?ids={ids}&api-version=4.1")
return [response, requests.get(work_items_url, timeout=self.TIMEOUT, auth=auth)]
def parse_source_responses_entities(self, responses: List[requests.Response]) -> Entities:
if len(responses) < 2:
return [] # We didn't get a response with work items, so assume there are none
return [
dict(
key=str(work_item["id"]), project=work_item["fields"]["System.TeamProject"],
title=work_item["fields"]["System.Title"], work_item_type=work_item["fields"]["System.WorkItemType"],
state=work_item["fields"]["System.State"],
url=work_item["url"]) for work_item in responses[1].json()["value"]]
class AzureDevopsIssues(AzureDevopsBase):
"""Collector to get issues from Azure Devops Server."""
def parse_source_responses_value(self, responses: List[requests.Response]) -> Value:
return str(len(responses[0].json()["workItems"]))
class AzureDevopsReadyUserStoryPoints(AzureDevopsBase):
"""Collector to get ready user story points from Azure Devops Server."""
def parse_source_responses_value(self, responses: List[requests.Response]) -> Value:
return str(round(sum(
[work_item["fields"].get("Microsoft.VSTS.Scheduling.StoryPoints", 0)
for work_item in responses[1].json()["value"]]))) if len(responses) > 1 else "0"
def parse_source_responses_entities(self, responses: List[requests.Response]) -> Entities:
entities = super().parse_source_responses_entities(responses)
# Add story points to the entities:
if len(responses) > 1:
for entity, work_item in zip(entities, responses[1].json()["value"]):
entity["story_points"] = work_item["fields"].get("Microsoft.VSTS.Scheduling.StoryPoints")
return entities
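# A minimal standalone sketch (hypothetical organisation URL and personal
# access token; not part of this module) of the two-step WIQL flow used by
# AzureDevopsBase above: post a query for work item ids, then fetch details.
if __name__ == "__main__":
    base = "https://dev.azure.com/org/project"  # hypothetical
    auth = ("", "personal-access-token")        # hypothetical PAT
    wiql = dict(query="SELECT [System.Id] FROM WorkItems")
    ids_response = requests.post(
        f"{base}/_apis/wit/wiql?api-version=4.1", auth=auth, json=wiql, timeout=10)
    ids = ",".join(str(item["id"]) for item in ids_response.json().get("workItems", []))
    if ids:
        items = requests.get(
            f"{base}/_apis/wit/workitems?ids={ids}&api-version=4.1", auth=auth, timeout=10)
        print(items.json()["value"])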
| 45.403226
| 117
| 0.662522
|
| true
| true
|
79085a6c06f94f9781c1a341cbcc3d429b30a260
| 17,381
|
py
|
Python
|
docs/examples/Moving_Platform_Simulation.py
|
Red-Portal/Stone-Soup-1
|
267621c86161a839da9b144c2745d28d9166d903
|
[
"MIT"
] | 157
|
2019-04-14T20:43:11.000Z
|
2022-03-30T08:30:33.000Z
|
docs/examples/Moving_Platform_Simulation.py
|
Red-Portal/Stone-Soup-1
|
267621c86161a839da9b144c2745d28d9166d903
|
[
"MIT"
] | 364
|
2019-04-18T15:54:49.000Z
|
2022-03-31T09:50:02.000Z
|
docs/examples/Moving_Platform_Simulation.py
|
Red-Portal/Stone-Soup-1
|
267621c86161a839da9b144c2745d28d9166d903
|
[
"MIT"
] | 86
|
2019-04-20T02:01:18.000Z
|
2022-03-28T01:03:11.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""
Multi-Sensor Moving Platform Simulation Example
===============================================
This example looks at how multiple sensors can be mounted on a single moving platform and how a defined moving
platform can be exploited as a sensor target.
"""
# %%
# Building a Simulated Multi-Sensor Moving Platform
# -------------------------------------------------
# The focus of this example is to show how to set up and configure a simulation environment in order to provide a
# multi-sensor moving platform; as such, the application of a tracker will not be covered in detail. For more information
# about trackers and how to configure them review of the tutorials and demonstrations is recommended.
#
# This example makes use of Stone Soup :class:`~.MovingPlatform`, :class:`~.MultiTransitionMovingPlatform` and
# :class:`~.Sensor` objects.
#
# In order to configure platforms, sensors and the simulation we will need to import some specific Stone Soup objects.
# As these have been introduced in previous tutorials they are imported upfront. New functionality within this example
# will be imported at the relevant point in order to draw attention to the new features.
# Some general imports and set up
from datetime import datetime
from datetime import timedelta
from matplotlib import pyplot as plt
import numpy as np
# Stone Soup imports:
from stonesoup.types.state import State, GaussianState
from stonesoup.types.array import StateVector
from stonesoup.types.array import CovarianceMatrix
from stonesoup.models.transition.linear import (
CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.predictor.particle import ParticlePredictor
from stonesoup.resampler.particle import SystematicResampler
from stonesoup.updater.particle import ParticleUpdater
from stonesoup.measures import Mahalanobis
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
from stonesoup.tracker.simple import SingleTargetTracker
# Define the simulation start time
start_time = datetime.now()
# %%
# Create a multi-sensor platform
# ------------------------------
# We have previously demonstrated how to create a :class:`~.FixedPlatform` which exploited a
# :class:`~.RadarRangeBearingElevation` *Sensor* in order to detect and track targets generated within a
# :class:`~.MultiTargetGroundTruthSimulator`.
#
# In this example we are going to create a moving platform which will be mounted with a pair of sensors and moves within
# a 6 dimensional state space according to the following :math:`\mathbf{x}`.
#
# .. math::
# \mathbf{x} = \begin{bmatrix}
# x\\ \dot{x}\\ y\\ \dot{y}\\ z\\ \dot{z} \end{bmatrix}
# = \begin{bmatrix}
# 0\\ 0\\ 0\\ 50\\ 8000\\ 0 \end{bmatrix}
#
# The platform will be initiated with a near constant velocity model which has been parameterised to have zero noise.
# Therefore the platform location at time :math:`k` is given by :math:`F_{k}x_{k-1}` where :math:`F_{k}` is given by:
#
# .. math::
# F_{k} = \begin{bmatrix}
# 1 & \triangle k & 0 & 0 & 0 & 0\\
# 0 & 1 & 0 & 0 & 0 & 0\\
# 0 & 0 & 1 & \triangle k & 0 & 0\\
# 0 & 0 & 0 & 1 & 0 & 0\\
# 0 & 0 & 0 & 0 & 1 & \triangle k \\
# 0 & 0 & 0 & 0 & 0 & 1\\
# \end{bmatrix}
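# As a quick illustrative check (an addition, not part of the original
# example): for an assumed one-second step, each ConstantVelocity dimension
# contributes a [[1, dt], [0, 1]] block, so F_k is block-diagonal with three
# such blocks.
dt = 1.0  # assumed time step
F_k = np.kron(np.eye(3), np.array([[1., dt], [0., 1.]]))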
# First import the Moving platform
from stonesoup.platform.base import MovingPlatform
# Define the initial platform position, in this case the origin
initial_loc = StateVector([[0], [0], [0], [50], [8000], [0]])
initial_state = State(initial_loc, start_time)
# Define transition model and position for 3D platform
transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# create our fixed platform
sensor_platform = MovingPlatform(states=initial_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
transition_model=transition_model)
# %%
# With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this
# case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor
# (e.g. an optical sensor, which has no capability to directly measure range).
#
# First we will create a radar which is capable of measuring bearing (:math:`\phi`), elevation (:math:`\theta`), range
# (:math:`r`) and range-rate (:math:`\dot{r}`) of the target platform.
# Import a range rate bearing elevation capable radar
from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate
# Create a radar sensor
radar_noise_covar = CovarianceMatrix(np.diag(
np.array([np.deg2rad(3), # Elevation
np.deg2rad(3), # Bearing
100., # Range
25.]))) # Range Rate
# radar mountings
radar_mounting_offsets = StateVector([10, 0, 0]) # e.g. nose cone
radar_rotation_offsets = StateVector([0, 0, 0])
# Mount the radar onto the platform
radar = RadarElevationBearingRangeRate(ndim_state=6,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
noise_covar=radar_noise_covar,
mounting_offset=radar_mounting_offsets,
rotation_offset=radar_rotation_offsets,
)
sensor_platform.add_sensor(radar)
# %%
# Our second sensor is a passive sensor, capable of measuring the bearing (:math:`\phi`) and elevation (:math:`\theta`)
# of the target platform. For the purposes of this example we will assume that the passive sensor is an imager.
# The imager sensor model is described by the following equations:
#
# .. math::
# \mathbf{z}_k = h(\mathbf{x}_k, \dot{\mathbf{x}}_k)
#
# where:
#
# * :math:`\mathbf{z}_k` is a measurement vector of the form:
#
# .. math::
# \mathbf{z}_k = \begin{bmatrix} \theta \\ \phi \end{bmatrix}
#
# * :math:`h` is a non - linear model function of the form:
#
# .. math::
# h(\mathbf{x}_k,\dot{\mathbf{x}}_k) = \begin{bmatrix}
# \arcsin(\mathcal{z} /\sqrt{\mathcal{x} ^ 2 + \mathcal{y} ^ 2 +\mathcal{z} ^ 2}) \\
#          \arctan(\mathcal{y},\mathcal{x}) \\
# \end{bmatrix} + \dot{\mathbf{x}}_k
#
# * the measurement noise term :math:`\dot{\mathbf{x}}_k` is Gaussian distributed with covariance :math:`R`, i.e.:
#
# .. math::
#           \dot{\mathbf{x}}_k \sim \mathcal{N}(0, R)
#
# .. math::
# R = \begin{bmatrix}
# \sigma_{\theta}^2 & 0 \\
# 0 & \sigma_{\phi}^2 \\
# \end{bmatrix}
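# As a quick illustrative check (an addition, not part of the original
# example): mapping an assumed relative Cartesian target position to the
# noiseless (elevation, bearing) measurement defined above.
tx, ty, tz = 100., 100., 10.  # assumed target position relative to the sensor
elevation = np.arcsin(tz / np.sqrt(tx**2 + ty**2 + tz**2))
bearing = np.arctan2(ty, tx)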
# Import a passive sensor capability
from stonesoup.sensor.passive import PassiveElevationBearing
imager_noise_covar = CovarianceMatrix(np.diag(np.array([np.deg2rad(0.05), # Elevation
np.deg2rad(0.05)]))) # Bearing
# imager mounting offset
imager_mounting_offsets = StateVector([0, 8, -1]) # e.g. wing mounted imaging pod
imager_rotation_offsets = StateVector([0, 0, 0])
# Mount the imager onto the platform
imager = PassiveElevationBearing(ndim_state=6,
mapping=(0, 2, 4),
noise_covar=imager_noise_covar,
mounting_offset=imager_mounting_offsets,
rotation_offset=imager_rotation_offsets,
)
sensor_platform.add_sensor(imager)
# %%
# Notice that we have added sensors to specific locations on the aircraft, defined by the mounting_offset parameter.
# The values in this array are defined in the platforms local coordinate frame of reference. So in this case an offset
# of :math:`[0, 8, -1]` means the sensor is located 8 meters to the right and 1 meter below the center point of the
# platform.
#
# Now that we have mounted the two sensors we can see that the platform object has both associated with it:
sensor_platform.sensors
# %%
# Create a Target Platform
# ------------------------
# There are two ways of generating a target in Stone Soup. Firstly, we can use the inbuilt ground-truth generator
# functionality within Stone Soup, demonstrated in the previous example, which creates a random target based on
# our selected parameters. The second method provides a means to generate a target which will perform specific
# behaviours, this is the approach we will take here.
#
# In order to create a target which moves in pre-defined sequences we exploit the fact that platforms can be used as
# sensor targets within a simulation, coupled with the :class:`~.MultiTransitionMovingPlatform` which enables a platform
# to be provided with a pre-defined list of transition models and transition times. The platform will continue to loop
# over the transition sequence provided until the simulation ends.
#
# When simulating sensor platforms it is important to note that within the simulation Stone Soup treats all platforms as
# potential targets. Therefore if we created multiple sensor platforms they would each *sense* all other platforms
# within the simulation (sensor-target geometry dependent).
#
# For this example we will create an air target which will fly a sequence of straight and level followed by a
# coordinated turn in the :math:`x-y` plane. This is configured such that the target will perform each manoeuvre for 8
# seconds, and it will turn through 45 degrees over the course of the turn manoeuvre.
# Import a Constant Turn model to enable target to perform basic manoeuvre
from stonesoup.models.transition.linear import ConstantTurn
straight_level = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# Configure the aircraft turn behaviour
turn_noise_diff_coeffs = np.array([0., 0.])
turn_rate = np.pi/32  # specified in radians per second
turn_model = ConstantTurn(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate)
# Configure turn model to maintain current altitude
turning = CombinedLinearGaussianTransitionModel(
[turn_model, ConstantVelocity(0.)])
manoeuvre_list = [straight_level, turning]
manoeuvre_times = [timedelta(seconds=8),
timedelta(seconds=8)]
# %%
# Now that we have created a list of manoeuvre behaviours and durations we can build our multi-transition moving
# platform. Because we intend for this platform to be a target we do not need to attach any sensors to it.
# Import a multi-transition moving platform
from stonesoup.platform.base import MultiTransitionMovingPlatform
initial_target_location = StateVector([[0], [-40], [1800], [0], [8000], [0]])
initial_target_state = State(initial_target_location, start_time)
target = MultiTransitionMovingPlatform(transition_models=manoeuvre_list,
transition_times=manoeuvre_times,
states=initial_target_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
sensors=None)
# %%
# Creating the simulator
# ----------------------
# Now that we have built our sensor platform and a target platform, we need to wrap them in a simulator. Because we do
# not want any additional ground truth objects, which is how most simulators work in Stone Soup, we need to use a
# :class:`~.DummyGroundTruthSimulator` which returns a set of empty ground truth paths with timestamps. These are then
# fed into a :class:`~.PlatformDetectionSimulator` with the two platforms we have already built.
# Import the required simulators
from stonesoup.simulator.simple import DummyGroundTruthSimulator
from stonesoup.simulator.platform import PlatformDetectionSimulator
# %%
# We now need to create an array of timestamps which starts at *datetime.now()* and enables the simulator to run for
# 24 one-second steps.
times = np.arange(0, 24, 1)  # 24 one-second steps
timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times]
truths = DummyGroundTruthSimulator(times=timestamps)
sim = PlatformDetectionSimulator(groundtruth=truths, platforms=[sensor_platform, target])
# %%
# Create a Tracker
# ------------------------------------
# Now that we have set up our sensor platform, target and simulation, we need to create a tracker. For this example we
# will use a Particle Filter as this enables us to handle the non-linear nature of the imaging sensor. In this example
# we will use an inflated constant noise model to account for target motion uncertainty.
#
# Note that we don't add a measurement model to the updater; this is because each sensor attaches its measurement model
# to each detection it generates. The tracker handles this internally by checking for a measurement model with each
# detection it receives and applying only the relevant measurement model.
target_transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(5), ConstantVelocity(5), ConstantVelocity(1)])
# First add a Particle Predictor
predictor = ParticlePredictor(target_transition_model)
# Now create a resampler and particle updater
resampler = SystematicResampler()
updater = ParticleUpdater(measurement_model=None,
resampler=resampler)
# Create a particle initiator
from stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator
single_point_initiator = SinglePointInitiator(
GaussianState([[0], [-40], [2000], [0], [8000], [0]], np.diag([10000, 1000, 10000, 1000, 10000, 1000])),
None)
initiator = GaussianParticleInitiator(number_particles=500,
initiator=single_point_initiator)
hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=np.inf)
data_associator = GNNWith2DAssignment(hypothesiser)
from stonesoup.deleter.time import UpdateTimeStepsDeleter
deleter = UpdateTimeStepsDeleter(time_steps_since_update=10)
# Create a Kalman single-target tracker
tracker = SingleTargetTracker(
initiator=initiator,
deleter=deleter,
detector=sim,
data_associator=data_associator,
updater=updater
)
# %%
# The final step is to iterate our tracker over the simulation and plot out the results. Because we have a bearing-
# only sensor it does not make sense to plot out the detections without animating the resulting plot. This
# animation shows the sensor platform (blue) moving towards the true target position (red). The estimated target
# position is shown in black, radar detections are shown in yellow while the bearing-only imager detections are
# coloured green.
from matplotlib import animation
import matplotlib
matplotlib.rcParams['animation.html'] = 'jshtml'
from stonesoup.models.measurement.nonlinear import CartesianToElevationBearingRangeRate
from stonesoup.functions import sphere2cart
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
frames = []
for time, ctracks in tracker:
artists = []
ax.set_xlabel("$East$")
ax.set_ylabel("$North$")
ax.set_ylim(0, 2250)
ax.set_xlim(-1000, 1000)
X = [state.state_vector[0] for state in sensor_platform]
Y = [state.state_vector[2] for state in sensor_platform]
artists.extend(ax.plot(X, Y, color='b'))
for detection in sim.detections:
if isinstance(detection.measurement_model, CartesianToElevationBearingRangeRate):
x, y = detection.measurement_model.inverse_function(detection)[[0, 2]]
color = 'y'
else:
r = 10000000
# extract the platform rotation offsets
_, el_offset, az_offset = sensor_platform.orientation
# obtain measurement angles and map to cartesian
e, a = detection.state_vector
x, y, _ = sphere2cart(r, a + az_offset, e + el_offset)
color = 'g'
X = [sensor_platform.state_vector[0], x]
Y = [sensor_platform.state_vector[2], y]
artists.extend(ax.plot(X, Y, color=color))
X = [state.state_vector[0] for state in target]
Y = [state.state_vector[2] for state in target]
artists.extend(ax.plot(X, Y, color='r'))
for track in ctracks:
X = [state.state_vector[0] for state in track]
Y = [state.state_vector[2] for state in track]
artists.extend(ax.plot(X, Y, color='k'))
frames.append(artists)
animation.ArtistAnimation(fig, frames)
# %%
# To increase your confidence with simulated platform targets it would be good practice to modify the target to fly
# pre-defined shapes, a race track oval for example. You could also experiment with different sensor performance levels
# in order to see at what point the tracker is no longer able to generate a reasonable estimate of the target location.
# %%
# Key points
# ----------
# 1. Platforms, static or moving, can be used as targets for sensor platforms.
# 2. Simulations can be built with only known platform behaviours when you want to test specific scenarios.
# 3. A tracker can be configured to exploit all sensor data created in a simulation.
| 44.452685
| 120
| 0.693976
|
| true
| true
|
79085aa28cf71f40671b37a9542ed50040078c69
| 7,070
|
py
|
Python
|
lib/model/rpn/proposal_layer.py
|
jinyu121/CIOD
|
37ab2ce14635c4b5cef2ea43b8439c5cd0e0f662
|
[
"MIT"
] | 33
|
2019-07-09T07:14:40.000Z
|
2022-02-17T03:00:36.000Z
|
lib/model/rpn/proposal_layer.py
|
busyboxs/pytorch-faster-rcnn
|
97cea8fce0de3d1d552de1ad9b941e85f2920efa
|
[
"MIT"
] | 7
|
2020-01-10T16:37:12.000Z
|
2021-11-26T02:02:13.000Z
|
lib/model/rpn/proposal_layer.py
|
busyboxs/pytorch-faster-rcnn
|
97cea8fce0de3d1d552de1ad9b941e85f2920efa
|
[
"MIT"
] | 7
|
2019-07-18T02:27:44.000Z
|
2020-04-28T09:41:27.000Z
|
from __future__ import absolute_import
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import math
import yaml
from model.utils.config import cfg
from .generate_anchors import generate_anchors
from .bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch
from model.nms.nms_wrapper import nms
import pdb
DEBUG = False
class _ProposalLayer(nn.Module):
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def __init__(self, feat_stride, scales, ratios):
super(_ProposalLayer, self).__init__()
self._feat_stride = feat_stride
self._anchors = torch.from_numpy(generate_anchors(scales=np.array(scales),
ratios=np.array(ratios))).float()
self._num_anchors = self._anchors.size(0)
# rois blob: holds R regions of interest, each is a 5-tuple
# (n, x1, y1, x2, y2) specifying an image batch index n and a
# rectangle (x1, y1, x2, y2)
# top[0].reshape(1, 5)
#
# # scores blob: holds scores for R regions of interest
# if len(top) > 1:
# top[1].reshape(1, 1, 1, 1)
def forward(self, input):
# Algorithm:
#
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs
scores = input[0][:, self._num_anchors:, :, :]
bbox_deltas = input[1]
im_info = input[2]
cfg_key = input[3]
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
batch_size = bbox_deltas.size(0)
feat_height, feat_width = scores.size(2), scores.size(3)
shift_x = np.arange(0, feat_width) * self._feat_stride
shift_y = np.arange(0, feat_height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose())
shifts = shifts.contiguous().type_as(scores).float()
A = self._num_anchors
K = shifts.size(0)
self._anchors = self._anchors.type_as(scores)
# anchors = self._anchors.view(1, A, 4) + shifts.view(1, K, 4).permute(1, 0, 2).contiguous()
anchors = self._anchors.view(1, A, 4) + shifts.view(K, 1, 4)
anchors = anchors.view(1, K * A, 4).expand(batch_size, K * A, 4)
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
bbox_deltas = bbox_deltas.permute(0, 2, 3, 1).contiguous()
bbox_deltas = bbox_deltas.view(batch_size, -1, 4)
# Same story for the scores:
scores = scores.permute(0, 2, 3, 1).contiguous()
scores = scores.view(batch_size, -1)
# Convert anchors into proposals via bbox transformations
proposals = bbox_transform_inv(anchors, bbox_deltas, batch_size)
# 2. clip predicted boxes to image
proposals = clip_boxes(proposals, im_info, batch_size)
# proposals = clip_boxes_batch(proposals, im_info, batch_size)
# assign the score to 0 if it's non keep.
# keep = self._filter_boxes(proposals, min_size * im_info[:, 2])
        # trim keep index to make it equal over batch
# keep_idx = torch.cat(tuple(keep_idx), 0)
# scores_keep = scores.view(-1)[keep_idx].view(batch_size, trim_size)
# proposals_keep = proposals.view(-1, 4)[keep_idx, :].contiguous().view(batch_size, trim_size, 4)
# _, order = torch.sort(scores_keep, 1, True)
scores_keep = scores
proposals_keep = proposals
_, order = torch.sort(scores_keep, 1, True)
output = scores.new(batch_size, post_nms_topN, 5).zero_()
for i in range(batch_size):
# # 3. remove predicted boxes with either height or width < threshold
# # (NOTE: convert min_size to input image scale stored in im_info[2])
proposals_single = proposals_keep[i]
scores_single = scores_keep[i]
# # 4. sort all (proposal, score) pairs by score from highest to lowest
# # 5. take top pre_nms_topN (e.g. 6000)
order_single = order[i]
if pre_nms_topN > 0 and pre_nms_topN < scores_keep.numel():
order_single = order_single[:pre_nms_topN]
proposals_single = proposals_single[order_single, :]
scores_single = scores_single[order_single].view(-1, 1)
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
keep_idx_i = nms(torch.cat((proposals_single, scores_single), 1), nms_thresh, force_cpu=not cfg.USE_GPU_NMS)
keep_idx_i = keep_idx_i.long().view(-1)
if post_nms_topN > 0:
keep_idx_i = keep_idx_i[:post_nms_topN]
proposals_single = proposals_single[keep_idx_i, :]
scores_single = scores_single[keep_idx_i, :]
# padding 0 at the end.
num_proposal = proposals_single.size(0)
output[i, :, 0] = i
output[i, :num_proposal, 1:] = proposals_single
return output
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _filter_boxes(self, boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, :, 2] - boxes[:, :, 0] + 1
hs = boxes[:, :, 3] - boxes[:, :, 1] + 1
keep = ((ws >= min_size.view(-1, 1).expand_as(ws)) & (hs >= min_size.view(-1, 1).expand_as(hs)))
return keep
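# A standalone sketch (illustrative only, not part of the original module) of
# the anchor broadcasting used in forward(): A base anchors added to K grid
# shifts expand to K * A anchors per image.
if __name__ == '__main__':
    A, K = 9, 4
    base_anchors = torch.zeros(1, A, 4)
    grid_shifts = torch.zeros(K, 1, 4)
    anchors = (base_anchors + grid_shifts).view(1, K * A, 4)
    print(anchors.shape)  # torch.Size([1, 36, 4])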
| 39.943503
| 120
| 0.603112
|
| true
| true
|
79085b5df8fb20ad6f1d7eee0d3aab21a34df364
| 87,196
|
py
|
Python
|
src/sage/matrix/matrix_space.py
|
kliem/sage-test-27122
|
cc60cfebc4576fed8b01f0fc487271bdee3cefed
|
[
"BSL-1.0"
] | null | null | null |
src/sage/matrix/matrix_space.py
|
kliem/sage-test-27122
|
cc60cfebc4576fed8b01f0fc487271bdee3cefed
|
[
"BSL-1.0"
] | 2
|
2021-04-02T20:43:29.000Z
|
2021-04-05T23:38:58.000Z
|
src/sage/matrix/matrix_space.py
|
kliem/sage-test-27122
|
cc60cfebc4576fed8b01f0fc487271bdee3cefed
|
[
"BSL-1.0"
] | 1
|
2020-07-23T10:29:56.000Z
|
2020-07-23T10:29:56.000Z
|
r"""
Matrix Spaces
You can create any space `\text{Mat}_{n\times m}(R)` of
either dense or sparse matrices with given number of rows and
columns over any commutative or noncommutative ring.
EXAMPLES::
sage: MS = MatrixSpace(QQ,6,6,sparse=True); MS
Full MatrixSpace of 6 by 6 sparse matrices over Rational Field
sage: MS.base_ring()
Rational Field
sage: MS = MatrixSpace(ZZ,3,5,sparse=False); MS
Full MatrixSpace of 3 by 5 dense matrices over Integer Ring
TESTS::
sage: matrix(RR,2,2,sparse=True)
[0.000000000000000 0.000000000000000]
[0.000000000000000 0.000000000000000]
sage: matrix(GF(11),2,2,sparse=True)
[0 0]
[0 0]
"""
# ****************************************************************************
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
# System imports
import sys
import operator
# Sage matrix imports
from . import matrix_generic_dense
from . import matrix_generic_sparse
# Sage imports
import sage.structure.coerce
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
import sage.rings.abc
import sage.rings.integer as integer
import sage.rings.finite_rings.finite_field_constructor
import sage.misc.latex as latex
import sage.modules.free_module
from sage.misc.lazy_attribute import lazy_attribute
from sage.categories.rings import Rings
from sage.categories.fields import Fields
from sage.categories.enumerated_sets import EnumeratedSets
from sage.misc.lazy_import import lazy_import
from sage.features import PythonModule
lazy_import('sage.matrix.matrix_gfpn_dense', ['Matrix_gfpn_dense'],
feature=PythonModule('sage.matrix.matrix_gfpn_dense', spkg='meataxe'))
_Rings = Rings()
_Fields = Fields()
def is_MatrixSpace(x):
"""
    Return whether ``x`` is an instance of ``MatrixSpace``.
EXAMPLES::
sage: from sage.matrix.matrix_space import is_MatrixSpace
sage: MS = MatrixSpace(QQ,2)
sage: A = MS.random_element()
sage: is_MatrixSpace(MS)
True
sage: is_MatrixSpace(A)
False
sage: is_MatrixSpace(5)
False
"""
return isinstance(x, MatrixSpace)
def get_matrix_class(R, nrows, ncols, sparse, implementation):
r"""
Return a matrix class according to the input.
.. NOTE::
This returns the base class without the category.
INPUT:
- ``R`` -- a base ring
- ``nrows`` -- number of rows
- ``ncols`` -- number of columns
- ``sparse`` -- (boolean) whether the matrix class should be sparse
- ``implementation`` -- (``None`` or string or a matrix class) a possible
implementation. See the documentation of the constructor of :class:`MatrixSpace`.
EXAMPLES::
sage: from sage.matrix.matrix_space import get_matrix_class
sage: get_matrix_class(ZZ, 4, 5, False, None)
<class 'sage.matrix.matrix_integer_dense.Matrix_integer_dense'>
sage: get_matrix_class(ZZ, 4, 5, True, None)
<class 'sage.matrix.matrix_integer_sparse.Matrix_integer_sparse'>
sage: get_matrix_class(ZZ, 3, 3, False, 'flint')
<class 'sage.matrix.matrix_integer_dense.Matrix_integer_dense'>
sage: get_matrix_class(ZZ, 3, 3, False, 'gap')
<class 'sage.matrix.matrix_gap.Matrix_gap'>
sage: get_matrix_class(ZZ, 3, 3, False, 'generic')
<class 'sage.matrix.matrix_generic_dense.Matrix_generic_dense'>
sage: get_matrix_class(GF(2^15), 3, 3, False, None)
<class 'sage.matrix.matrix_gf2e_dense.Matrix_gf2e_dense'>
sage: get_matrix_class(GF(2^17), 3, 3, False, None)
<class 'sage.matrix.matrix_generic_dense.Matrix_generic_dense'>
sage: get_matrix_class(GF(2), 2, 2, False, 'm4ri')
<class 'sage.matrix.matrix_mod2_dense.Matrix_mod2_dense'>
sage: get_matrix_class(GF(4), 2, 2, False, 'm4ri')
<class 'sage.matrix.matrix_gf2e_dense.Matrix_gf2e_dense'>
sage: get_matrix_class(GF(7), 2, 2, False, 'linbox-float')
<class 'sage.matrix.matrix_modn_dense_float.Matrix_modn_dense_float'>
sage: get_matrix_class(GF(7), 2, 2, False, 'linbox-double')
<class 'sage.matrix.matrix_modn_dense_double.Matrix_modn_dense_double'>
sage: get_matrix_class(RDF, 2, 2, False, 'numpy')
<class 'sage.matrix.matrix_real_double_dense.Matrix_real_double_dense'>
sage: get_matrix_class(CDF, 2, 3, False, 'numpy')
<class 'sage.matrix.matrix_complex_double_dense.Matrix_complex_double_dense'>
sage: get_matrix_class(GF(25,'x'), 4, 4, False, 'meataxe') # optional: meataxe
<class 'sage.matrix.matrix_gfpn_dense.Matrix_gfpn_dense'>
sage: get_matrix_class(IntegerModRing(3), 4, 4, False, 'meataxe') # optional: meataxe
<class 'sage.matrix.matrix_gfpn_dense.Matrix_gfpn_dense'>
sage: get_matrix_class(IntegerModRing(4), 4, 4, False, 'meataxe')
Traceback (most recent call last):
...
ValueError: 'meataxe' matrix can only deal with finite fields of order < 256
sage: get_matrix_class(GF(next_prime(255)), 4, 4, False, 'meataxe')
Traceback (most recent call last):
...
ValueError: 'meataxe' matrix can only deal with finite fields of order < 256
sage: get_matrix_class(ZZ, 3, 5, False, 'crazy_matrix')
Traceback (most recent call last):
...
ValueError: unknown matrix implementation 'crazy_matrix' over Integer Ring
sage: get_matrix_class(GF(3), 2, 2, False, 'm4ri')
Traceback (most recent call last):
...
ValueError: 'm4ri' matrices are only available for fields of characteristic 2 and order <= 65536
sage: get_matrix_class(Zmod(2**30), 2, 2, False, 'linbox-float')
Traceback (most recent call last):
...
ValueError: 'linbox-float' matrices can only deal with order < 256
sage: get_matrix_class(Zmod(2**30), 2, 2, False, 'linbox-double')
Traceback (most recent call last):
...
ValueError: 'linbox-double' matrices can only deal with order < 8388608
sage: type(matrix(SR, 2, 2, 0))
<class 'sage.matrix.matrix_symbolic_dense.Matrix_symbolic_dense'>
sage: type(matrix(GF(7), 2, range(4)))
<class 'sage.matrix.matrix_modn_dense_float.Matrix_modn_dense_float'>
sage: type(matrix(GF(16007), 2, range(4)))
<class 'sage.matrix.matrix_modn_dense_double.Matrix_modn_dense_double'>
sage: type(matrix(CBF, 2, range(4)))
<class 'sage.matrix.matrix_complex_ball_dense.Matrix_complex_ball_dense'>
sage: type(matrix(GF(2), 2, range(4)))
<class 'sage.matrix.matrix_mod2_dense.Matrix_mod2_dense'>
sage: type(matrix(GF(64,'z'), 2, range(4)))
<class 'sage.matrix.matrix_gf2e_dense.Matrix_gf2e_dense'>
sage: type(matrix(GF(125,'z'), 2, range(4))) # optional: meataxe
<class 'sage.matrix.matrix_gfpn_dense.Matrix_gfpn_dense'>
"""
if isinstance(implementation, type):
return implementation
if not sparse:
if implementation is None:
# Choose default implementation:
if R is sage.rings.integer_ring.ZZ:
try:
from . import matrix_integer_dense
except ImportError:
pass
else:
return matrix_integer_dense.Matrix_integer_dense
elif R is sage.rings.rational_field.QQ:
try:
from . import matrix_rational_dense
except ImportError:
pass
else:
return matrix_rational_dense.Matrix_rational_dense
elif isinstance(R, sage.rings.abc.RealDoubleField):
try:
from . import matrix_real_double_dense
except ImportError:
pass
else:
return matrix_real_double_dense.Matrix_real_double_dense
            elif isinstance(R, sage.rings.abc.ComplexDoubleField):
                try:
                    from . import matrix_complex_double_dense
                except ImportError:
                    pass
                else:
                    return matrix_complex_double_dense.Matrix_complex_double_dense
elif sage.rings.finite_rings.finite_field_constructor.is_FiniteField(R):
if R.order() == 2:
try:
from . import matrix_mod2_dense
except ImportError:
pass
else:
return matrix_mod2_dense.Matrix_mod2_dense
if R.characteristic() == 2 and R.order() <= 65536: # 65536 == 2^16
try:
from . import matrix_gf2e_dense
except ImportError:
pass
else:
return matrix_gf2e_dense.Matrix_gf2e_dense
if (not R.is_prime_field()) and R.order() < 256:
try:
from . import matrix_gfpn_dense
return matrix_gfpn_dense.Matrix_gfpn_dense
except ImportError:
pass
if isinstance(R, sage.rings.abc.IntegerModRing):
from . import matrix_modn_dense_double, matrix_modn_dense_float
if R.order() < matrix_modn_dense_float.MAX_MODULUS:
return matrix_modn_dense_float.Matrix_modn_dense_float
if R.order() < matrix_modn_dense_double.MAX_MODULUS:
return matrix_modn_dense_double.Matrix_modn_dense_double
if isinstance(R, sage.rings.abc.NumberField_cyclotomic):
from . import matrix_cyclo_dense
return matrix_cyclo_dense.Matrix_cyclo_dense
try:
from sage.symbolic.ring import SR
except ImportError:
pass
else:
if R is SR:
try:
from . import matrix_symbolic_dense
except ImportError:
pass
else:
return matrix_symbolic_dense.Matrix_symbolic_dense
if isinstance(R, sage.rings.abc.ComplexBallField):
try:
from . import matrix_complex_ball_dense
except ImportError:
pass
else:
return matrix_complex_ball_dense.Matrix_complex_ball_dense
try:
from sage.rings.polynomial import polynomial_ring, multi_polynomial_ring_base
except ImportError:
pass
else:
if polynomial_ring.is_PolynomialRing(R) and R.base_ring() in _Fields:
try:
from . import matrix_polynomial_dense
except ImportError:
pass
else:
return matrix_polynomial_dense.Matrix_polynomial_dense
elif multi_polynomial_ring_base.is_MPolynomialRing(R) and R.base_ring() in _Fields:
try:
from . import matrix_mpolynomial_dense
except ImportError:
pass
else:
return matrix_mpolynomial_dense.Matrix_mpolynomial_dense
# The fallback
return matrix_generic_dense.Matrix_generic_dense
# Deal with request for a specific implementation
if implementation == 'flint':
if R is sage.rings.integer_ring.ZZ:
from . import matrix_integer_dense
return matrix_integer_dense.Matrix_integer_dense
if R is sage.rings.rational_field.QQ:
from . import matrix_rational_dense
return matrix_rational_dense.Matrix_rational_dense
raise ValueError("'flint' matrices are only available over the integers or the rationals")
if implementation == 'm4ri':
if R.is_field() and R.characteristic() == 2 and R.order() <= 65536:
if R.order() == 2:
from . import matrix_mod2_dense
return matrix_mod2_dense.Matrix_mod2_dense
from . import matrix_gf2e_dense
return matrix_gf2e_dense.Matrix_gf2e_dense
raise ValueError("'m4ri' matrices are only available for fields of characteristic 2 and order <= 65536")
if implementation == 'meataxe':
if R.is_field() and R.order() < 256:
return Matrix_gfpn_dense
raise ValueError("'meataxe' matrix can only deal with finite fields of order < 256")
if implementation == 'numpy':
if R is sage.rings.real_double.RDF:
from . import matrix_real_double_dense
return matrix_real_double_dense.Matrix_real_double_dense
if R is sage.rings.complex_double.CDF:
from . import matrix_complex_double_dense
return matrix_complex_double_dense.Matrix_complex_double_dense
raise ValueError("'numpy' matrices are only available over RDF and CDF")
if implementation == 'rational':
if isinstance(R, sage.rings.abc.NumberField_cyclotomic):
from . import matrix_cyclo_dense
return matrix_cyclo_dense.Matrix_cyclo_dense
raise ValueError("'rational' matrices are only available over a cyclotomic field")
if implementation == 'linbox-float':
from . import matrix_modn_dense_float
if R.order() < matrix_modn_dense_float.MAX_MODULUS:
return matrix_modn_dense_float.Matrix_modn_dense_float
raise ValueError("'linbox-float' matrices can only deal with order < %s" % matrix_modn_dense_float.MAX_MODULUS)
if implementation == 'linbox-double':
from . import matrix_modn_dense_double
if R.order() < matrix_modn_dense_double.MAX_MODULUS:
return matrix_modn_dense_double.Matrix_modn_dense_double
raise ValueError("'linbox-double' matrices can only deal with order < %s" % matrix_modn_dense_double.MAX_MODULUS)
if implementation == 'generic':
return matrix_generic_dense.Matrix_generic_dense
if implementation == 'gap':
from .matrix_gap import Matrix_gap
return Matrix_gap
raise ValueError("unknown matrix implementation %r over %r" % (implementation, R))
# By now, we are dealing with sparse matrices
if implementation is not None:
raise ValueError("cannot choose an implementation for sparse matrices")
if isinstance(R, sage.rings.abc.IntegerModRing):
try:
from . import matrix_modn_sparse
except ImportError:
pass
else:
if R.order() < matrix_modn_sparse.MAX_MODULUS:
return matrix_modn_sparse.Matrix_modn_sparse
if sage.rings.rational_field.is_RationalField(R):
try:
from . import matrix_rational_sparse
except ImportError:
pass
else:
return matrix_rational_sparse.Matrix_rational_sparse
if sage.rings.integer_ring.is_IntegerRing(R):
try:
from . import matrix_integer_sparse
except ImportError:
pass
else:
return matrix_integer_sparse.Matrix_integer_sparse
# the fallback
return matrix_generic_sparse.Matrix_generic_sparse
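# A minimal sketch of the dispatch above, assuming a standard Sage
# session (the expected classes follow directly from the branches of
# ``get_matrix_class``)::
#
#     sage: from sage.matrix.matrix_space import get_matrix_class
#     sage: get_matrix_class(ZZ, 3, 3, False, None)
#     <class 'sage.matrix.matrix_integer_dense.Matrix_integer_dense'>
#     sage: get_matrix_class(ZZ, 3, 3, True, None)
#     <class 'sage.matrix.matrix_integer_sparse.Matrix_integer_sparse'>
#     sage: get_matrix_class(ZZ, 3, 3, False, 'generic')
#     <class 'sage.matrix.matrix_generic_dense.Matrix_generic_dense'>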
class MatrixSpace(UniqueRepresentation, Parent):
"""
The space of matrices of given size and base ring
EXAMPLES:
Some examples of square 2 by 2 rational matrices::
sage: MS = MatrixSpace(QQ, 2)
sage: MS.dimension()
4
sage: MS.dims()
(2, 2)
sage: B = MS.basis()
sage: list(B)
[
[1 0] [0 1] [0 0] [0 0]
[0 0], [0 0], [1 0], [0 1]
]
sage: B[0,0]
[1 0]
[0 0]
sage: B[0,1]
[0 1]
[0 0]
sage: B[1,0]
[0 0]
[1 0]
sage: B[1,1]
[0 0]
[0 1]
sage: A = MS.matrix([1,2,3,4])
sage: A
[1 2]
[3 4]
The above matrix ``A`` can be multiplied by a 2 by 3 integer matrix::
sage: MS2 = MatrixSpace(ZZ, 2, 3)
sage: B = MS2.matrix([1,2,3,4,5,6])
sage: A * B
[ 9 12 15]
[19 26 33]
Check categories::
sage: MatrixSpace(ZZ,10,5)
Full MatrixSpace of 10 by 5 dense matrices over Integer Ring
sage: MatrixSpace(ZZ,10,5).category()
Category of infinite enumerated finite dimensional modules with basis over
(euclidean domains and infinite enumerated sets and metric spaces)
sage: MatrixSpace(ZZ,10,10).category()
Category of infinite enumerated finite dimensional algebras with basis over
(euclidean domains and infinite enumerated sets and metric spaces)
sage: MatrixSpace(QQ,10).category()
Category of infinite finite dimensional algebras with basis over
(number fields and quotient fields and metric spaces)
TESTS::
sage: MatrixSpace(ZZ, 1, 2^63)
Traceback (most recent call last):
...
OverflowError: number of rows and columns may be at most...
sage: MatrixSpace(ZZ, 2^100, 10)
Traceback (most recent call last):
...
OverflowError: number of rows and columns may be at most...
Check that different implementations play together as expected::
sage: M1 = MatrixSpace(ZZ, 2, implementation='flint')
sage: M2 = MatrixSpace(ZZ, 2, implementation='generic')
sage: type(M1(range(4)))
<class 'sage.matrix.matrix_integer_dense.Matrix_integer_dense'>
sage: type(M2(range(4)))
<class 'sage.matrix.matrix_generic_dense.Matrix_generic_dense'>
sage: M1(M2.an_element())
[ 0 1]
[-1 2]
sage: M2(M1.an_element())
[ 0 1]
[-1 2]
sage: all(((A.get_action(B) is not None) == (A is B)) for A in [M1,M2] for B in [M1,M2])
True
Check that libgap matrices over finite fields are working properly::
sage: M2 = MatrixSpace(GF(2), 5, implementation='gap')
sage: M2.one()
[1 0 0 0 0]
[0 1 0 0 0]
[0 0 1 0 0]
[0 0 0 1 0]
[0 0 0 0 1]
sage: m = M2.random_element()
sage: M1 = MatrixSpace(GF(2), 5)
sage: M1(m * m) == M1(m) * M1(m)
True
"""
@staticmethod
def __classcall__(cls, base_ring, nrows, ncols=None, sparse=False, implementation=None, **kwds):
"""
Normalize the arguments to call the ``__init__`` constructor.
See the documentation in ``__init__``.
TESTS::
sage: M1 = MatrixSpace(QQ, 2)
sage: M2 = MatrixSpace(QQ, 2)
sage: M3 = MatrixSpace(QQ, 2, implementation='flint')
sage: M1 is M2 and M1 is M3
True
::
sage: M = MatrixSpace(ZZ, 10, implementation="flint")
sage: M
Full MatrixSpace of 10 by 10 dense matrices over Integer Ring
sage: loads(M.dumps()) is M
True
sage: MatrixSpace(ZZ, 10, implementation="foobar")
Traceback (most recent call last):
...
ValueError: unknown matrix implementation 'foobar' over Integer Ring
Check that :trac:`29466` is fixed::
sage: class MyMatrixSpace(MatrixSpace):
....: @staticmethod
....: def __classcall__(cls, base_ring, nrows, ncols=None, my_option=True, sparse=False, implementation=None):
....: return super(MyMatrixSpace, cls).__classcall__(cls, base_ring, nrows, ncols=ncols, my_option=my_option, sparse=sparse, implementation=implementation)
....:
....: def __init__(self, base_ring, nrows, ncols, sparse, implementation, my_option=True):
....: super(MyMatrixSpace, self).__init__(base_ring, nrows, ncols, sparse, implementation)
....: self._my_option = my_option
sage: MS1 = MyMatrixSpace(ZZ, 2)
sage: MS1._my_option
True
sage: MS2 = MyMatrixSpace(ZZ, 2, my_option=False)
sage: MS2._my_option
False
"""
if base_ring not in _Rings:
raise TypeError("base_ring (=%s) must be a ring"%base_ring)
nrows = int(nrows)
if ncols is None:
ncols = nrows
else:
ncols = int(ncols)
sparse = bool(sparse)
if nrows < 0:
raise ArithmeticError("nrows must be nonnegative")
if ncols < 0:
raise ArithmeticError("ncols must be nonnegative")
if nrows > sys.maxsize or ncols > sys.maxsize:
raise OverflowError("number of rows and columns may be at most %s" % sys.maxsize)
matrix_cls = get_matrix_class(base_ring, nrows, ncols, sparse, implementation)
return super(MatrixSpace, cls).__classcall__(
cls, base_ring, nrows, ncols, sparse, matrix_cls, **kwds)
def __init__(self, base_ring, nrows, ncols, sparse, implementation):
r"""
INPUT:
- ``base_ring`` -- the base ring
- ``nrows`` - (nonnegative integer) the number of rows
- ``ncols`` - (nonnegative integer, default ``nrows``) the number of
columns
- ``sparse`` - (boolean, default ``False``) whether or not matrices
are given a sparse representation
- ``implementation`` -- (optional, a string or a matrix class) a possible
implementation. Depending on the base ring, the string can be
- ``'generic'`` - over any base ring
- ``'flint'`` - for integers and rationals
- ``'meataxe'`` - for finite fields; requires the optional meataxe package
- ``'m4ri'`` - for fields of characteristic 2, using the M4RI library
- ``'linbox-float'`` - for integer mod rings up to `2^8 = 256`
- ``'linbox-double'`` - for integer mod rings up to `2^23 = 8388608`
- ``'numpy'`` - for real and complex floating point numbers
EXAMPLES::
sage: MatrixSpace(QQ, 2)
Full MatrixSpace of 2 by 2 dense matrices over Rational Field
sage: MatrixSpace(ZZ, 3, 2)
Full MatrixSpace of 3 by 2 dense matrices over Integer Ring
sage: MatrixSpace(ZZ, 3, sparse=False)
Full MatrixSpace of 3 by 3 dense matrices over Integer Ring
sage: MatrixSpace(ZZ,10,5)
Full MatrixSpace of 10 by 5 dense matrices over Integer Ring
sage: MatrixSpace(ZZ,10,5).category()
Category of infinite enumerated finite dimensional modules with basis over
(euclidean domains and infinite enumerated sets and metric spaces)
sage: MatrixSpace(ZZ,10,10).category()
Category of infinite enumerated finite dimensional algebras with basis over
(euclidean domains and infinite enumerated sets and metric spaces)
sage: MatrixSpace(QQ,10).category()
Category of infinite finite dimensional algebras with basis over (number fields and quotient fields and metric spaces)
TESTS:
We test that in the real or complex double dense case,
conversion from the base ring is done by a call morphism.
Note that by :trac:`9138`, other algebras usually
get a conversion map by multiplication with the one element.
::
sage: MS = MatrixSpace(RDF, 2, 2)
sage: MS.convert_map_from(RDF)
Coercion map:
From: Real Double Field
To: Full MatrixSpace of 2 by 2 dense matrices over Real Double Field
sage: MS = MatrixSpace(CDF, 2, 2)
sage: MS.convert_map_from(CDF)
Coercion map:
From: Complex Double Field
To: Full MatrixSpace of 2 by 2 dense matrices over Complex Double Field
We check that :trac:`10095` is fixed::
sage: M = Matrix(QQ, [[1 for dummy in range(125)]])
sage: V = M.right_kernel()
sage: V
Vector space of degree 125 and dimension 124 over Rational Field
Basis matrix:
124 x 125 dense matrix over Rational Field
sage: MatrixSpace(ZZ,20,20)(1) \ MatrixSpace(ZZ,20,1).random_element()
20 x 1 dense matrix over Rational Field (use the '.str()' method to see the entries)
sage: MatrixSpace(ZZ,200,200)(1) \ MatrixSpace(ZZ,200,1).random_element()
200 x 1 dense matrix over Rational Field (use the '.str()' method to see the entries)
sage: A = MatrixSpace(RDF,1000,1000).random_element()
sage: B = MatrixSpace(RDF,1000,1000).random_element()
sage: C = A * B
We check that :trac:`18186` is fixed::
sage: MatrixSpace(ZZ,0,3) in FiniteSets()
True
sage: MatrixSpace(Zmod(4),2) in FiniteSets()
True
sage: MatrixSpace(ZZ,2) in Sets().Infinite()
True
"""
# Checks of input data are supposed to be done in __classcall__
assert isinstance(implementation, type)
self.Element = implementation
self.__nrows = nrows
self.__ncols = ncols
self.__is_sparse = sparse
from sage.categories.all import Modules, Algebras
if nrows == ncols:
category = Algebras(base_ring.category())
else:
category = Modules(base_ring.category())
category = category.WithBasis().FiniteDimensional()
if not self.__nrows or not self.__ncols:
is_finite = True
else:
try:
is_finite = base_ring.is_finite()
except (AttributeError, NotImplementedError):
is_finite = None
if is_finite is True:
category = category.Finite()
elif is_finite is False:
category = category.Infinite()
if base_ring in EnumeratedSets():
category = category.Enumerated()
Parent.__init__(self, base_ring, category=category)
def cardinality(self):
r"""
Return the number of elements in ``self``.
EXAMPLES::
sage: MatrixSpace(GF(3), 2, 3).cardinality()
729
sage: MatrixSpace(ZZ, 2).cardinality()
+Infinity
sage: MatrixSpace(ZZ, 0, 3).cardinality()
1
"""
if not self.__nrows or not self.__ncols:
from sage.rings.integer_ring import ZZ
return ZZ.one()
else:
return self.base_ring().cardinality() ** (self.__nrows * self.__ncols)
def characteristic(self):
r"""
Return the characteristic.
EXAMPLES::
sage: MatrixSpace(ZZ, 2).characteristic()
0
sage: MatrixSpace(GF(9), 0).characteristic()
3
"""
return self.base_ring().characteristic()
def _has_default_implementation(self):
r"""
EXAMPLES::
sage: MatrixSpace(ZZ, 2, implementation='generic')._has_default_implementation()
False
sage: MatrixSpace(ZZ, 2, implementation='flint')._has_default_implementation()
True
"""
default = get_matrix_class(self.base_ring(), self.nrows(), self.ncols(), self.is_sparse(), None)
return self.Element is default
@lazy_attribute
def transposed(self):
"""
The transposed matrix space, having the same base ring and sparseness,
but with the number of rows and columns swapped.
EXAMPLES::
sage: MS = MatrixSpace(GF(3), 7, 10)
sage: MS.transposed
Full MatrixSpace of 10 by 7 dense matrices over Finite Field of size 3
sage: MS = MatrixSpace(GF(3), 7, 7)
sage: MS.transposed is MS
True
sage: M = MatrixSpace(ZZ, 2, 3)
sage: M.transposed
Full MatrixSpace of 3 by 2 dense matrices over Integer Ring
"""
return MatrixSpace(self._base, self.__ncols, self.__nrows,
self.__is_sparse, self.Element)
@lazy_attribute
def _copy_zero(self):
"""
Is it faster to copy a zero matrix or is it faster to create a
new matrix from scratch?
EXAMPLES::
sage: MS = MatrixSpace(GF(2),20,20)
sage: MS._copy_zero
False
sage: MS = MatrixSpace(GF(3),20,20)
sage: MS._copy_zero
True
sage: MS = MatrixSpace(GF(3),200,200)
sage: MS._copy_zero
False
sage: MS = MatrixSpace(ZZ,200,200)
sage: MS._copy_zero
False
sage: MS = MatrixSpace(ZZ,30,30)
sage: MS._copy_zero
True
sage: MS = MatrixSpace(QQ,200,200)
sage: MS._copy_zero
False
sage: MS = MatrixSpace(QQ,20,20)
sage: MS._copy_zero
False
"""
if self.__is_sparse:
return False
elif self.Element is sage.matrix.matrix_mod2_dense.Matrix_mod2_dense:
return False
elif self.Element is sage.matrix.matrix_rational_dense.Matrix_rational_dense:
return False
elif self.__nrows > 40 and self.__ncols > 40:
return False
else:
return True
def _element_constructor_(self, entries, **kwds):
"""
Construct an element of ``self`` from ``entries``.
EXAMPLES::
sage: k = GF(7); G = MatrixGroup([matrix(k,2,[1,1,0,1]), matrix(k,2,[1,0,0,2])])
sage: g = G.0
sage: MatrixSpace(k,2)(g)
[1 1]
[0 1]
::
sage: MS = MatrixSpace(ZZ,2,4)
sage: M2 = MS(range(8)); M2
[0 1 2 3]
[4 5 6 7]
sage: M2 == MS(M2.rows())
True
::
sage: MS = MatrixSpace(ZZ,2,4, sparse=True)
sage: M2 = MS(range(8)); M2
[0 1 2 3]
[4 5 6 7]
sage: M2 == MS(M2.rows())
True
::
sage: MS = MatrixSpace(ZZ,2,2, sparse=True)
sage: MS([1,2,3,4])
[1 2]
[3 4]
sage: MS = MatrixSpace(ZZ, 2)
sage: g = Gamma0(5)([1,1,0,1])
sage: MS(g)
[1 1]
[0 1]
::
sage: MS = MatrixSpace(ZZ,2,2, sparse=True)
sage: mat = MS(); mat
[0 0]
[0 0]
sage: mat.is_mutable()
True
sage: mat2 = mat.change_ring(QQ); mat2.is_mutable()
True
TESTS:
Ensure that :trac:`12020` is fixed::
sage: x = polygen(QQ)
sage: for R in [ZZ, QQ, RealField(100), ComplexField(100), RDF, CDF,
....: SR, GF(2), GF(11), GF(2^8,'a'), GF(3^19,'a'),
....: NumberField(x^3+2,'a'), CyclotomicField(4),
....: PolynomialRing(QQ,'x'), PolynomialRing(CC,2,'x')]:
....: A = MatrixSpace(R,60,30,sparse=False)(0)
....: B = A.augment(A)
....: A = MatrixSpace(R,60,30,sparse=True)(0)
....: B = A.augment(A)
Check that :trac:`13012` is fixed::
sage: m = zero_matrix(2, 3)
sage: m
[0 0 0]
[0 0 0]
sage: M = MatrixSpace(ZZ, 3, 5)
sage: M.zero()
[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]
sage: M(m)
Traceback (most recent call last):
...
ValueError: inconsistent number of rows: should be 3 but got 2
sage: M.matrix(m)
Traceback (most recent call last):
...
ValueError: inconsistent number of rows: should be 3 but got 2
Check that :trac:`15110` is fixed::
sage: S.<t> = LaurentSeriesRing(ZZ)
sage: MS = MatrixSpace(S,1,1)
sage: MS([[t]]) # given as a list of lists
[t]
sage: MS([t]) # given as a list of coefficients
[t]
sage: MS(t) # given as a scalar matrix
[t]
Calling a matrix space `M` with a matrix in `M` as argument
returns the original matrix unless ``copy=True`` is specified
(:trac:`31078`)::
sage: m = Matrix([[0, 1], [2, 3]])
sage: M = m.parent()
sage: M(m) is m
True
sage: M(m, copy=True) is m
False
"""
return self.element_class(self, entries, **kwds)
def change_ring(self, R):
"""
Return matrix space over R with otherwise same parameters as ``self``.
INPUT:
- ``R`` - ring
OUTPUT: a matrix space
EXAMPLES::
sage: Mat(QQ,3,5).change_ring(GF(7))
Full MatrixSpace of 3 by 5 dense matrices over Finite Field of size 7
"""
try:
return self.__change_ring[R]
except AttributeError:
self.__change_ring = {}
except KeyError:
pass
M = MatrixSpace(R, self.__nrows, self.__ncols, self.__is_sparse)
self.__change_ring[R] = M
return M
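# The ``try``/``except`` chain in ``change_ring`` above implements a
# lazily created per-instance cache. A minimal standalone sketch of the
# same idiom (``_cache`` and ``compute`` are illustrative names only):
#
#     def cached_change(self, R):
#         try:
#             return self._cache[R]      # fast path: already computed
#         except AttributeError:         # first call: no cache dict yet
#             self._cache = {}
#         except KeyError:               # cache exists, R not seen yet
#             pass
#         self._cache[R] = M = compute(R)
#         return M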
def base_extend(self, R):
"""
Return base extension of this matrix space to R.
INPUT:
- ``R`` - ring
OUTPUT: a matrix space
EXAMPLES::
sage: Mat(ZZ,3,5).base_extend(QQ)
Full MatrixSpace of 3 by 5 dense matrices over Rational Field
sage: Mat(QQ,3,5).base_extend(GF(7))
Traceback (most recent call last):
...
TypeError: no base extension defined
"""
if R.has_coerce_map_from(self.base_ring()):
return self.change_ring(R)
raise TypeError("no base extension defined")
def construction(self):
"""
EXAMPLES::
sage: A = matrix(ZZ, 2, [1..4], sparse=True)
sage: A.parent().construction()
(MatrixFunctor, Integer Ring)
sage: A.parent().construction()[0](QQ['x'])
Full MatrixSpace of 2 by 2 sparse matrices over Univariate Polynomial Ring in x over Rational Field
sage: parent(A/2)
Full MatrixSpace of 2 by 2 sparse matrices over Rational Field
"""
from sage.categories.pushout import MatrixFunctor
return MatrixFunctor(self.__nrows, self.__ncols, is_sparse=self.is_sparse()), self.base_ring()
def _get_action_(self, S, op, self_on_left):
r"""
Return the action of S on ``self``.
INPUT:
- ``S`` -- a parent
- ``op`` -- an operator
- ``self_on_left`` -- whether the operation is on left or on right
EXAMPLES::
sage: V = QQ^(2,3)
sage: W1 = QQ^(3,4); W2 = QQ^(2,2)
sage: V.get_action(W1, operator.mul)
Left action by Full MatrixSpace of 2 by 3 dense matrices over Rational Field on Full MatrixSpace of 3 by 4 dense matrices over Rational Field
sage: V.get_action(W2, operator.mul)
sage: V.get_action(W1, operator.mul, self_on_left=False)
sage: V.get_action(W2, operator.mul, self_on_left=False)
Left action by Full MatrixSpace of 2 by 2 dense matrices over Rational Field on Full MatrixSpace of 2 by 3 dense matrices over Rational Field
::
sage: V2 = QQ^2; V3 = QQ^3
sage: V.get_action(V3, operator.mul)
Left action by Full MatrixSpace of 2 by 3 dense matrices over Rational Field on Vector space of dimension 3 over Rational Field
sage: V.get_action(V2, operator.mul)
sage: V.get_action(V3, operator.mul, self_on_left=False)
sage: V.get_action(V2, operator.mul, self_on_left=False)
Right action by Full MatrixSpace of 2 by 3 dense matrices over Rational Field on Vector space of dimension 2 over Rational Field
::
sage: V.get_action(ZZ, operator.mul)
Right scalar multiplication by Integer Ring on Full MatrixSpace of 2 by 3 dense matrices over Rational Field
sage: V.get_action(ZZ, operator.mul, self_on_left=False)
Left scalar multiplication by Integer Ring on Full MatrixSpace of 2 by 3 dense matrices over Rational Field
"""
try:
try:
from sage.schemes.generic.homset import SchemeHomset_generic
from sage.schemes.generic.homset import SchemeHomset_points
except ImportError:
SchemeHomset_generic = SchemeHomset_points = None
if op is operator.mul:
from . import action as matrix_action
if self_on_left:
if is_MatrixSpace(S):
# matrix multiplications
return matrix_action.MatrixMatrixAction(self, S)
elif sage.modules.free_module.is_FreeModule(S):
return matrix_action.MatrixVectorAction(self, S)
elif isinstance(S, SchemeHomset_points):
return matrix_action.MatrixSchemePointAction(self, S)
elif isinstance(S, SchemeHomset_generic):
return matrix_action.MatrixPolymapAction(self, S)
else:
# action of base ring
return sage.structure.coerce.RightModuleAction(S, self)
else:
if is_MatrixSpace(S):
# matrix multiplications
return matrix_action.MatrixMatrixAction(S, self)
elif sage.modules.free_module.is_FreeModule(S):
return matrix_action.VectorMatrixAction(self, S)
elif isinstance(S, SchemeHomset_generic):
return matrix_action.PolymapMatrixAction(self, S)
else:
# action of base ring
return sage.structure.coerce.LeftModuleAction(S, self)
except TypeError:
return None
def _coerce_map_from_base_ring(self):
"""
Return a coercion map from the base ring of ``self``.
.. NOTE::
This is only called for algebras of square matrices.
EXAMPLES::
sage: MS1 = MatrixSpace(QQ, 3)
sage: MS1.coerce_map_from(QQ)
Coercion map:
From: Rational Field
To: Full MatrixSpace of 3 by 3 dense matrices over Rational Field
sage: MS1.coerce_map_from(ZZ)
Composite map:
From: Integer Ring
To: Full MatrixSpace of 3 by 3 dense matrices over Rational Field
Defn: Natural morphism:
From: Integer Ring
To: Rational Field
then
Coercion map:
From: Rational Field
To: Full MatrixSpace of 3 by 3 dense matrices over Rational Field
sage: MS2 = MatrixSpace(ZZ, 3)
sage: MS2.coerce_map_from(QQ)
sage: MS2.coerce_map_from(ZZ)
Coercion map:
From: Integer Ring
To: Full MatrixSpace of 3 by 3 dense matrices over Integer Ring
sage: MatrixSpace(QQ, 1, 3).coerce_map_from(QQ)
"""
return self._generic_coerce_map(self.base_ring())
def _coerce_map_from_(self, S):
"""
Canonical coercion from ``S`` to this matrix space.
EXAMPLES::
sage: MS1 = MatrixSpace(QQ, 3)
sage: MS2 = MatrixSpace(ZZ, 3)
sage: MS1.coerce_map_from(MS2)
Coercion map:
From: Full MatrixSpace of 3 by 3 dense matrices over Integer Ring
To: Full MatrixSpace of 3 by 3 dense matrices over Rational Field
sage: MS2.coerce_map_from(MS1)
There are also coercions possible from matrix group and
arithmetic subgroups::
sage: MS = MatrixSpace(GF(3), 2, 2)
sage: MS.coerce_map_from(GL(2, 3))
Coercion map:
From: General Linear Group of degree 2 over Finite Field of size 3
To: Full MatrixSpace of 2 by 2 dense matrices over Finite Field of size 3
sage: MS.coerce_map_from(GL(2, 2))
sage: MS.coerce_map_from(Gamma1(5))
Coercion map:
From: Congruence Subgroup Gamma1(5)
To: Full MatrixSpace of 2 by 2 dense matrices over Finite Field of size 3
TESTS:
Check that :trac:`22091` is fixed::
sage: A = Zmod(4)
sage: R = MatrixSpace(A, 2)
sage: G = GL(2, A)
sage: R.coerce_map_from(G)
Coercion map:
From: General Linear Group of degree 2 over Ring of integers modulo 4
To: Full MatrixSpace of 2 by 2 dense matrices over Ring of integers modulo 4
sage: R.coerce_map_from(GL(2, ZZ))
Coercion map:
From: General Linear Group of degree 2 over Integer Ring
To: Full MatrixSpace of 2 by 2 dense matrices over Ring of integers modulo 4
sage: m = R([[1, 0], [0, 1]])
sage: m in G
True
sage: m in list(G)
True
sage: m == G(m)
True
sage: G = SL(3, QQ)
sage: M = MatrixSpace(QQ, 3)
sage: G.one() == M.identity_matrix()
True
sage: G.one() + M.identity_matrix()
[2 0 0]
[0 2 0]
[0 0 2]
Verify which coercion maps are allowed (this should form a
poset)::
sage: M1 = MatrixSpace(ZZ, 3, implementation='flint')
sage: M2 = MatrixSpace(ZZ, 3, implementation='generic')
sage: M3 = MatrixSpace(ZZ, 3, implementation='gap')
sage: M4 = MatrixSpace(ZZ, 3, sparse=True)
sage: S = [M1, M2, M3, M4]
sage: mult = ''
sage: for A in S:
....: for B in S:
....: if A.has_coerce_map_from(B):
....: mult += 'X'
....: else:
....: mult += ' '
....: mult += '\n'
sage: print(mult)
XXXX
X X
XX
X
Thanks to the coercion model, arithmetic is allowed between all
these parents::
sage: for A in S:
....: for B in S:
....: a = A.an_element()
....: b = B.an_element()
....: dummy = (a * b) + (a - b)
"""
B = self.base()
if isinstance(S, MatrixSpace):
# Disallow coercion if dimensions do not match
if self.nrows() != S.nrows() or self.ncols() != S.ncols():
return False
T = S.base()
if B is not T:
# Matrix spaces over different base rings.
# TODO: make this an actual map induced by the map
# on the bases, see Trac #25540
return B.has_coerce_map_from(T)
# Base ring and dimensions are the same. So the only
# difference can be the implementation and sparseness.
if self.is_sparse() != S.is_sparse():
# Allow coercion sparse -> dense
return S.is_sparse()
# Allow coercion to the default implementation.
# As a consequence, the default implementation is considered
# the "common parent" when mixing implementations.
return self._has_default_implementation()
# Check for other parents whose elements are some kind of matrices
try:
meth_matrix_space = S.matrix_space
except AttributeError:
pass
else:
MS = meth_matrix_space()
from sage.groups.matrix_gps.matrix_group import is_MatrixGroup
from sage.modular.arithgroup.arithgroup_generic import is_ArithmeticSubgroup
if is_MatrixGroup(S) or is_ArithmeticSubgroup(S):
return self.has_coerce_map_from(MS)
else:
return False
# The parent is not matrix-like: coerce via base ring
return (self.nrows() == self.ncols()) and self._coerce_map_via([B], S)
def _repr_(self):
"""
Return the string representation of a MatrixSpace.
EXAMPLES::
sage: MS = MatrixSpace(ZZ,2,4,true)
sage: repr(MS)
'Full MatrixSpace of 2 by 4 sparse matrices over Integer Ring'
sage: MS
Full MatrixSpace of 2 by 4 sparse matrices over Integer Ring
sage: MatrixSpace(ZZ, 2, implementation='flint')
Full MatrixSpace of 2 by 2 dense matrices over Integer Ring
sage: MatrixSpace(ZZ, 2, implementation='generic')
Full MatrixSpace of 2 by 2 dense matrices over Integer Ring (using Matrix_generic_dense)
"""
if self.is_sparse():
s = "sparse"
else:
s = "dense"
s = "Full MatrixSpace of %s by %s %s matrices over %s"%(
self.__nrows, self.__ncols, s, self.base_ring())
if not self._has_default_implementation():
s += " (using {})".format(self.Element.__name__)
return s
def _repr_option(self, key):
"""
Metadata about the :meth:`_repr_` output.
See :meth:`sage.structure.parent._repr_option` for details.
EXAMPLES::
sage: MS = MatrixSpace(ZZ,2,4,true)
sage: MS._repr_option('element_ascii_art')
True
"""
if key == 'element_ascii_art':
return self.__nrows > 1
return super(MatrixSpace, self)._repr_option(key)
def _latex_(self):
r"""
Return the latex representation of a MatrixSpace.
EXAMPLES::
sage: MS3 = MatrixSpace(QQ,6,6,true)
sage: latex(MS3)
\mathrm{Mat}_{6\times 6}(\Bold{Q})
"""
return "\\mathrm{Mat}_{%s\\times %s}(%s)"%(self.nrows(), self.ncols(),
latex.latex(self.base_ring()))
def __len__(self):
"""
Return number of elements of this matrix space if it fits in
an int; raise a TypeError if there are infinitely many
elements, and raise an OverflowError if there are finitely
many but more than the size of an int.
EXAMPLES::
sage: len(MatrixSpace(GF(3),3,2))
729
sage: len(MatrixSpace(GF(3),2,3))
729
sage: 3^(2*3)
729
sage: len(MatrixSpace(GF(2003),3,2))
Traceback (most recent call last):
...
OverflowError: cannot fit 'int' into an index-sized integer
sage: len(MatrixSpace(QQ,3,2))
Traceback (most recent call last):
...
TypeError: len() of unsized object
"""
return len(self.base_ring())**(self.nrows() * self.ncols())
def __iter__(self):
r"""
Return a generator object which iterates through the elements of
self. The order in which the elements are generated is based on a
'weight' of a matrix which is the number of iterations on the base
ring that are required to reach that matrix.
The ordering is similar to a degree negative lexicographic order in
monomials in a multivariate polynomial ring.
EXAMPLES: Consider the case of 2 x 2 matrices over GF(5).
::
sage: list( GF(5) )
[0, 1, 2, 3, 4]
sage: MS = MatrixSpace(GF(5), 2, 2)
sage: l = list(MS)
Then, consider the following matrices::
sage: A = MS([2,1,0,1]); A
[2 1]
[0 1]
sage: B = MS([1,2,1,0]); B
[1 2]
[1 0]
sage: C = MS([1,2,0,0]); C
[1 2]
[0 0]
A appears before B since the weight of one of A's entries exceeds
the weight of the corresponding entry in B earliest in the list.
::
sage: l.index(A)
41
sage: l.index(B)
46
However, A would come after the matrix C since C has a lower weight
than A.
::
sage: l.index(A)
41
sage: l.index(C)
19
The weights of matrices over other base rings are not as obvious.
For example, the weight of
::
sage: MS = MatrixSpace(ZZ, 2, 2)
sage: MS([-1,0,0,0])
[-1 0]
[ 0 0]
is 2 since
::
sage: i = iter(ZZ)
sage: next(i)
0
sage: next(i)
1
sage: next(i)
-1
Some more examples::
sage: MS = MatrixSpace(GF(2),2)
sage: a = list(MS)
sage: len(a)
16
sage: for m in a:
....: print(m)
....: print('-')
[0 0]
[0 0]
-
[1 0]
[0 0]
-
[0 1]
[0 0]
-
[0 0]
[1 0]
-
[0 0]
[0 1]
-
[1 1]
[0 0]
-
[1 0]
[1 0]
-
[1 0]
[0 1]
-
[0 1]
[1 0]
-
[0 1]
[0 1]
-
[0 0]
[1 1]
-
[1 1]
[1 0]
-
[1 1]
[0 1]
-
[1 0]
[1 1]
-
[0 1]
[1 1]
-
[1 1]
[1 1]
-
::
sage: MS = MatrixSpace(GF(2),2, 3)
sage: a = list(MS)
sage: len(a)
64
sage: a[0]
[0 0 0]
[0 0 0]
::
sage: MS = MatrixSpace(ZZ, 2, 3)
sage: i = iter(MS)
sage: a = [ next(i) for _ in range(6) ]
sage: a[0]
[0 0 0]
[0 0 0]
sage: a[4]
[0 0 0]
[1 0 0]
For degenerate cases, where either the number of rows or columns
(or both) are zero, then the single element of the space is
returned.
::
sage: list( MatrixSpace(GF(2), 2, 0) )
[[]]
sage: list( MatrixSpace(GF(2), 0, 2) )
[[]]
sage: list( MatrixSpace(GF(2), 0, 0) )
[[]]
If the base ring does not support iteration (for example, with the
reals), then the matrix space over that ring does not support
iteration either.
::
sage: MS = MatrixSpace(RR, 2)
sage: a = list(MS)
Traceback (most recent call last):
...
NotImplementedError: len() of an infinite set
"""
# Make sure that we can iterate over the base ring
base_ring = self.base_ring()
base_iter = iter(base_ring)
number_of_entries = (self.__nrows*self.__ncols)
# If the number of entries is zero, then just
# yield the empty matrix in that case and return
if number_of_entries == 0:
yield self(0)
return
import sage.combinat.integer_vector
if not base_ring.is_finite():
# When the base ring is not finite, then we should go
# through and yield the matrices by "weight", which is
# the total number of iterations that need to be done
# on the base ring to reach the matrix.
base_elements = [ next(base_iter) ]
weight = 0
while True:
for iv in sage.combinat.integer_vector.IntegerVectors(weight, number_of_entries):
yield self([base_elements[i] for i in iv])
weight += 1
base_elements.append( next(base_iter) )
else:
# In the finite case, we do a similar thing except that
# the "weight" of each entry is bounded by the number
# of elements in the base ring
order = base_ring.order()
base_elements = list(base_ring)
for weight in range((order-1)*number_of_entries+1):
for iv in sage.combinat.integer_vector.IntegerVectors(weight, number_of_entries, max_part=(order-1)):
yield self([base_elements[i] for i in iv])
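# The enumeration above is driven by ``IntegerVectors``: the entries of
# each yielded matrix are indices into ``base_elements``. A minimal
# sketch of the weight-1 layer for a 2 x 2 space, assuming a standard
# Sage session:
#
#     sage: from sage.combinat.integer_vector import IntegerVectors
#     sage: list(IntegerVectors(1, 4, max_part=1))
#     [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
#
# which matches the order of the four weight-1 matrices in the GF(2)
# example in the docstring.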
def __getitem__(self, x):
"""
Return a polynomial ring over this ring or the `n`-th element of this ring.
This method implements the syntax ``R['x']`` to define polynomial rings
over matrix rings, while still allowing to get the `n`-th element of a
finite matrix ring with ``R[n]`` for backward compatibility.
(If this behaviour proves desirable for all finite enumerated rings, it
should eventually be implemented in the corresponding category rather
than here.)
.. SEEALSO::
:meth:`sage.categories.rings.Rings.ParentMethod.__getitem__`,
:meth:`sage.structure.parent.Parent.__getitem__`
EXAMPLES::
sage: MS = MatrixSpace(GF(3), 2, 2)
sage: MS['x']
Univariate Polynomial Ring in x over Full MatrixSpace of 2 by 2 dense matrices over Finite Field of size 3
sage: MS[0]
[0 0]
[0 0]
sage: MS[9]
[0 2]
[0 0]
sage: MS = MatrixSpace(QQ, 7)
sage: MS['x']
Univariate Polynomial Ring in x over Full MatrixSpace of 7 by 7 dense matrices over Rational Field
sage: MS[2]
Traceback (most recent call last):
...
AttributeError: 'MatrixSpace_with_category' object has no attribute 'list'
"""
if isinstance(x, (integer.Integer, int)):
return self.list()[x]
return super(MatrixSpace, self).__getitem__(x)
def basis(self):
"""
Return a basis for this matrix space.
.. WARNING::
This will of course compute every generator of this matrix
space. So for large dimensions, this could take a long time,
waste a massive amount of memory (for dense matrices), and
is likely not very useful. Don't use this on large matrix
spaces.
EXAMPLES::
sage: list(Mat(ZZ,2,2).basis())
[
[1 0] [0 1] [0 0] [0 0]
[0 0], [0 0], [1 0], [0 1]
]
"""
v = {(r, c): self.zero_matrix().__copy__()
for r in range(self.__nrows)
for c in range(self.__ncols)}
one = self.base_ring().one()
keys = []
for r in range(self.__nrows):
for c in range(self.__ncols):
keys.append((r, c))
v[r, c][r, c] = one
v[r, c].set_immutable()
from sage.sets.family import Family
return Family(keys, v.__getitem__)
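# Note that the resulting Family is indexed by the (row, column)
# positions in row-major order: ``B[r, c]`` is the immutable elementary
# matrix with a single 1 at position (r, c), as in the EXAMPLES block.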
def dimension(self):
r"""
Return the dimension (m rows) \* (n cols) of ``self`` as an ``Integer``.
EXAMPLES::
sage: MS = MatrixSpace(ZZ,4,6)
sage: u = MS.dimension()
sage: u - 24 == 0
True
"""
return self.__nrows * self.__ncols
def dims(self):
"""
Return the pair (m rows, n cols) giving the dimensions of ``self``.
EXAMPLES::
sage: MS = MatrixSpace(ZZ,4,6)
sage: MS.dims()
(4, 6)
"""
return (self.__nrows, self.__ncols)
def submodule(self, gens, check=True, already_echelonized=False,
unitriangular=False, support_order=None, category=None,
*args, **opts):
r"""
The submodule spanned by a finite set of matrices.
INPUT:
- ``gens`` -- a list or family of elements of ``self``
- ``check`` -- (default: ``True``) whether to verify that the
elements of ``gens`` are in ``self``
- ``already_echelonized`` -- (default: ``False``) whether
the elements of ``gens`` are already in (not necessarily
reduced) echelon form
- ``unitriangular`` -- (default: ``False``) whether
the lift morphism is unitriangular
- ``support_order`` -- (optional) either something that can
be converted into a tuple or a key function
If ``already_echelonized`` is ``False``, then the
generators are put in reduced echelon form using
:meth:`echelonize`, and reindexed by `0, 1, \ldots`.
.. WARNING::
At this point, this method only works for finite
dimensional submodules and if matrices can be
echelonized over the base ring.
If in addition ``unitriangular`` is ``True``, then
the generators are made such that the coefficients of
the pivots are 1, so that lifting map is unitriangular.
The basis of the submodule uses the same index set as the
generators, and the lifting map sends `y_i` to `gens[i]`.
.. SEEALSO::
:meth:`ModulesWithBasis.ParentMethods.submodule`
EXAMPLES::
sage: M = MatrixSpace(QQ, 2)
sage: mat = M.matrix([[1, 2], [3, 4]])
sage: X = M.submodule([mat], already_echelonized=True); X
Free module generated by {0} over Rational Field
sage: mat2 = M.matrix([[1, 0], [-3, 2]])
sage: X = M.submodule([mat, mat2])
sage: [X.lift(b) for b in X.basis()]
[
[ 1 0] [0 1]
[-3 2], [3 1]
]
sage: A = matrix([[1, 1], [0, -1]])
sage: B = matrix([[0, 1], [0, 2]])
sage: X = M.submodule([A, B])
sage: Xp = M.submodule([A, B], support_order=[(0,1), (1,1), (0,0)])
sage: [X.lift(b) for b in X.basis()]
[
[ 1 0] [0 1]
[ 0 -3], [0 2]
]
sage: [Xp.lift(b) for b in Xp.basis()]
[
[2/3 1] [-1/3 0]
[ 0 0], [ 0 1]
]
"""
support_order = self._compute_support_order(gens, support_order)
if not already_echelonized:
gens = self.echelon_form(gens, unitriangular, order=support_order)
else:
from copy import copy
# We will be making gens immutable, so copy the mutable matrices
gens = [copy(g) if g.is_mutable() else g for g in gens]
# We need to make sure the result immutable
for g in gens:
g.set_immutable()
from sage.modules.with_basis.subquotient import SubmoduleWithBasis
return SubmoduleWithBasis(gens, ambient=self,
support_order=support_order,
unitriangular=unitriangular,
category=category, *args, **opts)
from sage.misc.cachefunc import cached_method
@cached_method
def identity_matrix(self):
"""
Return the identity matrix in ``self``.
``self`` must be a space of square
matrices. The returned matrix is immutable. Please use ``copy`` if
you want a modified copy.
EXAMPLES::
sage: MS1 = MatrixSpace(ZZ,4)
sage: MS2 = MatrixSpace(QQ,3,4)
sage: I = MS1.identity_matrix()
sage: I
[1 0 0 0]
[0 1 0 0]
[0 0 1 0]
[0 0 0 1]
sage: Er = MS2.identity_matrix()
Traceback (most recent call last):
...
TypeError: identity matrix must be square
TESTS::
sage: MS1.one()[1,2] = 3
Traceback (most recent call last):
...
ValueError: matrix is immutable; please change a copy instead (i.e., use copy(M) to change a copy of M).
Check different implementations::
sage: M1 = MatrixSpace(ZZ, 2, implementation='flint')
sage: M2 = MatrixSpace(ZZ, 2, implementation='generic')
sage: type(M1.identity_matrix())
<class 'sage.matrix.matrix_integer_dense.Matrix_integer_dense'>
sage: type(M2.identity_matrix())
<class 'sage.matrix.matrix_generic_dense.Matrix_generic_dense'>
"""
if self.__nrows != self.__ncols:
raise TypeError("identity matrix must be square")
A = self.zero_matrix().__copy__()
for i in range(self.__nrows):
A[i, i] = 1
A.set_immutable()
return A
one = identity_matrix
def diagonal_matrix(self, entries):
"""
Create a diagonal matrix in ``self`` using the specified elements.
INPUT:
- ``entries`` -- the elements to use as the diagonal entries
``self`` must be a space of square matrices. The length of
``entries`` must be less than or equal to the matrix
dimensions. If the length of ``entries`` is less than the
matrix dimensions, ``entries`` is padded with zeroes at the
end.
EXAMPLES::
sage: MS1 = MatrixSpace(ZZ,4)
sage: MS2 = MatrixSpace(QQ,3,4)
sage: I = MS1.diagonal_matrix([1, 2, 3, 4])
sage: I
[1 0 0 0]
[0 2 0 0]
[0 0 3 0]
[0 0 0 4]
sage: MS2.diagonal_matrix([1, 2])
Traceback (most recent call last):
...
TypeError: diagonal matrix must be square
sage: MS1.diagonal_matrix([1, 2, 3, 4, 5])
Traceback (most recent call last):
...
ValueError: number of diagonal matrix entries (5) exceeds the matrix size (4)
sage: MS1.diagonal_matrix([1/2, 2, 3, 4])
Traceback (most recent call last):
...
TypeError: no conversion of this rational to integer
Check different implementations::
sage: M1 = MatrixSpace(ZZ, 2, implementation='flint')
sage: M2 = MatrixSpace(ZZ, 2, implementation='generic')
sage: type(M1.diagonal_matrix([1, 2]))
<class 'sage.matrix.matrix_integer_dense.Matrix_integer_dense'>
sage: type(M2.diagonal_matrix([1, 2]))
<class 'sage.matrix.matrix_generic_dense.Matrix_generic_dense'>
"""
if self.__nrows != self.__ncols:
raise TypeError("diagonal matrix must be square")
if self.__nrows < len(entries):
raise ValueError('number of diagonal matrix entries (%s) exceeds the matrix size (%s)' % (len(entries), self.__nrows))
A = self.zero_matrix().__copy__()
for i in range(len(entries)):
A[i, i] = entries[i]
return A
def is_dense(self):
"""
Return whether matrices in ``self`` are dense.
EXAMPLES::
sage: Mat(RDF,2,3).is_dense()
True
sage: Mat(RR,123456,22,sparse=True).is_dense()
False
"""
return not self.__is_sparse
def is_sparse(self):
"""
Return whether matrices in ``self`` are sparse.
EXAMPLES::
sage: Mat(GF(2011),10000).is_sparse()
False
sage: Mat(GF(2011),10000,sparse=True).is_sparse()
True
"""
return self.__is_sparse
def is_finite(self):
"""
Return whether this matrix space is finite.
EXAMPLES::
sage: MatrixSpace(GF(101), 10000).is_finite()
True
sage: MatrixSpace(QQ, 2).is_finite()
False
"""
return self.base_ring().is_finite()
def gen(self, n):
"""
Return the n-th generator of this matrix space.
This does not compute all basis matrices, so it is reasonably
intelligent.
EXAMPLES::
sage: M = Mat(GF(7),10000,5); M.ngens()
50000
sage: a = M.10
sage: a[:4]
[0 0 0 0 0]
[0 0 0 0 0]
[1 0 0 0 0]
[0 0 0 0 0]
"""
if hasattr(self, '__basis'):
return self.__basis[n]
r = n // self.__ncols
c = n - (r * self.__ncols)
z = self.zero_matrix().__copy__()
z[r,c] = 1
return z
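# The arithmetic above inverts the row-major flattening n = r*ncols + c;
# for instance with ncols = 5, the generator n = 10 sits at row
# r = 10 // 5 = 2 and column c = 10 - 2*5 = 0, matching the doctest.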
@cached_method
def zero_matrix(self):
"""
Return the zero matrix in ``self``.
The returned matrix is
immutable. Please use ``copy`` if you want a modified copy.
EXAMPLES::
sage: z = MatrixSpace(GF(7),2,4).zero_matrix(); z
[0 0 0 0]
[0 0 0 0]
sage: z.is_mutable()
False
TESTS::
sage: MM = MatrixSpace(RDF,1,1,sparse=False); mat = MM.zero_matrix()
sage: copy(mat)
[0.0]
sage: MM = MatrixSpace(RDF,0,0,sparse=False); mat = MM.zero_matrix()
sage: copy(mat)
[]
sage: mat.is_mutable()
False
sage: MM.zero().is_mutable()
False
"""
zero = self.base_ring().zero()
res = self.element_class(self, zero, False, False)
res.set_immutable()
return res
zero = zero_matrix
def ngens(self):
"""
Return the number of generators of this matrix space.
This is the number of entries in the matrices in this space.
EXAMPLES::
sage: M = Mat(GF(7),100,200); M.ngens()
20000
"""
return self.dimension()
def matrix(self, x=None, **kwds):
r"""
Create a matrix in ``self``.
INPUT:
- ``x`` -- data to construct a new matrix from. See :func:`matrix`
- ``coerce`` -- (default: ``True``) if False, assume without
checking that the values in ``x`` lie in the base ring
OUTPUT:
- a matrix in ``self``.
EXAMPLES::
sage: M = MatrixSpace(ZZ, 2)
sage: M.matrix([[1,0],[0,-1]])
[ 1 0]
[ 0 -1]
sage: M.matrix([1,0,0,-1])
[ 1 0]
[ 0 -1]
sage: M.matrix([1,2,3,4])
[1 2]
[3 4]
Note that the last "flip" cannot be performed if ``x`` is a
matrix, regardless of the value of ``rows`` (it used to be possible but
was fixed by :trac:`10793`)::
sage: projection = matrix(ZZ,[[1,0,0],[0,1,0]])
sage: projection
[1 0 0]
[0 1 0]
sage: projection.parent()
Full MatrixSpace of 2 by 3 dense matrices over Integer Ring
sage: M = MatrixSpace(ZZ, 3 , 2)
sage: M
Full MatrixSpace of 3 by 2 dense matrices over Integer Ring
sage: M(projection)
Traceback (most recent call last):
...
ValueError: inconsistent number of rows: should be 3 but got 2
If you really want to construct a matrix of different dimensions from
a given matrix, use either the ``transpose`` method or an explicit
conversion to a list::
sage: M(projection.list())
[1 0]
[0 0]
[1 0]
TESTS:
The following corner cases were problematic while working on
:trac:`10628`::
sage: MS = MatrixSpace(ZZ,2,1)
sage: MS([[1],[2]])
[1]
[2]
sage: MS = MatrixSpace(CC,2,1)
sage: F = NumberField(x^2+1, name='x')
sage: MS([F(1),F(0)])
[ 1.00000000000000]
[0.000000000000000]
:trac:`10628` allowed providing the data as lists of matrices, but
:trac:`13012` prohibited it::
sage: MS = MatrixSpace(ZZ,4,2)
sage: MS0 = MatrixSpace(ZZ,2)
sage: MS.matrix([MS0([1,2,3,4]), MS0([5,6,7,8])])
Traceback (most recent call last):
...
TypeError: unable to coerce <class 'sage.matrix.matrix_integer_dense.Matrix_integer_dense'> to an integer
A mixed list of matrices and vectors is prohibited as well::
sage: MS.matrix( [MS0([1,2,3,4])] + list(MS0([5,6,7,8])) )
Traceback (most recent call last):
...
TypeError: unable to coerce <class 'sage.matrix.matrix_integer_dense.Matrix_integer_dense'> to an integer
Check that :trac:`13302` is fixed::
sage: MatrixSpace(Qp(3),1,1)([Qp(3).zero()])
[0]
sage: MatrixSpace(Qp(3),1,1)([Qp(3)(4/3)])
[3^-1 + 1 + O(3^19)]
One-rowed matrices over combinatorial free modules used to break
the constructor (:trac:`17124`). Check that this is fixed::
sage: Sym = SymmetricFunctions(QQ)
sage: h = Sym.h()
sage: MatrixSpace(h,1,1)([h[1]])
[h[1]]
sage: MatrixSpace(h,2,1)([h[1], h[2]])
[h[1]]
[h[2]]
Converting sparse to dense matrices used to be too slow
(:trac:`20470`). Check that this is fixed::
sage: m = identity_matrix(GF(2), 2000, sparse=True)
sage: MS = MatrixSpace(GF(2), 2000, sparse=False)
sage: md = MS(m) # used to be slow
sage: md.parent() is MS
True
"""
return self(x, **kwds)
def matrix_space(self, nrows=None, ncols=None, sparse=False):
"""
Return the matrix space with the given number of rows and columns and
the given sparsity, over the same base ring as ``self``. The number of
rows and columns defaults to that of ``self``; note that ``sparse``
defaults to ``False``.
EXAMPLES::
sage: M = Mat(GF(7),100,200)
sage: M.matrix_space(5000)
Full MatrixSpace of 5000 by 200 dense matrices over Finite Field of size 7
sage: M.matrix_space(ncols=5000)
Full MatrixSpace of 100 by 5000 dense matrices over Finite Field of size 7
sage: M.matrix_space(sparse=True)
Full MatrixSpace of 100 by 200 sparse matrices over Finite Field of size 7
"""
if nrows is None:
nrows = self.__nrows
if ncols is None:
ncols = self.__ncols
base = self._base
return MatrixSpace(base, nrows, ncols, sparse=sparse)
def ncols(self):
"""
Return the number of columns of matrices in this space.
EXAMPLES::
sage: M = Mat(ZZ['x'],200000,500000,sparse=True)
sage: M.ncols()
500000
"""
return self.__ncols
def nrows(self):
"""
Return the number of rows of matrices in this space.
EXAMPLES::
sage: M = Mat(ZZ,200000,500000)
sage: M.nrows()
200000
"""
return self.__nrows
def row_space(self):
"""
Return the module spanned by all rows of matrices in this matrix
space. This is a free module of rank equal to the number of columns.
It will be sparse or dense as this matrix space is sparse or dense.
EXAMPLES::
sage: M = Mat(ZZ,20,5,sparse=False); M.row_space()
Ambient free module of rank 5 over the principal ideal domain Integer Ring
"""
try:
return self.__row_space
except AttributeError:
self.__row_space = sage.modules.free_module.FreeModule(self.base_ring(),
self.ncols(), sparse=self.is_sparse())
return self.__row_space
def column_space(self):
"""
Return the module spanned by all columns of matrices in this matrix
space. This is a free module of rank equal to the number of rows. It
will be sparse or dense as this matrix space is sparse or dense.
EXAMPLES::
sage: M = Mat(GF(9,'a'),20,5,sparse=True); M.column_space()
Sparse vector space of dimension 20 over Finite Field in a of size 3^2
"""
try:
return self.__column_space
except AttributeError:
self.__column_space = sage.modules.free_module.FreeModule(self.base_ring(), self.nrows(),
sparse=self.is_sparse())
return self.__column_space
def random_element(self, density=None, *args, **kwds):
"""
Return a random element from this matrix space.
INPUT:
- ``density`` - ``float`` or ``None`` (default: ``None``); rough
measure of the proportion of nonzero entries in the random matrix;
if set to ``None``, all entries of the matrix are randomized,
allowing for any element of the underlying ring, but if set to
a ``float``, a proportion of entries is selected and randomized to
non-zero elements of the ring
- ``*args, **kwds`` - remaining parameters, which may be passed to
the random_element function of the base ring. ("may be", since this
function calls the ``randomize`` function on the zero matrix, which
need not call the ``random_element`` function of the base ring at
all in general.)
OUTPUT:
- Matrix
.. NOTE::
This method will randomize a proportion of roughly ``density`` entries
in a newly allocated zero matrix.
By default, if the user sets the value of ``density`` explicitly, this
method will enforce that these entries are set to non-zero values.
However, if the test for equality with zero in the base ring is too
expensive, the user can override this behaviour by passing the
argument ``nonzero=False`` to this method.
Otherwise, if the user does not set the value of ``density``, the
default value is taken to be 1, and the option ``nonzero=False`` is
passed to the ``randomize`` method.
EXAMPLES::
sage: M = Mat(ZZ, 2, 5).random_element()
sage: TestSuite(M).run()
sage: M = Mat(QQ, 2, 5).random_element(density=0.5)
sage: TestSuite(M).run()
sage: M = Mat(QQ, 3, sparse=True).random_element()
sage: TestSuite(M).run()
sage: M = Mat(GF(9,'a'), 3, sparse=True).random_element()
sage: TestSuite(M).run()
"""
Z = self.zero_matrix().__copy__()
if density is None:
Z.randomize(density=float(1), nonzero=kwds.pop('nonzero', False), \
*args, **kwds)
else:
Z.randomize(density=density, nonzero=kwds.pop('nonzero', True), \
*args, **kwds)
return Z
def _an_element_(self):
"""
Create a typical element of this matrix space.
This uses ``some_elements`` of the base ring.
EXAMPLES::
sage: MatrixSpace(QQ, 3, 3).an_element() # indirect doctest
[ 1/2 -1/2 2]
[ -2 0 1]
[ -1 42 2/3]
TESTS::
sage: MatrixSpace(ZZ, 0, 0).an_element()
[]
Check that this works for large matrices and that it returns a
matrix which is not too trivial::
sage: M = MatrixSpace(GF(2), 100, 100).an_element()
sage: M.rank() >= 2
True
Check that this works for sparse matrices::
sage: M = MatrixSpace(ZZ, 1000, 1000, sparse=True).an_element()
sage: 96 <= M.density() * 10^6 <= 99
True
"""
from .args import MatrixArgs
dim = self.dimension()
if dim > 100 and self.is_sparse():
# Sparse case: add 100 elements
D = {}
nr = self.nrows()
nc = self.ncols()
from random import randrange
n = 0
while True:
for el in self.base().some_elements():
if n == 100:
ma = MatrixArgs(D, space=self)
del D
return ma.matrix()
D[randrange(nr), randrange(nc)] = el
n += 1
assert D
else:
# Dense case
# Keep appending to L until we have enough elements
L = []
while True:
for el in self.base().some_elements():
if len(L) == dim:
ma = MatrixArgs(L, space=self)
del L # for efficiency: this may avoid a copy of L
return ma.matrix()
L.append(el)
assert L
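# Rough sanity check for the sparse branch above: it places (up to) 100
# entries at random positions, so a 1000 x 1000 space gets density about
# 100/10^6; collisions between positions (and zero elements among
# ``some_elements``) make it slightly smaller, consistent with the
# ``96 <= M.density() * 10^6 <= 99`` doctest.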
def some_elements(self):
r"""
Return some elements of this matrix space.
See :class:`TestSuite` for a typical use case.
OUTPUT:
An iterator.
EXAMPLES::
sage: M = MatrixSpace(ZZ, 2, 2)
sage: tuple(M.some_elements())
(
[ 0 1] [1 0] [0 1] [0 0] [0 0]
[-1 2], [0 0], [0 0], [1 0], [0 1]
)
sage: M = MatrixSpace(QQ, 2, 3)
sage: tuple(M.some_elements())
(
[ 1/2 -1/2 2] [1 0 0] [0 1 0] [0 0 1] [0 0 0] [0 0 0] [0 0 0]
[ -2 0 1], [0 0 0], [0 0 0], [0 0 0], [1 0 0], [0 1 0], [0 0 1]
)
sage: M = MatrixSpace(SR, 2, 2)
sage: tuple(M.some_elements())
(
[some_variable some_variable] [1 0] [0 1] [0 0] [0 0]
[some_variable some_variable], [0 0], [0 0], [1 0], [0 1]
)
"""
yield self.an_element()
for g in self.gens():
yield g
def _magma_init_(self, magma):
r"""
EXAMPLES: We first coerce a square matrix.
::
sage: magma(MatrixSpace(QQ,3)) # optional - magma
Full Matrix Algebra of degree 3 over Rational Field
::
sage: magma(MatrixSpace(Integers(8),2,3)) # optional - magma
Full RMatrixSpace of 2 by 3 matrices over IntegerRing(8)
"""
K = magma(self.base_ring())
if self.__nrows == self.__ncols:
s = 'MatrixAlgebra(%s,%s)'%(K.name(), self.__nrows)
else:
s = 'RMatrixSpace(%s,%s,%s)'%(K.name(), self.__nrows, self.__ncols)
return s
def _polymake_init_(self):
r"""
Return the polymake representation of the matrix space.
EXAMPLES::
sage: polymake(MatrixSpace(QQ,3)) # optional - polymake
Matrix<Rational>
sage: polymake(MatrixSpace(QuadraticField(5),3)) # optional - polymake
Matrix<QuadraticExtension>
"""
from sage.interfaces.polymake import polymake
K = polymake(self.base_ring())
return '"Matrix<{}>"'.format(K)
def _random_nonzero_element(self, *args, **kwds):
"""
Return a random non-zero matrix.
This function repeatedly calls ``random_element`` until a non-zero
matrix is obtained.
INPUT:
- ``*args``, ``**kwds`` - Parameters that can be forwarded to the
``random_element`` method
OUTPUT:
- Random non-zero matrix
EXAMPLES::
sage: M = MatrixSpace(ZZ, 4)
sage: A = M._random_nonzero_element()
sage: A.is_zero()
False
sage: M = MatrixSpace(ZZ, 0)
sage: A = M._random_nonzero_element()
Traceback (most recent call last):
...
ValueError: Full MatrixSpace of 0 by 0 dense matrices over Integer Ring only has zero elements
"""
if 0 in self.dims():
raise ValueError("{} only has zero elements".format(self))
rand_matrix = self.random_element(*args, **kwds)
while rand_matrix.is_zero():
rand_matrix = self.random_element(*args, **kwds)
return rand_matrix
def dict_to_list(entries, nrows, ncols):
r"""
Given a dictionary of coordinate tuples, return the list given by
reading off the nrows\*ncols matrix in row order.
EXAMPLES::
sage: from sage.matrix.matrix_space import dict_to_list
sage: d = {}
sage: d[(0,0)] = 1
sage: d[(1,1)] = 2
sage: dict_to_list(d, 2, 2)
[1, 0, 0, 2]
sage: dict_to_list(d, 2, 3)
[1, 0, 0, 0, 2, 0]
"""
v = [0] * (nrows * ncols)
for ij, y in entries.items():
i, j = ij
v[i * ncols + j] = y
return v
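# A minimal sketch of the inverse conversion (illustrative only, not
# part of this module's API): rebuild the sparse dictionary from the
# row-major list produced by ``dict_to_list``.
def _list_to_dict_sketch(v, nrows, ncols):
    # keep only the nonzero entries, keyed by (row, column)
    return {(i, j): v[i * ncols + j]
            for i in range(nrows)
            for j in range(ncols)
            if v[i * ncols + j]}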
def test_trivial_matrices_inverse(ring, sparse=True, implementation=None, checkrank=True):
"""
Test inversion, determinant and is_invertible for trivial matrices.
This function is a helper to check that the inversion of trivial matrices
(of size 0x0, nx0, 0xn or 1x1) is handled consistently by the various
implementations of matrices. Consistency is checked through a series of
assertions. If an inconsistency is found, an AssertionError is raised,
which should make clear what the problem is.
INPUT:
- ``ring`` - a ring
- ``sparse`` - a boolean
- ``implementation`` - (optional) a string or a matrix class, passed
through to :class:`MatrixSpace`
- ``checkrank`` - a boolean
OUTPUT:
- nothing if everything is correct, otherwise raise an AssertionError
The methods determinant, is_invertible, rank and inverse are checked for
- the 0x0 empty identity matrix
- the 0x3 and 3x0 matrices
- the 1x1 null matrix [0]
- the 1x1 identity matrix [1]
If ``checkrank`` is ``False`` then the rank is not checked. This is used
to check matrices over rings where echelon form is not implemented.
.. TODO::
This must be adapted to category check framework when ready
(see :trac:`5274`).
TESTS::
sage: from sage.matrix.matrix_space import test_trivial_matrices_inverse as tinv
sage: tinv(ZZ, sparse=True)
sage: tinv(ZZ, sparse=False, implementation='flint')
sage: tinv(ZZ, sparse=False, implementation='generic')
sage: tinv(QQ, sparse=True)
sage: tinv(QQ, sparse=False, implementation='flint')
sage: tinv(QQ, sparse=False, implementation='generic')
sage: tinv(GF(11), sparse=True)
sage: tinv(GF(11), sparse=False)
sage: tinv(GF(2), sparse=True)
sage: tinv(GF(2), sparse=False)
sage: tinv(SR, sparse=True)
sage: tinv(SR, sparse=False)
sage: tinv(RDF, sparse=True)
sage: tinv(RDF, sparse=False)
sage: tinv(CDF, sparse=True)
sage: tinv(CDF, sparse=False)
sage: tinv(CyclotomicField(7), sparse=True)
sage: tinv(CyclotomicField(7), sparse=False)
sage: tinv(QQ['x,y'], sparse=True)
sage: tinv(QQ['x,y'], sparse=False)
"""
# Check that the empty 0x0 matrix is its own inverse with det=1.
ms00 = MatrixSpace(ring, 0, 0, sparse=sparse, implementation=implementation)
m00 = ms00(0)
assert(m00.determinant() == ring(1))
assert(m00.is_invertible())
assert(m00.inverse() == m00)
if checkrank:
assert(m00.rank() == 0)
# Check that the empty 0x3 and 3x0 matrices are not invertible and that
# computing the determinant raises the proper exception.
for ms0 in [MatrixSpace(ring, 0, 3, sparse=sparse, implementation=implementation),
MatrixSpace(ring, 3, 0, sparse=sparse, implementation=implementation)]:
mn0 = ms0(0)
assert(not mn0.is_invertible())
try:
d = mn0.determinant()
print(d)
res = False
except ValueError:
res = True
assert(res)
try:
mn0.inverse()
res = False
except ArithmeticError:
res = True
assert(res)
if checkrank:
assert(mn0.rank() == 0)
# Check that the null 1x1 matrix is not invertible and that det=0
ms1 = MatrixSpace(ring, 1, 1, sparse=sparse, implementation=implementation)
m0 = ms1(0)
assert(not m0.is_invertible())
assert(m0.determinant() == ring(0))
try:
m0.inverse()
res = False
except (ZeroDivisionError, RuntimeError):
# FIXME: make pynac throw a ZeroDivisionError on division by
# zero instead of a RuntimeError
res = True
assert(res)
if checkrank:
assert(m0.rank() == 0)
# Check that the identity 1x1 matrix is its own inverse with det=1
m1 = ms1(1)
assert(m1.is_invertible())
assert(m1.determinant() == ring(1))
inv = m1.inverse()
assert(inv == m1)
if checkrank:
assert(m1.rank() == 1)
# Fix unpickling Matrix_modn_dense and Matrix_integer_2x2
lazy_import('sage.matrix.matrix_modn_dense_double', 'Matrix_modn_dense_double')
lazy_import('sage.matrix.matrix_integer_dense', 'Matrix_integer_dense')
from sage.misc.persist import register_unpickle_override
def _MatrixSpace_ZZ_2x2():
from sage.rings.integer_ring import ZZ
return MatrixSpace(ZZ,2)
register_unpickle_override('sage.matrix.matrix_modn_dense',
'Matrix_modn_dense', Matrix_modn_dense_double)
register_unpickle_override('sage.matrix.matrix_integer_2x2',
'Matrix_integer_2x2', Matrix_integer_dense)
register_unpickle_override('sage.matrix.matrix_integer_2x2',
'MatrixSpace_ZZ_2x2_class', MatrixSpace)
register_unpickle_override('sage.matrix.matrix_integer_2x2',
'MatrixSpace_ZZ_2x2', _MatrixSpace_ZZ_2x2)
lazy_import('sage.matrix.matrix_gf2e_dense', 'unpickle_matrix_gf2e_dense_v0')
register_unpickle_override('sage.matrix.matrix_mod2e_dense',
'unpickle_matrix_mod2e_dense_v0', unpickle_matrix_gf2e_dense_v0)
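# These overrides keep old pickles loadable: when a class has been
# renamed or removed (e.g. the former ``Matrix_integer_2x2``),
# unpickling is redirected to the closest current class or constructor
# registered above.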
| 34.396844
| 175
| 0.549658
|
import sys
import operator
from . import matrix_generic_dense
from . import matrix_generic_sparse
import sage.structure.coerce
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
import sage.rings.integer as integer
import sage.rings.finite_rings.finite_field_constructor
import sage.misc.latex as latex
import sage.modules.free_module
from sage.misc.lazy_attribute import lazy_attribute
from sage.categories.rings import Rings
from sage.categories.fields import Fields
from sage.categories.enumerated_sets import EnumeratedSets
from sage.misc.lazy_import import lazy_import
from sage.features import PythonModule
lazy_import('sage.matrix.matrix_gfpn_dense', ['Matrix_gfpn_dense'],
feature=PythonModule('sage.matrix.matrix_gfpn_dense', spkg='meataxe'))
_Rings = Rings()
_Fields = Fields()
def is_MatrixSpace(x):
return isinstance(x, MatrixSpace)
def get_matrix_class(R, nrows, ncols, sparse, implementation):
if isinstance(implementation, type):
return implementation
if not sparse:
if implementation is None:
if R is sage.rings.integer_ring.ZZ:
try:
from . import matrix_integer_dense
except ImportError:
pass
else:
return matrix_integer_dense.Matrix_integer_dense
elif R is sage.rings.rational_field.QQ:
try:
from . import matrix_rational_dense
except ImportError:
pass
else:
return matrix_rational_dense.Matrix_rational_dense
elif isinstance(R, sage.rings.abc.RealDoubleField):
try:
from . import matrix_real_double_dense
except ImportError:
pass
else:
return matrix_real_double_dense.Matrix_real_double_dense
elif isinstance(R, sage.rings.abc.ComplexDoubleField):
if implementation is None or implementation == 'numpy':
try:
from . import matrix_complex_double_dense
except ImportError:
pass
else:
return matrix_complex_double_dense.Matrix_complex_double_dense
elif sage.rings.finite_rings.finite_field_constructor.is_FiniteField(R):
if R.order() == 2:
try:
from . import matrix_mod2_dense
except ImportError:
pass
else:
return matrix_mod2_dense.Matrix_mod2_dense
if R.characteristic() == 2 and R.order() <= 65536:
try:
from . import matrix_gf2e_dense
except ImportError:
pass
else:
return matrix_gf2e_dense.Matrix_gf2e_dense
if (not R.is_prime_field()) and R.order() < 256:
try:
from . import matrix_gfpn_dense
return matrix_gfpn_dense.Matrix_gfpn_dense
except ImportError:
pass
if isinstance(R, sage.rings.abc.IntegerModRing):
from . import matrix_modn_dense_double, matrix_modn_dense_float
if R.order() < matrix_modn_dense_float.MAX_MODULUS:
return matrix_modn_dense_float.Matrix_modn_dense_float
if R.order() < matrix_modn_dense_double.MAX_MODULUS:
return matrix_modn_dense_double.Matrix_modn_dense_double
if isinstance(R, sage.rings.abc.NumberField_cyclotomic):
from . import matrix_cyclo_dense
return matrix_cyclo_dense.Matrix_cyclo_dense
try:
from sage.symbolic.ring import SR
except ImportError:
pass
else:
if R is SR:
try:
from . import matrix_symbolic_dense
except ImportError:
pass
else:
return matrix_symbolic_dense.Matrix_symbolic_dense
if isinstance(R, sage.rings.abc.ComplexBallField):
try:
from . import matrix_complex_ball_dense
except ImportError:
pass
else:
return matrix_complex_ball_dense.Matrix_complex_ball_dense
try:
from sage.rings.polynomial import polynomial_ring, multi_polynomial_ring_base
except ImportError:
pass
else:
if polynomial_ring.is_PolynomialRing(R) and R.base_ring() in _Fields:
try:
from . import matrix_polynomial_dense
except ImportError:
pass
else:
return matrix_polynomial_dense.Matrix_polynomial_dense
elif multi_polynomial_ring_base.is_MPolynomialRing(R) and R.base_ring() in _Fields:
try:
from . import matrix_mpolynomial_dense
except ImportError:
pass
else:
return matrix_mpolynomial_dense.Matrix_mpolynomial_dense
return matrix_generic_dense.Matrix_generic_dense
if implementation == 'flint':
if R is sage.rings.integer_ring.ZZ:
from . import matrix_integer_dense
return matrix_integer_dense.Matrix_integer_dense
if R is sage.rings.rational_field.QQ:
from . import matrix_rational_dense
return matrix_rational_dense.Matrix_rational_dense
raise ValueError("'flint' matrices are only available over the integers or the rationals")
if implementation == 'm4ri':
if R.is_field() and R.characteristic() == 2 and R.order() <= 65536:
if R.order() == 2:
from . import matrix_mod2_dense
return matrix_mod2_dense.Matrix_mod2_dense
from . import matrix_gf2e_dense
return matrix_gf2e_dense.Matrix_gf2e_dense
raise ValueError("'m4ri' matrices are only available for fields of characteristic 2 and order <= 65536")
if implementation == 'meataxe':
if R.is_field() and R.order() < 256:
return Matrix_gfpn_dense
raise ValueError("'meataxe' matrix can only deal with finite fields of order < 256")
if implementation == 'numpy':
if R is sage.rings.real_double.RDF:
from . import matrix_real_double_dense
return matrix_real_double_dense.Matrix_real_double_dense
if R is sage.rings.complex_double.CDF:
from . import matrix_complex_double_dense
return matrix_complex_double_dense.Matrix_complex_double_dense
raise ValueError("'numpy' matrices are only available over RDF and CDF")
if implementation == 'rational':
if isinstance(R, sage.rings.abc.NumberField_cyclotomic):
from . import matrix_cyclo_dense
return matrix_cyclo_dense.Matrix_cyclo_dense
raise ValueError("'rational' matrices are only available over a cyclotomic field")
if implementation == 'linbox-float':
from . import matrix_modn_dense_float
if R.order() < matrix_modn_dense_float.MAX_MODULUS:
return matrix_modn_dense_float.Matrix_modn_dense_float
raise ValueError("'linbox-float' matrices can only deal with order < %s" % matrix_modn_dense_float.MAX_MODULUS)
if implementation == 'linbox-double':
from . import matrix_modn_dense_double
if R.order() < matrix_modn_dense_double.MAX_MODULUS:
return matrix_modn_dense_double.Matrix_modn_dense_double
raise ValueError("'linbox-double' matrices can only deal with order < %s" % matrix_modn_dense_double.MAX_MODULUS)
if implementation == 'generic':
return matrix_generic_dense.Matrix_generic_dense
if implementation == 'gap':
from .matrix_gap import Matrix_gap
return Matrix_gap
raise ValueError("unknown matrix implementation %r over %r" % (implementation, R))
if implementation is not None:
raise ValueError("cannot choose an implementation for sparse matrices")
if isinstance(R, sage.rings.abc.IntegerModRing):
try:
from . import matrix_modn_sparse
except ImportError:
pass
else:
if R.order() < matrix_modn_sparse.MAX_MODULUS:
return matrix_modn_sparse.Matrix_modn_sparse
if sage.rings.rational_field.is_RationalField(R):
try:
from . import matrix_rational_sparse
except ImportError:
pass
else:
return matrix_rational_sparse.Matrix_rational_sparse
if sage.rings.integer_ring.is_IntegerRing(R):
try:
from . import matrix_integer_sparse
except ImportError:
pass
else:
return matrix_integer_sparse.Matrix_integer_sparse
return matrix_generic_sparse.Matrix_generic_sparse
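# Illustrative sketch (added for exposition, not part of the original module).
# get_matrix_class maps (ring, dimensions, sparseness, implementation) to a
# concrete matrix class, preferring specialised backends and falling back to
# the generic ones. Assuming a working Sage installation:
#
#     from sage.rings.finite_rings.finite_field_constructor import GF
#     get_matrix_class(GF(2), 4, 4, False, None)       # Matrix_mod2_dense
#     get_matrix_class(GF(2), 4, 4, False, 'generic')  # Matrix_generic_dense
#     get_matrix_class(GF(2), 4, 4, True, None)        # Matrix_modn_sparse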
class MatrixSpace(UniqueRepresentation, Parent):
@staticmethod
def __classcall__(cls, base_ring, nrows, ncols=None, sparse=False, implementation=None, **kwds):
if base_ring not in _Rings:
raise TypeError("base_ring (=%s) must be a ring"%base_ring)
nrows = int(nrows)
if ncols is None:
ncols = nrows
else:
ncols = int(ncols)
sparse = bool(sparse)
if nrows < 0:
raise ArithmeticError("nrows must be nonnegative")
if ncols < 0:
raise ArithmeticError("ncols must be nonnegative")
if nrows > sys.maxsize or ncols > sys.maxsize:
raise OverflowError("number of rows and columns may be at most %s" % sys.maxsize)
matrix_cls = get_matrix_class(base_ring, nrows, ncols, sparse, implementation)
return super(MatrixSpace, cls).__classcall__(
cls, base_ring, nrows, ncols, sparse, matrix_cls, **kwds)
def __init__(self, base_ring, nrows, ncols, sparse, implementation):
assert isinstance(implementation, type)
self.Element = implementation
self.__nrows = nrows
self.__ncols = ncols
self.__is_sparse = sparse
from sage.categories.all import Modules, Algebras
if nrows == ncols:
category = Algebras(base_ring.category())
else:
category = Modules(base_ring.category())
category = category.WithBasis().FiniteDimensional()
if not self.__nrows or not self.__ncols:
is_finite = True
else:
try:
is_finite = base_ring.is_finite()
except (AttributeError, NotImplementedError):
is_finite = None
if is_finite is True:
category = category.Finite()
elif is_finite is False:
category = category.Infinite()
if base_ring in EnumeratedSets():
category = category.Enumerated()
Parent.__init__(self, base_ring, category=category)
def cardinality(self):
if not self.__nrows or not self.__ncols:
from sage.rings.integer_ring import ZZ
return ZZ.one()
else:
return self.base_ring().cardinality() ** (self.__nrows * self.__ncols)
def characteristic(self):
return self.base_ring().characteristic()
def _has_default_implementation(self):
default = get_matrix_class(self.base_ring(), self.nrows(), self.ncols(), self.is_sparse(), None)
return self.Element is default
@lazy_attribute
def transposed(self):
return MatrixSpace(self._base, self.__ncols, self.__nrows,
self.__is_sparse, self.Element)
@lazy_attribute
def _copy_zero(self):
if self.__is_sparse:
return False
elif self.Element is sage.matrix.matrix_mod2_dense.Matrix_mod2_dense:
return False
elif self.Element is sage.matrix.matrix_rational_dense.Matrix_rational_dense:
return False
elif self.__nrows > 40 and self.__ncols > 40:
return False
else:
return True
def _element_constructor_(self, entries, **kwds):
return self.element_class(self, entries, **kwds)
def change_ring(self, R):
try:
return self.__change_ring[R]
except AttributeError:
self.__change_ring = {}
except KeyError:
pass
M = MatrixSpace(R, self.__nrows, self.__ncols, self.__is_sparse)
self.__change_ring[R] = M
return M
def base_extend(self, R):
if R.has_coerce_map_from(self.base_ring()):
return self.change_ring(R)
raise TypeError("no base extension defined")
def construction(self):
from sage.categories.pushout import MatrixFunctor
return MatrixFunctor(self.__nrows, self.__ncols, is_sparse=self.is_sparse()), self.base_ring()
def _get_action_(self, S, op, self_on_left):
try:
try:
from sage.schemes.generic.homset import SchemeHomset_generic
from sage.schemes.generic.homset import SchemeHomset_points
except ImportError:
SchemeHomset_generic = SchemeHomset_points = None
if op is operator.mul:
from . import action as matrix_action
if self_on_left:
if is_MatrixSpace(S):
return matrix_action.MatrixMatrixAction(self, S)
elif sage.modules.free_module.is_FreeModule(S):
return matrix_action.MatrixVectorAction(self, S)
elif isinstance(S, SchemeHomset_points):
return matrix_action.MatrixSchemePointAction(self, S)
elif isinstance(S, SchemeHomset_generic):
return matrix_action.MatrixPolymapAction(self, S)
else:
return sage.structure.coerce.RightModuleAction(S, self)
else:
if is_MatrixSpace(S):
return matrix_action.MatrixMatrixAction(S, self)
elif sage.modules.free_module.is_FreeModule(S):
return matrix_action.VectorMatrixAction(self, S)
elif isinstance(S, SchemeHomset_generic):
return matrix_action.PolymapMatrixAction(self, S)
else:
return sage.structure.coerce.LeftModuleAction(S, self)
except TypeError:
return None
def _coerce_map_from_base_ring(self):
return self._generic_coerce_map(self.base_ring())
def _coerce_map_from_(self, S):
B = self.base()
if isinstance(S, MatrixSpace):
if self.nrows() != S.nrows() or self.ncols() != S.ncols():
return False
T = S.base()
if B is not T:
return B.has_coerce_map_from(T)
if self.is_sparse() != S.is_sparse():
return S.is_sparse()
return self._has_default_implementation()
try:
meth_matrix_space = S.matrix_space
except AttributeError:
pass
else:
MS = meth_matrix_space()
from sage.groups.matrix_gps.matrix_group import is_MatrixGroup
from sage.modular.arithgroup.arithgroup_generic import is_ArithmeticSubgroup
if is_MatrixGroup(S) or is_ArithmeticSubgroup(S):
return self.has_coerce_map_from(MS)
else:
return False
return (self.nrows() == self.ncols()) and self._coerce_map_via([B], S)
def _repr_(self):
if self.is_sparse():
s = "sparse"
else:
s = "dense"
s = "Full MatrixSpace of %s by %s %s matrices over %s"%(
self.__nrows, self.__ncols, s, self.base_ring())
if not self._has_default_implementation():
s += " (using {})".format(self.Element.__name__)
return s
def _repr_option(self, key):
if key == 'element_ascii_art':
return self.__nrows > 1
return super(MatrixSpace, self)._repr_option(key)
def _latex_(self):
return "\\mathrm{Mat}_{%s\\times %s}(%s)"%(self.nrows(), self.ncols(),
latex.latex(self.base_ring()))
def __len__(self):
return len(self.base_ring())**(self.nrows() * self.ncols())
def __iter__(self):
base_ring = self.base_ring()
base_iter = iter(base_ring)
number_of_entries = (self.__nrows*self.__ncols)
if number_of_entries == 0:
yield self(0)
return
import sage.combinat.integer_vector
if not base_ring.is_finite():
base_elements = [ next(base_iter) ]
weight = 0
while True:
for iv in sage.combinat.integer_vector.IntegerVectors(weight, number_of_entries):
yield self([base_elements[i] for i in iv])
weight += 1
base_elements.append( next(base_iter) )
else:
order = base_ring.order()
base_elements = list(base_ring)
for weight in range((order-1)*number_of_entries+1):
for iv in sage.combinat.integer_vector.IntegerVectors(weight, number_of_entries, max_part=(order-1)):
yield self([base_elements[i] for i in iv])
def __getitem__(self, x):
if isinstance(x, (integer.Integer, int)):
return self.list()[x]
return super(MatrixSpace, self).__getitem__(x)
def basis(self):
v = {(r, c): self.zero_matrix().__copy__()
for r in range(self.__nrows)
for c in range(self.__ncols)}
one = self.base_ring().one()
keys = []
for r in range(self.__nrows):
for c in range(self.__ncols):
keys.append((r, c))
v[r, c][r, c] = one
v[r, c].set_immutable()
from sage.sets.family import Family
return Family(keys, v.__getitem__)
def dimension(self):
return self.__nrows * self.__ncols
def dims(self):
return (self.__nrows, self.__ncols)
def submodule(self, gens, check=True, already_echelonized=False,
unitriangular=False, support_order=None, category=None,
*args, **opts):
support_order = self._compute_support_order(gens, support_order)
if not already_echelonized:
gens = self.echelon_form(gens, unitriangular, order=support_order)
else:
from copy import copy
gens = [copy(g) if g.is_mutable() else g for g in gens]
for g in gens:
g.set_immutable()
from sage.modules.with_basis.subquotient import SubmoduleWithBasis
return SubmoduleWithBasis(gens, ambient=self,
support_order=support_order,
unitriangular=unitriangular,
category=category, *args, **opts)
@cached_method
def identity_matrix(self):
if self.__nrows != self.__ncols:
raise TypeError("identity matrix must be square")
A = self.zero_matrix().__copy__()
for i in range(self.__nrows):
A[i, i] = 1
A.set_immutable()
return A
one = identity_matrix
def diagonal_matrix(self, entries):
if self.__nrows != self.__ncols:
raise TypeError("diagonal matrix must be square")
if self.__nrows < len(entries):
raise ValueError('number of diagonal matrix entries (%s) exceeds the matrix size (%s)' % (len(entries), self.__nrows))
A = self.zero_matrix().__copy__()
for i in range(len(entries)):
A[i, i] = entries[i]
return A
def is_dense(self):
return not self.__is_sparse
def is_sparse(self):
return self.__is_sparse
def is_finite(self):
return self.base_ring().is_finite()
def gen(self, n):
if hasattr(self, '__basis'):
return self.__basis[n]
r = n // self.__ncols
c = n - (r * self.__ncols)
z = self.zero_matrix().__copy__()
z[r,c] = 1
return z
@cached_method
def zero_matrix(self):
zero = self.base_ring().zero()
res = self.element_class(self, zero, False, False)
res.set_immutable()
return res
zero = zero_matrix
def ngens(self):
return self.dimension()
def matrix(self, x=None, **kwds):
return self(x, **kwds)
def matrix_space(self, nrows=None, ncols=None, sparse=False):
if nrows is None:
nrows = self.__nrows
if ncols is None:
ncols = self.__ncols
base = self._base
return MatrixSpace(base, nrows, ncols, sparse=sparse)
def ncols(self):
return self.__ncols
def nrows(self):
return self.__nrows
def row_space(self):
try:
return self.__row_space
except AttributeError:
self.__row_space = sage.modules.free_module.FreeModule(self.base_ring(),
self.ncols(), sparse=self.is_sparse())
return self.__row_space
def column_space(self):
try:
return self.__column_space
except AttributeError:
self.__column_space = sage.modules.free_module.FreeModule(self.base_ring(), self.nrows(),
sparse=self.is_sparse())
return self.__column_space
def random_element(self, density=None, *args, **kwds):
Z = self.zero_matrix().__copy__()
if density is None:
Z.randomize(density=float(1), nonzero=kwds.pop('nonzero', False), \
*args, **kwds)
else:
Z.randomize(density=density, nonzero=kwds.pop('nonzero', True), \
*args, **kwds)
return Z
def _an_element_(self):
from .args import MatrixArgs
dim = self.dimension()
if dim > 100 and self.is_sparse():
D = {}
nr = self.nrows()
nc = self.ncols()
from random import randrange
n = 0
while True:
for el in self.base().some_elements():
if n == 100:
ma = MatrixArgs(D, space=self)
del D
return ma.matrix()
D[randrange(nr), randrange(nc)] = el
n += 1
assert D
else:
L = []
while True:
for el in self.base().some_elements():
if len(L) == dim:
ma = MatrixArgs(L, space=self)
del L
return ma.matrix()
L.append(el)
assert L
def some_elements(self):
yield self.an_element()
for g in self.gens():
yield g
def _magma_init_(self, magma):
K = magma(self.base_ring())
if self.__nrows == self.__ncols:
s = 'MatrixAlgebra(%s,%s)'%(K.name(), self.__nrows)
else:
s = 'RMatrixSpace(%s,%s,%s)'%(K.name(), self.__nrows, self.__ncols)
return s
def _polymake_init_(self):
from sage.interfaces.polymake import polymake
K = polymake(self.base_ring())
return '"Matrix<{}>"'.format(K)
def _random_nonzero_element(self, *args, **kwds):
if 0 in self.dims():
raise ValueError("{} only has zero elements".format(self))
rand_matrix = self.random_element(*args, **kwds)
while rand_matrix.is_zero():
rand_matrix = self.random_element(*args, **kwds)
return rand_matrix
def dict_to_list(entries, nrows, ncols):
v = [0] * (nrows * ncols)
for ij, y in entries.items():
i, j = ij
v[i * ncols + j] = y
return v
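def _dict_to_list_example():
    # Added usage sketch (not part of the original module): dict_to_list
    # flattens a sparse {(row, col): value} mapping into a row-major list of
    # length nrows * ncols, with missing positions filled by integer zero.
    flat = dict_to_list({(0, 1): 5, (1, 2): 7}, 2, 3)
    assert flat == [0, 5, 0, 0, 0, 7]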
def test_trivial_matrices_inverse(ring, sparse=True, implementation=None, checkrank=True):
ms00 = MatrixSpace(ring, 0, 0, sparse=sparse)
m00 = ms00(0)
assert(m00.determinant() == ring(1))
assert(m00.is_invertible())
assert(m00.inverse() == m00)
if checkrank:
assert(m00.rank() == 0)
# Check that the empty 0x3 and 3x0 matrices are not invertible and that
# computing the determinant raise the proper exception.
for ms0 in [MatrixSpace(ring, 0, 3, sparse=sparse),
MatrixSpace(ring, 3, 0, sparse=sparse)]:
mn0 = ms0(0)
assert(not mn0.is_invertible())
try:
d = mn0.determinant()
print(d)
res = False
except ValueError:
res = True
assert(res)
try:
mn0.inverse()
res = False
except ArithmeticError:
res = True
assert(res)
if checkrank:
assert(mn0.rank() == 0)
# Check that the null 1x1 matrix is not invertible and that det=0
ms1 = MatrixSpace(ring, 1, 1, sparse=sparse)
m0 = ms1(0)
assert(not m0.is_invertible())
assert(m0.determinant() == ring(0))
try:
m0.inverse()
res = False
except (ZeroDivisionError, RuntimeError):
#FIXME: Make pynac throw a ZeroDivisionError on division by
#zero instead of a runtime Error
res = True
assert(res)
if checkrank:
assert(m0.rank() == 0)
# Check that the identity 1x1 matrix is its own inverse with det=1
m1 = ms1(1)
assert(m1.is_invertible())
assert(m1.determinant() == ring(1))
inv = m1.inverse()
assert(inv == m1)
if checkrank:
assert(m1.rank() == 1)
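# Hedged usage note (added for exposition): the checker above can be pointed
# at any Sage ring, e.g. test_trivial_matrices_inverse(QQ) or
# test_trivial_matrices_inverse(SR, checkrank=False); checkrank=False skips
# the rank assertions.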
# Fix unpickling Matrix_modn_dense and Matrix_integer_2x2
lazy_import('sage.matrix.matrix_modn_dense_double', 'Matrix_modn_dense_double')
lazy_import('sage.matrix.matrix_integer_dense', 'Matrix_integer_dense')
from sage.misc.persist import register_unpickle_override
def _MatrixSpace_ZZ_2x2():
from sage.rings.integer_ring import ZZ
return MatrixSpace(ZZ,2)
register_unpickle_override('sage.matrix.matrix_modn_dense',
'Matrix_modn_dense', Matrix_modn_dense_double)
register_unpickle_override('sage.matrix.matrix_integer_2x2',
'Matrix_integer_2x2', Matrix_integer_dense)
register_unpickle_override('sage.matrix.matrix_integer_2x2',
'MatrixSpace_ZZ_2x2_class', MatrixSpace)
register_unpickle_override('sage.matrix.matrix_integer_2x2',
'MatrixSpace_ZZ_2x2', _MatrixSpace_ZZ_2x2)
lazy_import('sage.matrix.matrix_gf2e_dense', 'unpickle_matrix_gf2e_dense_v0')
register_unpickle_override('sage.matrix.matrix_mod2e_dense',
'unpickle_matrix_mod2e_dense_v0', unpickle_matrix_gf2e_dense_v0)
| true
| true
|
79085bf646a15f04d6ff2922d7764eaa4cfb6e13
| 145
|
py
|
Python
|
Conversor_metros.py
|
KazumaShachou/Programs-in-Python
|
307f59e2a53b3a07eaf2ddf199bbffa0cf8d394f
|
[
"MIT"
] | null | null | null |
Conversor_metros.py
|
KazumaShachou/Programs-in-Python
|
307f59e2a53b3a07eaf2ddf199bbffa0cf8d394f
|
[
"MIT"
] | null | null | null |
Conversor_metros.py
|
KazumaShachou/Programs-in-Python
|
307f59e2a53b3a07eaf2ddf199bbffa0cf8d394f
|
[
"MIT"
] | null | null | null |
n = int(input('enter a number of meters: '))
print('the value {} meters equals {} centimeters and {} millimeters'.format(n, n*100, n*1000))
| 48.333333
| 97
| 0.675862
| true
| true
|
79085dd250c1e2298a23c9208789dd8fb87a1771
| 1,754
|
py
|
Python
|
pyngboard/credentials.py
|
tsouza/pyngboard
|
00f696453c0dddedad588b2f48259e0c158a88ec
|
[
"MIT"
] | 1
|
2017-03-30T16:34:36.000Z
|
2017-03-30T16:34:36.000Z
|
pyngboard/credentials.py
|
tsouza/pyngboard
|
00f696453c0dddedad588b2f48259e0c158a88ec
|
[
"MIT"
] | null | null | null |
pyngboard/credentials.py
|
tsouza/pyngboard
|
00f696453c0dddedad588b2f48259e0c158a88ec
|
[
"MIT"
] | 1
|
2016-08-31T10:31:28.000Z
|
2016-08-31T10:31:28.000Z
|
import os
class FileCredentials:
def __init__(self, credentials_file):
        if credentials_file is None:
credentials_file = os.path.expanduser("~") + "/.pingboard"
self.credentials_file = credentials_file
self.client_id = None
self.client_secret = None
def load(self):
try:
credentials = dict(line.strip().split('=') for line in open(self.credentials_file))
self.client_id = credentials['client_id']
self.client_secret = credentials['client_secret']
return True
        except Exception:
            return False
class ArgsCredentials:
def __init__(self, id_key, secret_key, **kwargs):
self.client_id = None
self.client_secret = None
try:
self.client_id = kwargs[id_key]
self.client_secret = kwargs[secret_key]
except KeyError:
pass
def load(self):
        return self.client_id is not None and self.client_secret is not None
class Credentials:
def __init__(self, **kwargs):
self.chain = [
ArgsCredentials('client_id', 'client_secret',
**kwargs),
ArgsCredentials('PINGBOARD_CLIENT_ID', 'PINGBOARD_CLIENT_SECRET',
**os.environ),
FileCredentials(kwargs.get('credentials_file'))
]
def load(self):
loaded_credentials = None
for credentials in self.chain:
if credentials.load():
loaded_credentials = credentials
break
if not loaded_credentials:
return False
self.client_id = loaded_credentials.client_id
self.client_secret = loaded_credentials.client_secret
return True
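def _demo_credentials_chain():
    # Added usage sketch (not part of the original module): the chain tries
    # explicit keyword arguments first, then the PINGBOARD_* environment
    # variables, then the ~/.pingboard file, stopping at the first source
    # whose load() succeeds.
    creds = Credentials(client_id="abc", client_secret="xyz")
    assert creds.load()
    assert creds.client_id == "abc"
    assert creds.client_secret == "xyz"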
| 27.40625
| 95
| 0.600342
| true
| true
|
79085ee902a617ecb3b1ec8e2483f280bc007e90
| 458
|
py
|
Python
|
galerias/urls.py
|
shiminasai/interteam-1
|
1be77a529025a226fb759fb3e04811d854f90f66
|
[
"MIT"
] | null | null | null |
galerias/urls.py
|
shiminasai/interteam-1
|
1be77a529025a226fb759fb3e04811d854f90f66
|
[
"MIT"
] | null | null | null |
galerias/urls.py
|
shiminasai/interteam-1
|
1be77a529025a226fb759fb3e04811d854f90f66
|
[
"MIT"
] | 3
|
2018-06-07T15:36:04.000Z
|
2019-04-01T19:25:43.000Z
|
from django.urls import include, path, re_path
from .models import *
from .views import *
urlpatterns = [
path('imagenes/', lista_galerias_img, name='lista-galerias-img'),
path('imagenes/<tema>', filtro_temas_img, name='filtro_temas_img'),
path('imagenes/<id>/', detalle_galerias_img, name='detalle-galerias-img'),
path('videos/', lista_galerias_videos, name='lista-galerias-videos'),
path('videos/<tema>', filtro_temas_vid, name='filtro_temas_vid'),
]
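# Hedged sketch (added for exposition): each route above must resolve to a
# view accepting the captured path parameters as keyword arguments. A
# hypothetical shape for one of them in galerias/views.py (model and template
# names below are assumptions):
#
#     def filtro_temas_img(request, tema):
#         imagenes = GaleriaImagen.objects.filter(tema=tema)
#         return render(request, 'galerias/lista_galerias_img.html',
#                       {'imagenes': imagenes})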
| 41.636364
| 75
| 0.746725
| true
| true
|
79085fe85c4825d66a89abb24efaea57782be387
| 3,119
|
py
|
Python
|
topsis.py
|
diptikaushal/TOPSIS-Dipti-101803601
|
0919e64072d4f301f311ddf280bc0c7b920ecbd5
|
[
"MIT"
] | null | null | null |
topsis.py
|
diptikaushal/TOPSIS-Dipti-101803601
|
0919e64072d4f301f311ddf280bc0c7b920ecbd5
|
[
"MIT"
] | null | null | null |
topsis.py
|
diptikaushal/TOPSIS-Dipti-101803601
|
0919e64072d4f301f311ddf280bc0c7b920ecbd5
|
[
"MIT"
] | null | null | null |
import pandas as pd
import sys
from os import path
import numpy
from sys import exit
def main():
if len(sys.argv)!=5:
print("Incorrect no. of parameters passed.")
exit(0)
i=sys.argv[1]
w=sys.argv[2]
im=sys.argv[3]
result=sys.argv[4]
if not i.endswith('.csv'):
print("Input file is not in .csv format.")
exit(0)
if not path.exists(i):
print("No such file exists!!")
exit(0)
f = pd.read_csv(i)
c = f.shape[-1]
if c<3:
print("File should have at least 3 or more columns.")
exit(0)
k=0
for i in f.columns:
k=k+1
for j in f.index:
if k!=1:
v=isinstance(f[i][j],numpy.int64)
v1=isinstance(f[i][j],float)
if not v and not v1:
                    print(f'Non-numeric value found in column {k}.')
exit(0)
    weights=w.split(',')
    impacts=im.split(',')
    try:
        weights=[int(weight) for weight in weights]
    except ValueError:
        print("Weights should be numbers separated by commas(,).")
        exit(0)
    if len(weights)!=len(impacts) or len(weights)!=f.iloc[:,1:].shape[1]:
        print("No. of input Impacts, Weights and columns(from second to last) should be similar.")
        exit(0)
    for j in impacts:
        if j!='+' and j!='-':
            print("Impact can be '+' or '-'.")
            exit(0)
a=f.iloc[:,1:]
vp=[]
vn=[]
sp=[]
sn=[]
skn=[]
p=[]
for col in range(a.shape[1]):
total=0
for row in range(a.shape[0]):
total=total+a.iloc[row,col]**2
total=total**0.5
for i in range(a.shape[0]):
a.iloc[i,col]=a.iloc[i,col]/total
for j in range(a.shape[0]):
a.iloc[j,col]=a.iloc[j,col]*weights[col]
if impacts[col]=='+':
vp.append(a.iloc[:,col].max())
vn.append(a.iloc[:,col].min())
else:
vp.append(a.iloc[:,col].min())
vn.append(a.iloc[:,col].max())
for m in range(a.shape[0]):
temp=0
ans=0
for n in range(a.shape[1]):
temp=temp+(a.iloc[m,n]-vp[n])**2
temp=temp**0.5
sp.append(temp)
for q in range(a.shape[1]):
ans=ans+(a.iloc[m,q]-vn[q])**2
ans=ans**0.5
sn.append(ans)
for w in range(0,len(sp)):
skn.append(sp[w]+sn[w])
for y in range(0,len(skn)):
p.append(sn[y]/skn[y])
    f.insert(f.shape[1],"Topsis Score",p)
    f.insert(f.shape[1],"Rank",f["Topsis Score"].rank(ascending=False))
f.to_csv(result)
if __name__ == "__main__":
main()
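# Usage sketch (added for exposition). The script expects exactly four
# arguments:
#
#     python topsis.py data.csv "1,1,1,2" "+,+,-,+" result.csv
#
# data.csv holds an identifier column followed by numeric criterion columns;
# one weight and one impact must be supplied per criterion column. The code
# above vector-normalises each column, applies the weights, takes per-column
# ideal best/worst values according to the impact signs, and ranks rows by
# the relative closeness sn / (sp + sn).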
| 30.281553
| 99
| 0.435075
| true
| true
|
79086034d91e7c6e4f944bc89dbc653fd5ab3a2a
| 743
|
py
|
Python
|
src/hark_lang/executors/thread.py
|
krrome/teal-lang
|
594ac0f0baae047fdb19ac9126d174408d487905
|
[
"Apache-2.0"
] | 85
|
2020-04-29T13:51:33.000Z
|
2020-08-28T04:40:11.000Z
|
src/hark_lang/executors/thread.py
|
krrome/teal-lang
|
594ac0f0baae047fdb19ac9126d174408d487905
|
[
"Apache-2.0"
] | 15
|
2020-05-06T07:58:18.000Z
|
2020-08-28T10:29:28.000Z
|
src/hark_lang/executors/thread.py
|
krrome/teal-lang
|
594ac0f0baae047fdb19ac9126d174408d487905
|
[
"Apache-2.0"
] | 4
|
2020-05-31T09:42:08.000Z
|
2020-08-27T17:04:26.000Z
|
import logging
import threading
import time
import traceback
import warnings
from ..machine.machine import TlMachine
LOG = logging.getLogger(__name__)
class Invoker:
def __init__(self, data_controller):
self.data_controller = data_controller
self.exception = None
threading.excepthook = self._threading_excepthook
def _threading_excepthook(self, args):
self.exception = args
def invoke(self, vmid, run_async=True):
LOG.info(f"Invoking {vmid} (new thread? {run_async})")
m = TlMachine(vmid, self)
if run_async:
thread = threading.Thread(target=m.run)
LOG.info(f"New thread: {thread}")
thread.start()
else:
m.run()
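# Minimal usage sketch (added for exposition; TlMachine and the data
# controller come from the surrounding package):
#
#     invoker = Invoker(data_controller)
#     invoker.invoke(vmid)                  # runs the machine in a new thread
#     ...
#     if invoker.exception is not None:     # captured via threading.excepthook
#         raise invoker.exception.exc_value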
| 24.766667
| 62
| 0.650067
| true
| true
|
7908603d96dc1027215771828eb3a38ef8ffd7d3
| 46,925
|
py
|
Python
|
simpletransformers/seq2seq/seq2seq_model.py
|
mahanswaray/simpletransformers
|
44a97d689b6bd19495e698ae918e67c80828559e
|
[
"Apache-2.0"
] | 2
|
2021-03-13T19:05:46.000Z
|
2021-11-07T20:03:36.000Z
|
simpletransformers/seq2seq/seq2seq_model.py
|
mahanswaray/simpletransformers
|
44a97d689b6bd19495e698ae918e67c80828559e
|
[
"Apache-2.0"
] | null | null | null |
simpletransformers/seq2seq/seq2seq_model.py
|
mahanswaray/simpletransformers
|
44a97d689b6bd19495e698ae918e67c80828559e
|
[
"Apache-2.0"
] | 3
|
2021-11-07T19:55:46.000Z
|
2022-01-24T15:25:33.000Z
|
import json
import logging
import math
import os
import random
import warnings
from dataclasses import asdict
from multiprocessing import Pool, cpu_count
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from tensorboardX import SummaryWriter
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoTokenizer,
BartConfig,
BartForConditionalGeneration,
BartTokenizer,
BertConfig,
BertForMaskedLM,
BertModel,
BertTokenizer,
CamembertConfig,
CamembertModel,
CamembertTokenizer,
DistilBertConfig,
DistilBertModel,
DistilBertTokenizer,
ElectraConfig,
ElectraModel,
ElectraTokenizer,
EncoderDecoderConfig,
EncoderDecoderModel,
LongformerConfig,
LongformerModel,
LongformerTokenizer,
MarianConfig,
MarianMTModel,
MarianTokenizer,
MobileBertConfig,
MobileBertModel,
MobileBertTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from simpletransformers.config.global_args import global_args
from simpletransformers.config.model_args import Seq2SeqArgs
from simpletransformers.seq2seq.seq2seq_utils import Seq2SeqDataset, SimpleSummarizationDataset
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"auto": (AutoConfig, AutoModel, AutoTokenizer),
"bart": (BartConfig, BartForConditionalGeneration, BartTokenizer),
"bert": (BertConfig, BertModel, BertTokenizer),
"camembert": (CamembertConfig, CamembertModel, CamembertTokenizer),
"distilbert": (DistilBertConfig, DistilBertModel, DistilBertTokenizer),
"electra": (ElectraConfig, ElectraModel, ElectraTokenizer),
"longformer": (LongformerConfig, LongformerModel, LongformerTokenizer),
"mobilebert": (MobileBertConfig, MobileBertModel, MobileBertTokenizer),
"marian": (MarianConfig, MarianMTModel, MarianTokenizer),
"roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
}
class Seq2SeqModel:
def __init__(
self,
encoder_type=None,
encoder_name=None,
decoder_name=None,
encoder_decoder_type=None,
encoder_decoder_name=None,
config=None,
args=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
"""
Initializes a Seq2SeqModel.
Args:
encoder_type (optional): The type of model to use as the encoder.
encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
Must be the same "size" as the encoder model (base/base, large/large, etc.)
encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)
encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. "outputs/") OR a valid BART or MarianMT model.
config (optional): A configuration file to build an EncoderDecoderModel.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
if not config:
# if not ((encoder_name and decoder_name) or encoder_decoder_name) and not encoder_type:
if not ((encoder_name and decoder_name) or encoder_decoder_name):
raise ValueError(
"You must specify a Seq2Seq config \t OR \t"
"encoder_type, encoder_name, and decoder_name OR \t \t"
"encoder_type and encoder_decoder_name"
)
elif not (encoder_type or encoder_decoder_type):
raise ValueError(
"You must specify a Seq2Seq config \t OR \t"
"encoder_type, encoder_name, and decoder_name \t OR \t"
"encoder_type and encoder_decoder_name"
)
self.args = self._load_model_args(encoder_decoder_name)
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, Seq2SeqArgs):
self.args = args
if "sweep_config" in kwargs:
sweep_config = kwargs.pop("sweep_config")
sweep_values = {key: value["value"] for key, value in sweep_config.as_dict().items() if key != "_wandb"}
self.args.update_from_dict(sweep_values)
if self.args.manual_seed:
random.seed(self.args.manual_seed)
np.random.seed(self.args.manual_seed)
torch.manual_seed(self.args.manual_seed)
if self.args.n_gpu > 0:
torch.cuda.manual_seed_all(self.args.manual_seed)
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
"Make sure CUDA is available or set `use_cuda=False`."
)
else:
self.device = "cpu"
self.results = {}
if not use_cuda:
self.args.fp16 = False
# config = EncoderDecoderConfig.from_encoder_decoder_configs(config, config)
if encoder_decoder_type:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_decoder_type]
else:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_type]
if encoder_decoder_type in ["bart", "marian"]:
self.model = model_class.from_pretrained(encoder_decoder_name)
if encoder_decoder_type == "bart":
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
elif encoder_decoder_type == "marian":
if self.args.base_marian_model_name:
self.encoder_tokenizer = tokenizer_class.from_pretrained(self.args.base_marian_model_name)
else:
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
self.decoder_tokenizer = self.encoder_tokenizer
self.config = self.model.config
else:
if encoder_decoder_name:
# self.model = EncoderDecoderModel.from_pretrained(encoder_decoder_name)
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
os.path.join(encoder_decoder_name, "encoder"), os.path.join(encoder_decoder_name, "decoder")
)
self.model.encoder = model_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.model.decoder = BertForMaskedLM.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
self.encoder_tokenizer = tokenizer_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.decoder_tokenizer = BertTokenizer.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
else:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_name, decoder_name, config=config
)
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_name)
self.decoder_tokenizer = BertTokenizer.from_pretrained(decoder_name)
self.encoder_config = self.model.config.encoder
self.decoder_config = self.model.config.decoder
if self.args.wandb_project and not wandb_available:
warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
self.args.wandb_project = None
if encoder_decoder_name:
self.args.model_name = encoder_decoder_name
# # Checking if we are loading from a saved model or using a pre-trained model
# if not saved_model_args and encoder_decoder_type == "marian":
# Need to store base pre-trained model name to get the tokenizer when loading a saved model
self.args.base_marian_model_name = encoder_decoder_name
elif encoder_name and decoder_name:
self.args.model_name = encoder_name + "-" + decoder_name
else:
self.args.model_name = "encoder-decoder"
if encoder_decoder_type:
self.args.model_type = encoder_decoder_type
elif encoder_type:
self.args.model_type = encoder_type + "-bert"
else:
self.args.model_type = "encoder-decoder"
def train_model(
self, train_data, output_dir=None, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target text sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
None
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
# if self.args.silent:
# show_running_loss = False
if self.args.evaluate_during_training and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args.overwrite_output_dir = True to overcome.".format(output_dir)
)
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
global_step, tr_loss = self.train(
train_dataset,
output_dir,
show_running_loss=show_running_loss,
eval_data=eval_data,
verbose=verbose,
**kwargs,
)
self._save_model(self.args.output_dir, model=self.model)
# model_to_save = self.model.module if hasattr(self.model, "module") else self.model
# model_to_save.save_pretrained(output_dir)
# self.encoder_tokenizer.save_pretrained(output_dir)
# self.decoder_tokenizer.save_pretrained(output_dir)
# torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_name, output_dir))
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
num_workers=self.args.dataloader_num_workers,
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [p for n, p in model.named_parameters() if n in params]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
# TODO: Use custom optimizer like with BertSum?
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if (
args.model_name
and os.path.isfile(os.path.join(args.model_name, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name, "scheduler.pt")))
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info(" Training started")
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.silent, mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if args.model_name and os.path.exists(args.model_name):
try:
                # set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
model.train()
for current_epoch in train_iterator:
if epochs_trained > 0:
epochs_trained -= 1
continue
train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs}",
disable=args.silent,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
# batch = tuple(t.to(device) for t in batch)
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
else:
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
current_loss = loss.item()
if show_running_loss:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.wandb_project:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_lr()[0],
"global_step": global_step,
}
)
if args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
# Only evaluate when single GPU otherwise metrics may not average well
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
**kwargs,
)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
if best_eval_metric and args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if args.save_model_every_epoch or args.evaluate_during_training:
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch:
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training:
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
**kwargs,
)
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args.output_dir, "training_progress_scores.csv"), index=False)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if best_eval_metric and args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args.use_early_stopping and args.early_stopping_consider_epochs:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args.use_early_stopping and args.early_stopping_consider_epochs:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
return global_step, tr_loss / global_step
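    # Configuration sketch (added for exposition): the grouped-parameter setup
    # in train() consumes args shaped as below. The parameter name is a
    # hypothetical example; a "layer" entry selects parameters whose names
    # contain "layer.<n>.", with the weight-decay/no-decay split applied
    # automatically:
    #
    #     args = {
    #         "custom_parameter_groups": [
    #             {"params": ["model.shared.weight"], "lr": 2e-5},
    #         ],
    #         "custom_layer_parameters": [
    #             {"layer": 0, "lr": 1e-5},
    #         ],
    #     }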
def eval_model(self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target text sequence.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args.output_dir
self._move_model_to_device()
eval_dataset = self.load_and_cache_examples(eval_data, evaluate=True, verbose=verbose, silent=silent)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)
self.results.update(result)
if self.args.evaluate_generated_text:
to_predict = eval_data["input_text"].tolist()
preds = self.predict(to_predict)
result = self.compute_metrics(eval_data["target_text"].tolist(), preds, **kwargs)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
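    # Usage sketch (added for exposition): extra metrics are callables taking
    # (labels, preds) lists of strings, and are only computed when
    # args.evaluate_generated_text is enabled. A hypothetical exact-match
    # metric:
    #
    #     def exact_match(labels, preds):
    #         return sum(l == p for l, p in zip(labels, preds)) / len(labels)
    #
    #     results = model.eval_model(eval_df, exact_match=exact_match)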
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, disable=args.silent or silent, desc="Running Evaluation"):
# batch = tuple(t.to(device) for t in batch)
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
results["eval_loss"] = eval_loss
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def predict(self, to_predict):
"""
Performs predictions on a list of text.
Args:
to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.
Returns:
preds: A python list of the generated sequences.
""" # noqa: ignore flake8"
self._move_model_to_device()
all_outputs = []
# Batching
for batch in [
to_predict[i : i + self.args.eval_batch_size] for i in range(0, len(to_predict), self.args.eval_batch_size)
]:
if self.args.model_type == "marian":
input_ids = self.encoder_tokenizer.prepare_translation_batch(
batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
else:
input_ids = self.encoder_tokenizer.batch_encode_plus(
batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
input_ids = input_ids.to(self.device)
if self.args.model_type in ["bart", "marian"]:
outputs = self.model.generate(
input_ids=input_ids,
num_beams=self.args.num_beams,
max_length=self.args.max_length,
length_penalty=self.args.length_penalty,
early_stopping=self.args.early_stopping,
repetition_penalty=self.args.repetition_penalty,
do_sample=self.args.do_sample,
top_k=self.args.top_k,
top_p=self.args.top_p,
num_return_sequences=self.args.num_return_sequences,
)
else:
outputs = self.model.generate(
input_ids=input_ids,
decoder_start_token_id=self.model.config.decoder.pad_token_id,
num_beams=self.args.num_beams,
max_length=self.args.max_length,
length_penalty=self.args.length_penalty,
early_stopping=self.args.early_stopping,
repetition_penalty=self.args.repetition_penalty,
do_sample=self.args.do_sample,
top_k=self.args.top_k,
top_p=self.args.top_p,
num_return_sequences=self.args.num_return_sequences,
)
all_outputs.extend(outputs.cpu().numpy())
if self.args.use_multiprocessed_decoding:
self.model.to("cpu")
with Pool(self.args.process_count) as p:
outputs = list(
tqdm(
p.imap(self._decode, all_outputs, chunksize=self.args.multiprocessing_chunksize),
total=len(all_outputs),
desc="Decoding outputs",
disable=self.args.silent,
)
)
self._move_model_to_device()
else:
outputs = [
self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for output_id in all_outputs
]
if self.args.num_return_sequences > 1:
return [
outputs[i : i + self.args.num_return_sequences]
for i in range(0, len(outputs), self.args.num_return_sequences)
]
else:
return outputs
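    # Illustrative sketch (the prefix string is a placeholder): predict() takes
    # raw strings with any task prefix already prepended, e.g.
    #
    #   preds = model.predict(["translate English to German: Hello world"])
    #
    # With num_return_sequences > 1, the return value becomes one list of
    # candidate sequences per input string, as implemented above.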
def _decode(self, output_id):
return self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
def compute_metrics(self, labels, preds, **kwargs):
"""
Computes the evaluation metrics for the model predictions.
Args:
labels: List of target sequences
preds: List of model generated outputs
**kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
result: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
# assert len(labels) == len(preds)
results = {}
for metric, func in kwargs.items():
results[metric] = func(labels, preds)
return results
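    # Minimal sketch (made-up inputs): compute_metrics() simply applies each
    # keyword-argument callable to the two string lists, e.g.
    #
    #   model.compute_metrics(["a b", "c d"], ["a b", "c x"],
    #                         exact=lambda l, p: sum(x == y for x, y in zip(l, p)))
    #   # -> {"exact": 1}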
def load_and_cache_examples(self, data, evaluate=False, no_cache=False, verbose=True, silent=False):
"""
Creates a T5Dataset from data.
Utility function for train() and eval() methods. Not intended to be used directly.
"""
encoder_tokenizer = self.encoder_tokenizer
decoder_tokenizer = self.decoder_tokenizer
args = self.args
if not no_cache:
no_cache = args.no_cache
if not no_cache:
os.makedirs(self.args.cache_dir, exist_ok=True)
mode = "dev" if evaluate else "train"
if args.dataset_class:
CustomDataset = args.dataset_class
return CustomDataset(encoder_tokenizer, decoder_tokenizer, args, data, mode)
else:
if args.model_type in ["bart", "marian"]:
return SimpleSummarizationDataset(encoder_tokenizer, self.args, data, mode)
else:
return Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, self.args, data, mode,)
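    # Note (inferred from eval_model() above, which reads data["input_text"] and
    # data["target_text"]): `data` is expected to be a pandas DataFrame with
    # "input_text" and "target_text" columns, e.g.
    #
    #   train_df = pd.DataFrame({"input_text": [...], "target_text": [...]})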
def _create_training_progress_scores(self, **kwargs):
extra_metrics = {key: [] for key in kwargs}
training_progress_scores = {
"global_step": [],
"eval_loss": [],
"train_loss": [],
**extra_metrics,
}
return training_progress_scores
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
if not output_dir:
output_dir = self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model into {output_dir}")
if model and not self.args.no_save:
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
self._save_model_args(output_dir)
if self.args.model_type in ["bart", "marian"]:
os.makedirs(os.path.join(output_dir), exist_ok=True)
model_to_save.save_pretrained(output_dir)
self.config.save_pretrained(output_dir)
if self.args.model_type == "bart":
self.encoder_tokenizer.save_pretrained(output_dir)
else:
os.makedirs(os.path.join(output_dir, "encoder"), exist_ok=True)
os.makedirs(os.path.join(output_dir, "decoder"), exist_ok=True)
self.encoder_config.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_config.save_pretrained(os.path.join(output_dir, "decoder"))
model_to_save = (
self.model.encoder.module if hasattr(self.model.encoder, "module") else self.model.encoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "encoder"))
model_to_save = (
self.model.decoder.module if hasattr(self.model.decoder, "module") else self.model.decoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "decoder"))
self.encoder_tokenizer.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_tokenizer.save_pretrained(os.path.join(output_dir, "decoder"))
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer and scheduler and self.args.save_optimizer_and_scheduler:
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
def _move_model_to_device(self):
self.model.to(self.device)
def _get_inputs_dict(self, batch):
device = self.device
if self.args.model_type in ["bart", "marian"]:
pad_token_id = self.encoder_tokenizer.pad_token_id
source_ids, source_mask, y = batch["source_ids"], batch["source_mask"], batch["target_ids"]
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone()
lm_labels[y[:, 1:] == pad_token_id] = -100
inputs = {
"input_ids": source_ids.to(device),
"attention_mask": source_mask.to(device),
"decoder_input_ids": y_ids.to(device),
"lm_labels": lm_labels.to(device),
}
else:
lm_labels = batch[1]
lm_labels_masked = lm_labels.clone()
lm_labels_masked[lm_labels_masked == self.decoder_tokenizer.pad_token_id] = -100
inputs = {
"input_ids": batch[0].to(device),
"decoder_input_ids": lm_labels.to(device),
"labels": lm_labels_masked.to(device),
}
return inputs
def _save_model_args(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
self.args.save(output_dir)
def _load_model_args(self, input_dir):
args = Seq2SeqArgs()
args.load(input_dir)
return args
def get_named_parameters(self):
return [n for n, p in self.model.named_parameters()]
| 46.231527
| 227
| 0.58212
|
import json
import logging
import math
import os
import random
import warnings
from dataclasses import asdict
from multiprocessing import Pool, cpu_count
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from tensorboardX import SummaryWriter
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoTokenizer,
BartConfig,
BartForConditionalGeneration,
BartTokenizer,
BertConfig,
BertForMaskedLM,
BertModel,
BertTokenizer,
CamembertConfig,
CamembertModel,
CamembertTokenizer,
DistilBertConfig,
DistilBertModel,
DistilBertTokenizer,
ElectraConfig,
ElectraModel,
ElectraTokenizer,
EncoderDecoderConfig,
EncoderDecoderModel,
LongformerConfig,
LongformerModel,
LongformerTokenizer,
MarianConfig,
MarianMTModel,
MarianTokenizer,
MobileBertConfig,
MobileBertModel,
MobileBertTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from simpletransformers.config.global_args import global_args
from simpletransformers.config.model_args import Seq2SeqArgs
from simpletransformers.seq2seq.seq2seq_utils import Seq2SeqDataset, SimpleSummarizationDataset
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"auto": (AutoConfig, AutoModel, AutoTokenizer),
"bart": (BartConfig, BartForConditionalGeneration, BartTokenizer),
"bert": (BertConfig, BertModel, BertTokenizer),
"camembert": (CamembertConfig, CamembertModel, CamembertTokenizer),
"distilbert": (DistilBertConfig, DistilBertModel, DistilBertTokenizer),
"electra": (ElectraConfig, ElectraModel, ElectraTokenizer),
"longformer": (LongformerConfig, LongformerModel, LongformerTokenizer),
"mobilebert": (MobileBertConfig, MobileBertModel, MobileBertTokenizer),
"marian": (MarianConfig, MarianMTModel, MarianTokenizer),
"roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
}
class Seq2SeqModel:
def __init__(
self,
encoder_type=None,
encoder_name=None,
decoder_name=None,
encoder_decoder_type=None,
encoder_decoder_name=None,
config=None,
args=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
if not config:
if not ((encoder_name and decoder_name) or encoder_decoder_name):
raise ValueError(
"You must specify a Seq2Seq config \t OR \t"
"encoder_type, encoder_name, and decoder_name OR \t \t"
"encoder_type and encoder_decoder_name"
)
elif not (encoder_type or encoder_decoder_type):
raise ValueError(
"You must specify a Seq2Seq config \t OR \t"
"encoder_type, encoder_name, and decoder_name \t OR \t"
"encoder_type and encoder_decoder_name"
)
self.args = self._load_model_args(encoder_decoder_name)
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, Seq2SeqArgs):
self.args = args
if "sweep_config" in kwargs:
sweep_config = kwargs.pop("sweep_config")
sweep_values = {key: value["value"] for key, value in sweep_config.as_dict().items() if key != "_wandb"}
self.args.update_from_dict(sweep_values)
if self.args.manual_seed:
random.seed(self.args.manual_seed)
np.random.seed(self.args.manual_seed)
torch.manual_seed(self.args.manual_seed)
if self.args.n_gpu > 0:
torch.cuda.manual_seed_all(self.args.manual_seed)
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
"Make sure CUDA is available or set `use_cuda=False`."
)
else:
self.device = "cpu"
self.results = {}
if not use_cuda:
self.args.fp16 = False
if encoder_decoder_type:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_decoder_type]
else:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_type]
if encoder_decoder_type in ["bart", "marian"]:
self.model = model_class.from_pretrained(encoder_decoder_name)
if encoder_decoder_type == "bart":
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
elif encoder_decoder_type == "marian":
if self.args.base_marian_model_name:
self.encoder_tokenizer = tokenizer_class.from_pretrained(self.args.base_marian_model_name)
else:
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
self.decoder_tokenizer = self.encoder_tokenizer
self.config = self.model.config
else:
if encoder_decoder_name:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
os.path.join(encoder_decoder_name, "encoder"), os.path.join(encoder_decoder_name, "decoder")
)
self.model.encoder = model_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.model.decoder = BertForMaskedLM.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
self.encoder_tokenizer = tokenizer_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.decoder_tokenizer = BertTokenizer.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
else:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_name, decoder_name, config=config
)
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_name)
self.decoder_tokenizer = BertTokenizer.from_pretrained(decoder_name)
self.encoder_config = self.model.config.encoder
self.decoder_config = self.model.config.decoder
if self.args.wandb_project and not wandb_available:
warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
self.args.wandb_project = None
if encoder_decoder_name:
self.args.model_name = encoder_decoder_name
elif encoder_name and decoder_name:
self.args.model_name = encoder_name + "-" + decoder_name
else:
self.args.model_name = "encoder-decoder"
if encoder_decoder_type:
self.args.model_type = encoder_decoder_type
elif encoder_type:
self.args.model_type = encoder_type + "-bert"
else:
self.args.model_type = "encoder-decoder"
def train_model(
self, train_data, output_dir=None, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs,
):
if args:
self.args.update_from_dict(args)
if self.args.evaluate_during_training and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args.overwrite_output_dir = True to overcome.".format(output_dir)
)
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
global_step, tr_loss = self.train(
train_dataset,
output_dir,
show_running_loss=show_running_loss,
eval_data=eval_data,
verbose=verbose,
**kwargs,
)
self._save_model(self.args.output_dir, model=self.model)
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_name, output_dir))
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
):
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
num_workers=self.args.dataloader_num_workers,
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [p for n, p in model.named_parameters() if n in params]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if (
args.model_name
and os.path.isfile(os.path.join(args.model_name, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name, "scheduler.pt"))
):
optimizer.load_state_dict(torch.load(os.path.join(args.model_name, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name, "scheduler.pt")))
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info(" Training started")
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.silent, mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if args.model_name and os.path.exists(args.model_name):
try:
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
model.train()
for current_epoch in train_iterator:
if epochs_trained > 0:
epochs_trained -= 1
continue
train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs}",
disable=args.silent,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
outputs = model(**inputs)
loss = outputs[0]
else:
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean()
current_loss = loss.item()
if show_running_loss:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.wandb_project:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_lr()[0],
"global_step": global_step,
}
)
if args.save_steps > 0 and global_step % args.save_steps == 0:
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
**kwargs,
)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
if best_eval_metric and args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if args.save_model_every_epoch or args.evaluate_during_training:
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch:
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training:
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
**kwargs,
)
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args.output_dir, "training_progress_scores.csv"), index=False)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if best_eval_metric and args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args.use_early_stopping and args.early_stopping_consider_epochs:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args.use_early_stopping and args.early_stopping_consider_epochs:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
return global_step, tr_loss / global_step
def eval_model(self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs):
if not output_dir:
output_dir = self.args.output_dir
self._move_model_to_device()
eval_dataset = self.load_and_cache_examples(eval_data, evaluate=True, verbose=verbose, silent=silent)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)
self.results.update(result)
if self.args.evaluate_generated_text:
to_predict = eval_data["input_text"].tolist()
preds = self.predict(to_predict)
result = self.compute_metrics(eval_data["target_text"].tolist(), preds, **kwargs)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, disable=args.silent or silent, desc="Running Evaluation"):
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
results["eval_loss"] = eval_loss
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def predict(self, to_predict):
self._move_model_to_device()
all_outputs = []
for batch in [
to_predict[i : i + self.args.eval_batch_size] for i in range(0, len(to_predict), self.args.eval_batch_size)
]:
if self.args.model_type == "marian":
input_ids = self.encoder_tokenizer.prepare_translation_batch(
batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
else:
input_ids = self.encoder_tokenizer.batch_encode_plus(
batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
input_ids = input_ids.to(self.device)
if self.args.model_type in ["bart", "marian"]:
outputs = self.model.generate(
input_ids=input_ids,
num_beams=self.args.num_beams,
max_length=self.args.max_length,
length_penalty=self.args.length_penalty,
early_stopping=self.args.early_stopping,
repetition_penalty=self.args.repetition_penalty,
do_sample=self.args.do_sample,
top_k=self.args.top_k,
top_p=self.args.top_p,
num_return_sequences=self.args.num_return_sequences,
)
else:
outputs = self.model.generate(
input_ids=input_ids,
decoder_start_token_id=self.model.config.decoder.pad_token_id,
num_beams=self.args.num_beams,
max_length=self.args.max_length,
length_penalty=self.args.length_penalty,
early_stopping=self.args.early_stopping,
repetition_penalty=self.args.repetition_penalty,
do_sample=self.args.do_sample,
top_k=self.args.top_k,
top_p=self.args.top_p,
num_return_sequences=self.args.num_return_sequences,
)
all_outputs.extend(outputs.cpu().numpy())
if self.args.use_multiprocessed_decoding:
self.model.to("cpu")
with Pool(self.args.process_count) as p:
outputs = list(
tqdm(
p.imap(self._decode, all_outputs, chunksize=self.args.multiprocessing_chunksize),
total=len(all_outputs),
desc="Decoding outputs",
disable=self.args.silent,
)
)
self._move_model_to_device()
else:
outputs = [
self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for output_id in all_outputs
]
if self.args.num_return_sequences > 1:
return [
outputs[i : i + self.args.num_return_sequences]
for i in range(0, len(outputs), self.args.num_return_sequences)
]
else:
return outputs
def _decode(self, output_id):
return self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
def compute_metrics(self, labels, preds, **kwargs):
results = {}
for metric, func in kwargs.items():
results[metric] = func(labels, preds)
return results
def load_and_cache_examples(self, data, evaluate=False, no_cache=False, verbose=True, silent=False):
encoder_tokenizer = self.encoder_tokenizer
decoder_tokenizer = self.decoder_tokenizer
args = self.args
if not no_cache:
no_cache = args.no_cache
if not no_cache:
os.makedirs(self.args.cache_dir, exist_ok=True)
mode = "dev" if evaluate else "train"
if args.dataset_class:
CustomDataset = args.dataset_class
return CustomDataset(encoder_tokenizer, decoder_tokenizer, args, data, mode)
else:
if args.model_type in ["bart", "marian"]:
return SimpleSummarizationDataset(encoder_tokenizer, self.args, data, mode)
else:
return Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, self.args, data, mode,)
def _create_training_progress_scores(self, **kwargs):
extra_metrics = {key: [] for key in kwargs}
training_progress_scores = {
"global_step": [],
"eval_loss": [],
"train_loss": [],
**extra_metrics,
}
return training_progress_scores
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
if not output_dir:
output_dir = self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model into {output_dir}")
if model and not self.args.no_save:
model_to_save = model.module if hasattr(model, "module") else model
self._save_model_args(output_dir)
if self.args.model_type in ["bart", "marian"]:
os.makedirs(os.path.join(output_dir), exist_ok=True)
model_to_save.save_pretrained(output_dir)
self.config.save_pretrained(output_dir)
if self.args.model_type == "bart":
self.encoder_tokenizer.save_pretrained(output_dir)
else:
os.makedirs(os.path.join(output_dir, "encoder"), exist_ok=True)
os.makedirs(os.path.join(output_dir, "decoder"), exist_ok=True)
self.encoder_config.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_config.save_pretrained(os.path.join(output_dir, "decoder"))
model_to_save = (
self.model.encoder.module if hasattr(self.model.encoder, "module") else self.model.encoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "encoder"))
model_to_save = (
self.model.decoder.module if hasattr(self.model.decoder, "module") else self.model.decoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "decoder"))
self.encoder_tokenizer.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_tokenizer.save_pretrained(os.path.join(output_dir, "decoder"))
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer and scheduler and self.args.save_optimizer_and_scheduler:
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
def _move_model_to_device(self):
self.model.to(self.device)
def _get_inputs_dict(self, batch):
device = self.device
if self.args.model_type in ["bart", "marian"]:
pad_token_id = self.encoder_tokenizer.pad_token_id
source_ids, source_mask, y = batch["source_ids"], batch["source_mask"], batch["target_ids"]
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone()
lm_labels[y[:, 1:] == pad_token_id] = -100
inputs = {
"input_ids": source_ids.to(device),
"attention_mask": source_mask.to(device),
"decoder_input_ids": y_ids.to(device),
"lm_labels": lm_labels.to(device),
}
else:
lm_labels = batch[1]
lm_labels_masked = lm_labels.clone()
lm_labels_masked[lm_labels_masked == self.decoder_tokenizer.pad_token_id] = -100
inputs = {
"input_ids": batch[0].to(device),
"decoder_input_ids": lm_labels.to(device),
"labels": lm_labels_masked.to(device),
}
return inputs
def _save_model_args(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
self.args.save(output_dir)
def _load_model_args(self, input_dir):
args = Seq2SeqArgs()
args.load(input_dir)
return args
def get_named_parameters(self):
return [n for n, p in self.model.named_parameters()]
| true
| true
|
790860829b3afe564d22a3be18d4eabe97541fa8
| 3,423
|
py
|
Python
|
dataloaders/__init__.py
|
dzwallkilled/pytorch-deeplab-xception
|
d8c04a24641c8c31a6800a37de6a7bfe607e5495
|
[
"MIT"
] | null | null | null |
dataloaders/__init__.py
|
dzwallkilled/pytorch-deeplab-xception
|
d8c04a24641c8c31a6800a37de6a7bfe607e5495
|
[
"MIT"
] | null | null | null |
dataloaders/__init__.py
|
dzwallkilled/pytorch-deeplab-xception
|
d8c04a24641c8c31a6800a37de6a7bfe607e5495
|
[
"MIT"
] | null | null | null |
from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd, rip
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'pascal':
train_set = pascal.VOCSegmentation(args, split='train')
val_set = pascal.VOCSegmentation(args, split='val')
if args.use_sbd:
sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'cityscapes':
train_set = cityscapes.CityscapesSegmentation(args, split='train')
val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'coco':
train_set = coco.COCOSegmentation(args, split='train')
val_set = coco.COCOSegmentation(args, split='val')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'rip':
classes = {'full': 7, 'level1': 2, 'level2': 3, 'level3': 5}
import os
from mypath import Path
data_root = Path.db_root_dir(args.dataset)
root = os.path.join(data_root, 'RipTrainingAllData')
patches, level = args.rip_mode.split('-')
if patches == 'patches':
patches = 'COCOJSONPatches'
elif patches == 'patches_v1':
patches = 'COCOJSONPatches_v1'
else:
patches = 'COCOJSONs'
# patches = 'COCOJSONPatches' if patches == 'patches' else 'COCOJSONs'
        train_ann_file = os.path.join(data_root, patches, level, 'cv_5_fold', 'train_1.json')
        val_ann_file = os.path.join(data_root, patches, level, 'cv_5_fold', 'val_1.json')
train_set = rip.RIPSegmentation(args, split='train', root=root, ann_file=train_ann_file)
val_set = rip.RIPSegmentation(args, split='val', root=root, ann_file=val_ann_file)
num_classes = classes[level]
# NOTE: drop_last=True here to avoid situation when batch_size=1 which causes BatchNorm2d errors
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_classes
else:
raise NotImplementedError
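# Usage sketch (hypothetical; assumes the chosen dataset is available under
# mypath.Path). The Namespace fields shown are the ones this function reads,
# and extra kwargs are forwarded to DataLoader:
#
#   from argparse import Namespace
#   args = Namespace(dataset='pascal', batch_size=8, use_sbd=False)
#   train_loader, val_loader, test_loader, num_class = make_data_loader(
#       args, num_workers=4, pin_memory=True)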
| 48.211268
| 112
| 0.684487
|
from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd, rip
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'pascal':
train_set = pascal.VOCSegmentation(args, split='train')
val_set = pascal.VOCSegmentation(args, split='val')
if args.use_sbd:
sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'cityscapes':
train_set = cityscapes.CityscapesSegmentation(args, split='train')
val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'coco':
train_set = coco.COCOSegmentation(args, split='train')
val_set = coco.COCOSegmentation(args, split='val')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'rip':
classes = {'full': 7, 'level1': 2, 'level2': 3, 'level3': 5}
import os
from mypath import Path
data_root = Path.db_root_dir(args.dataset)
root = os.path.join(data_root, 'RipTrainingAllData')
patches, level = args.rip_mode.split('-')
if patches == 'patches':
patches = 'COCOJSONPatches'
elif patches == 'patches_v1':
patches = 'COCOJSONPatches_v1'
else:
patches = 'COCOJSONs'
        train_ann_file = os.path.join(data_root, patches, level, 'cv_5_fold', 'train_1.json')
        val_ann_file = os.path.join(data_root, patches, level, 'cv_5_fold', 'val_1.json')
train_set = rip.RIPSegmentation(args, split='train', root=root, ann_file=train_ann_file)
val_set = rip.RIPSegmentation(args, split='val', root=root, ann_file=val_ann_file)
num_classes = classes[level]
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_classes
else:
raise NotImplementedError
| true
| true
|
79086156a6561eb37e79d532af049091f1d13751
| 3,025
|
py
|
Python
|
mizarlabs/tests/transformers/test_rsi.py
|
MizarAI/mizar-labs
|
c6ec17bc3d9a91ec3f6ee2e7b20017499115fc37
|
[
"MIT"
] | 18
|
2021-03-19T15:41:43.000Z
|
2022-03-20T14:23:07.000Z
|
mizarlabs/tests/transformers/test_rsi.py
|
MizarAI/mizar-labs
|
c6ec17bc3d9a91ec3f6ee2e7b20017499115fc37
|
[
"MIT"
] | 14
|
2021-03-17T14:16:02.000Z
|
2021-05-31T16:51:12.000Z
|
mizarlabs/tests/transformers/test_rsi.py
|
MizarAI/mizar-labs
|
c6ec17bc3d9a91ec3f6ee2e7b20017499115fc37
|
[
"MIT"
] | 3
|
2021-07-02T21:38:06.000Z
|
2022-01-10T09:56:18.000Z
|
from mizarlabs.transformers.technical.rsi import BarArrivalRSIStrategy
from mizarlabs.transformers.technical.rsi import RSIConfirmation
def test_bar_arrival_rsi_strategy_predict(dollar_bar_dataframe):
rsi_upper_threshold = 55
rsi_lower_threshold = 45
bar_arrival_upper_threshold = 0
bar_arrival_lower_threshold = -0.2
rsi_timeperiod = 25
bar_arrival_fast_period = 500
bar_arrival_slow_period = 200
max_bar_arrival_mean_diff = 10000000000000
bar_arrival_rsi_strategy = BarArrivalRSIStrategy(
rsi_upper_threshold,
rsi_lower_threshold,
bar_arrival_upper_threshold,
bar_arrival_lower_threshold,
rsi_timeperiod,
bar_arrival_fast_period,
bar_arrival_slow_period,
max_bar_arrival_mean_diff,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
assert set(preds).issubset(bar_arrival_rsi_strategy.classes_)
def test_bar_arrival_rsi_strategy_predict_proba(dollar_bar_dataframe):
rsi_upper_threshold = 55
rsi_lower_threshold = 45
bar_arrival_upper_threshold = 0
bar_arrival_lower_threshold = -0.2
rsi_timeperiod = 25
bar_arrival_fast_period = 500
bar_arrival_slow_period = 200
max_bar_arrival_mean_diff = 10000000000000
bar_arrival_rsi_strategy = BarArrivalRSIStrategy(
rsi_upper_threshold,
rsi_lower_threshold,
bar_arrival_upper_threshold,
bar_arrival_lower_threshold,
rsi_timeperiod,
bar_arrival_fast_period,
bar_arrival_slow_period,
max_bar_arrival_mean_diff,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
pred_proba = bar_arrival_rsi_strategy.predict_proba(dollar_bar_dataframe)
pred_to_idx_map = {0: 0, 1: 1}
assert all(pred_proba.sum(axis=1) == 1)
assert all(pred_proba[i, pred_to_idx_map[p]] == 1 for i, p in enumerate(preds))
def test_rsi_confirmation_predict(dollar_bar_dataframe):
rsi_threshold = 45
rsi_timeperiod = 25
rsi_confirmation_period = 2
rsi_moving_average_window = 2
bar_arrival_rsi_strategy = RSIConfirmation(
rsi_timeperiod,
rsi_threshold,
rsi_confirmation_period,
rsi_moving_average_window,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
assert set(preds).issubset(bar_arrival_rsi_strategy.classes_)
def test_rsi_confirmation_predict_proba(dollar_bar_dataframe):
rsi_threshold = 45
rsi_timeperiod = 25
rsi_confirmation_period = 2
rsi_moving_average_window = 2
bar_arrival_rsi_strategy = RSIConfirmation(
rsi_timeperiod,
rsi_threshold,
rsi_confirmation_period,
rsi_moving_average_window,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
pred_proba = bar_arrival_rsi_strategy.predict_proba(dollar_bar_dataframe)
pred_to_idx_map = {0: 0, 1: 1}
assert all(pred_proba.sum(axis=1) == 1)
assert all(pred_proba[i, pred_to_idx_map[p]] == 1 for i, p in enumerate(preds))
| 35.174419
| 83
| 0.753719
|
from mizarlabs.transformers.technical.rsi import BarArrivalRSIStrategy
from mizarlabs.transformers.technical.rsi import RSIConfirmation
def test_bar_arrival_rsi_strategy_predict(dollar_bar_dataframe):
rsi_upper_threshold = 55
rsi_lower_threshold = 45
bar_arrival_upper_threshold = 0
bar_arrival_lower_threshold = -0.2
rsi_timeperiod = 25
bar_arrival_fast_period = 500
bar_arrival_slow_period = 200
max_bar_arrival_mean_diff = 10000000000000
bar_arrival_rsi_strategy = BarArrivalRSIStrategy(
rsi_upper_threshold,
rsi_lower_threshold,
bar_arrival_upper_threshold,
bar_arrival_lower_threshold,
rsi_timeperiod,
bar_arrival_fast_period,
bar_arrival_slow_period,
max_bar_arrival_mean_diff,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
assert set(preds).issubset(bar_arrival_rsi_strategy.classes_)
def test_bar_arrival_rsi_strategy_predict_proba(dollar_bar_dataframe):
rsi_upper_threshold = 55
rsi_lower_threshold = 45
bar_arrival_upper_threshold = 0
bar_arrival_lower_threshold = -0.2
rsi_timeperiod = 25
bar_arrival_fast_period = 500
bar_arrival_slow_period = 200
max_bar_arrival_mean_diff = 10000000000000
bar_arrival_rsi_strategy = BarArrivalRSIStrategy(
rsi_upper_threshold,
rsi_lower_threshold,
bar_arrival_upper_threshold,
bar_arrival_lower_threshold,
rsi_timeperiod,
bar_arrival_fast_period,
bar_arrival_slow_period,
max_bar_arrival_mean_diff,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
pred_proba = bar_arrival_rsi_strategy.predict_proba(dollar_bar_dataframe)
pred_to_idx_map = {0: 0, 1: 1}
assert all(pred_proba.sum(axis=1) == 1)
assert all(pred_proba[i, pred_to_idx_map[p]] == 1 for i, p in enumerate(preds))
def test_rsi_confirmation_predict(dollar_bar_dataframe):
rsi_threshold = 45
rsi_timeperiod = 25
rsi_confirmation_period = 2
rsi_moving_average_window = 2
bar_arrival_rsi_strategy = RSIConfirmation(
rsi_timeperiod,
rsi_threshold,
rsi_confirmation_period,
rsi_moving_average_window,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
assert set(preds).issubset(bar_arrival_rsi_strategy.classes_)
def test_rsi_confirmation_predict_proba(dollar_bar_dataframe):
rsi_threshold = 45
rsi_timeperiod = 25
rsi_confirmation_period = 2
rsi_moving_average_window = 2
bar_arrival_rsi_strategy = RSIConfirmation(
rsi_timeperiod,
rsi_threshold,
rsi_confirmation_period,
rsi_moving_average_window,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
pred_proba = bar_arrival_rsi_strategy.predict_proba(dollar_bar_dataframe)
pred_to_idx_map = {0: 0, 1: 1}
assert all(pred_proba.sum(axis=1) == 1)
assert all(pred_proba[i, pred_to_idx_map[p]] == 1 for i, p in enumerate(preds))
| true
| true
|
790861f2a12db1fc516a552714bea108abca7751
| 3,681
|
py
|
Python
|
HackPSUconfig.py
|
hackpsu-tech/hackPSUS2018-rfid
|
0f33b588ddbf4a82ced9a25ba8a8384649b16017
|
[
"MIT"
] | null | null | null |
HackPSUconfig.py
|
hackpsu-tech/hackPSUS2018-rfid
|
0f33b588ddbf4a82ced9a25ba8a8384649b16017
|
[
"MIT"
] | 2
|
2018-04-02T14:32:13.000Z
|
2018-08-20T22:54:15.000Z
|
HackPSUconfig.py
|
hackpsu-tech/hackPSUS2018-rfid
|
0f33b588ddbf4a82ced9a25ba8a8384649b16017
|
[
"MIT"
] | null | null | null |
"""
This module provides an interface for reading and writing to a HackPSU RaspberryPi Scanner config file
Methods:
getProperty(configFile, prop)
Get a property from a config file by reading the config file until the desired property is found
setProperty(configFile, prop, value)
        Set a property by updating the config file (requires a total rewrite of the config file)
getProperties(configFile)
Read all properties into a dictionary, which is returned to the user
setProperties(configFile, dict)
Overwrite the configFile with a new configFile generated from the dictionary provided
"""
def getProperties(configFile):
"""
dictionary getProperties(str)
    This function reads the entire config file and builds a dictionary from the config file
Args:
configFile: The configuration file to read from
Returns:
dictionary: A list of key value pairs from the config file
"""
dict = {}
#For each line in the file
with open(configFile) as file:
for line in file:
#Remove leading and trailing whitespace
line = line.strip()
#If the line is a comment, skip
if line.startswith('#'):
continue
#Find the equals sign, if not present, skip the line
loc = line.find('=')
if loc == -1:
continue
#parse out the key and value
key = line[:loc]
value = line[loc+1:]
dict[key] = value
return dict
def setProperties(configFile, dict):
"""
void setProperties (str, dictionary)
This function iterates over the entire dictionary and saves each dictionary entry to the specified config file
Args:
configFile: The file to overwrite with the new configuration
dict: The dictionary to write
"""
#Overwrite the file
#Foreach key in dictionary write a new line
with open(configFile, 'w') as file:
for key in dict:
file.write(key + '=' + dict[key] + '\n')
def getProperty(configFile, prop):
"""
str getProperty(str, str)
This function searches a configFile for a specific property and returns its value
Args:
configFile: The configuration file to open
prop: The property to search for
Returns:
string: The property value if found or None for no value found
"""
retVal = None
#Foreach line in the file
with open(configFile) as file:
for line in file:
#Remove leading and trailing whitespace
line = line.strip()
#Ignore comment lines
if line.startswith('#'):
continue
#If the line is the desired property, parse and return
if line.startswith(prop):
retVal = line.replace(prop, '')
retVal = retVal.strip()
retVal = retVal[1:]
retVal = retVal.lstrip()
break
return retVal
def setProperty(configFile, prop, value):
"""
void setProperty(str, str, str)
    This function searches a config file for the specified property and updates its value if found.
If the specified property is not found, then a new line for the property will be created
Args:
configFile: The configuration file to open and update
prop: The property key to update
value: The new value for the property
"""
written = False
with open(configFile) as inFile:
#Create a temp file to copy into
tmpHandle, outPath = mkstemp()
with fdopen(tmpHandle, 'w') as outFile:
#Foreach line in the original file
for line in inFile:
#If it's the prop line, rewrite the prop line
if line.startswith(prop):
outFile.write(prop + '=' + value + '\n')
written = True
#Otherwise keep the line as is
else:
outFile.write(line)
#If no update was performed, then add a new line for the prop
if not written:
                outFile.write(prop + '=' + value + '\n')
#Move from tmp to actual file
remove(configFile)
move(outPath, configFile)
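# Usage sketch (illustrative; the file name and keys below are placeholders,
# not part of this module):
if __name__ == '__main__':
    setProperties('scanner.cfg', {'apiKey': 'abc123'})
    setProperty('scanner.cfg', 'apiKey', 'xyz789')
    print(getProperty('scanner.cfg', 'apiKey'))    # -> xyz789
    print(getProperties('scanner.cfg'))            # -> {'apiKey': 'xyz789'}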
| 27.676692
| 111
| 0.710676
|
from os import fdopen, remove
from shutil import move
from tempfile import mkstemp
def getProperties(configFile):
dict = {}
with open(configFile) as file:
for line in file:
line = line.strip()
if line.startswith('#'):
continue
loc = line.find('=')
if loc == -1:
continue
key = line[:loc]
value = line[loc+1:]
dict[key] = value
return dict
def setProperties(configFile, dict):
with open(configFile, 'w') as file:
for key in dict:
file.write(key + '=' + dict[key] + '\n')
def getProperty(configFile, prop):
retVal = None
with open(configFile) as file:
for line in file:
line = line.strip()
if line.startswith('#'):
continue
if line.startswith(prop):
retVal = line.replace(prop, '')
retVal = retVal.strip()
retVal = retVal[1:]
retVal = retVal.lstrip()
break
return retVal
def setProperty(configFile, prop, value):
written = False
with open(configFile) as inFile:
tmpHandle, outPath = mkstemp()
with fdopen(tmpHandle, 'w') as outFile:
for line in inFile:
if line.startswith(prop):
outFile.write(prop + '=' + value + '\n')
written = True
else:
outFile.write(line)
            if not written:
                outFile.write(prop + '=' + value + '\n')
remove(configFile)
move(outPath, configFile)
| true
| true
|
7908629b18e1e6a7ed63ddf360a5dba333406511
| 3,312
|
py
|
Python
|
biostar/server/search.py
|
biostars/support.bioconductor.org
|
2416b6f4ec82fce3e69a218a616761a934f82bb5
|
[
"MIT"
] | 1
|
2021-08-01T23:26:12.000Z
|
2021-08-01T23:26:12.000Z
|
biostar/server/search.py
|
biostars/support.bioconductor.org
|
2416b6f4ec82fce3e69a218a616761a934f82bb5
|
[
"MIT"
] | null | null | null |
biostar/server/search.py
|
biostars/support.bioconductor.org
|
2416b6f4ec82fce3e69a218a616761a934f82bb5
|
[
"MIT"
] | 1
|
2020-03-12T07:24:27.000Z
|
2020-03-12T07:24:27.000Z
|
__author__ = 'ialbert'
from django.views.generic import DetailView, ListView, TemplateView, RedirectView, View
from haystack.views import SearchView
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet, AutoQuery
from haystack.utils import Highlighter
from django.conf import settings
from biostar.server.views import BaseListMixin
from ajax import ajax_error, ajax_success, ajax_error_wrapper, json_response
from django.conf.urls import patterns
from django.contrib.sitemaps import FlatPageSitemap, GenericSitemap
from biostar.apps.posts.models import Post, Tag
from biostar.apps.planet.models import BlogPost
import logging
logger = logging.getLogger(__name__)
info_dict = {
'queryset': Post.objects.all(),
}
sitemaps = {
'flatpages': FlatPageSitemap,
'posts': GenericSitemap(info_dict, priority=0.6),
}
class SiteSearch(SearchView):
extra_context = lambda x: dict(topic="search", page_title="Search")
def slow_highlight(query, text):
"Invoked only if the search backend does not support highlighting"
highlight = Highlighter(query)
value = highlight.highlight(text)
return value
def join_highlights(row):
"Joins the highlighted text"
if type(row.highlighted) is dict:
return ''
if not row.highlighted:
return
return '<br>'.join(x for x in row.highlighted)
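# Doctest-style sketch (hypothetical row object): given a search result whose
# `highlighted` attribute is a list of snippet strings,
#
#   row.highlighted = ['foo <em>bar</em>', 'baz']
#   join_highlights(row)   # -> 'foo <em>bar</em><br>baz'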
class Search(BaseListMixin):
template_name = "search/search.html"
paginate_by = settings.PAGINATE_BY
context_object_name = "results"
page_title = "Search"
def get_queryset(self):
self.q = self.request.GET.get('q', '')
if not self.q:
return []
content = AutoQuery(self.q)
query = SearchQuerySet().filter(content=content).highlight()[:50]
for row in query:
if row is None:
continue
context = join_highlights(row)
context = context or slow_highlight(query=self.q, text=row.content)
row.context = context
return query
def get_context_data(self, **kwargs):
context = super(Search, self).get_context_data(**kwargs)
context['q'] = self.q
return context
def suggest_tags(request):
"Returns suggested tags"
tags = Tag.objects.all().order_by('-count')#[:10]
data = settings.POST_TAG_LIST + [t.name for t in tags]
data = filter(None, data)
return json_response(data)
#@ajax_error_wrapper
def search_title(request):
"Handles title searches"
q = request.GET.get('q', '')
content = AutoQuery(q)
results = SearchQuerySet().filter(content=content).highlight()[:50]
items = []
for row in results:
try:
ob = row.object
# Why can this happen?
if not ob:
continue
context = join_highlights(row)
context = context or slow_highlight(query=q, text=row.content)
text = "%s" % row.title
items.append(
dict(id=ob.get_absolute_url(), text=text, context=context, author=row.author,
url=ob.get_absolute_url()),
)
        except Exception as exc:
logger.error(content)
logger.error(exc)
pass
payload = dict(items=items)
return json_response(payload)
| 28.067797
| 93
| 0.658514
|
__author__ = 'ialbert'
from django.views.generic import DetailView, ListView, TemplateView, RedirectView, View
from haystack.views import SearchView
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet, AutoQuery
from haystack.utils import Highlighter
from django.conf import settings
from biostar.server.views import BaseListMixin
from ajax import ajax_error, ajax_success, ajax_error_wrapper, json_response
from django.conf.urls import patterns
from django.contrib.sitemaps import FlatPageSitemap, GenericSitemap
from biostar.apps.posts.models import Post, Tag
from biostar.apps.planet.models import BlogPost
import logging
logger = logging.getLogger(__name__)
info_dict = {
'queryset': Post.objects.all(),
}
sitemaps = {
'flatpages': FlatPageSitemap,
'posts': GenericSitemap(info_dict, priority=0.6),
}
class SiteSearch(SearchView):
extra_context = lambda x: dict(topic="search", page_title="Search")
def slow_highlight(query, text):
"Invoked only if the search backend does not support highlighting"
highlight = Highlighter(query)
value = highlight.highlight(text)
return value
def join_highlights(row):
"Joins the highlighted text"
if type(row.highlighted) is dict:
return ''
if not row.highlighted:
return
return '<br>'.join(x for x in row.highlighted)
class Search(BaseListMixin):
template_name = "search/search.html"
paginate_by = settings.PAGINATE_BY
context_object_name = "results"
page_title = "Search"
def get_queryset(self):
self.q = self.request.GET.get('q', '')
if not self.q:
return []
content = AutoQuery(self.q)
query = SearchQuerySet().filter(content=content).highlight()[:50]
for row in query:
if row is None:
continue
context = join_highlights(row)
context = context or slow_highlight(query=self.q, text=row.content)
row.context = context
return query
def get_context_data(self, **kwargs):
context = super(Search, self).get_context_data(**kwargs)
context['q'] = self.q
return context
def suggest_tags(request):
"Returns suggested tags"
tags = Tag.objects.all().order_by('-count')
data = settings.POST_TAG_LIST + [t.name for t in tags]
data = filter(None, data)
return json_response(data)
def search_title(request):
"Handles title searches"
q = request.GET.get('q', '')
content = AutoQuery(q)
results = SearchQuerySet().filter(content=content).highlight()[:50]
items = []
for row in results:
try:
ob = row.object
if not ob:
continue
context = join_highlights(row)
context = context or slow_highlight(query=q, text=row.content)
text = "%s" % row.title
items.append(
dict(id=ob.get_absolute_url(), text=text, context=context, author=row.author,
url=ob.get_absolute_url()),
)
except Exception as exc:
logger.error(content)
logger.error(exc)
pass
payload = dict(items=items)
return json_response(payload)
| false
| true
|
790862d99c8d496659a9bab0c632f446cf9f32c5
| 2,671
|
py
|
Python
|
lacaudiofiles/wave/wavefile.py
|
landmarkacoustics/lac-audio-files
|
a6658513dc34ac66d7d3cb1c08ec1649af963a0d
|
[
"MIT"
] | null | null | null |
lacaudiofiles/wave/wavefile.py
|
landmarkacoustics/lac-audio-files
|
a6658513dc34ac66d7d3cb1c08ec1649af963a0d
|
[
"MIT"
] | null | null | null |
lacaudiofiles/wave/wavefile.py
|
landmarkacoustics/lac-audio-files
|
a6658513dc34ac66d7d3cb1c08ec1649af963a0d
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2019 by Landmark Acoustics LLC
r"""A class to write a WAV-formatted file."""
import wave
class WaveFile:
'''A wrapper for `Wave_write` from Python STL's `wave` module.
Parameters
----------
name : str
The name to save the file as. It should include path and extension.
sample_rate : int
The number of samples per second that the file will use.
bit_rate : int
The number of bits the file will use per sample.
channels : int
The number of channels that the file has.
See Also
--------
wave : the Python STL module
'''
def __init__(self,
name: str,
sample_rate: int,
bit_rate: int,
channels: int) -> None:
self._channels = channels
self._sample_rate = sample_rate
self._byte_rate = bit_rate // 8
self._filehandle = wave.open(name, 'wb')
self._filehandle.setnchannels(self.channels)
self._filehandle.setsampwidth(self.byte_rate)
self._filehandle.setframerate(self.sample_rate)
@property
def channels(self) -> int:
'''The number of channels the file has.'''
return self._channels
@property
def sample_rate(self) -> int:
'''The number of samples per second.'''
return self._sample_rate
@property
def byte_rate(self) -> int:
'''The number of bytes per sample.'''
return self._byte_rate
@property
def bit_rate(self) -> int:
'''The number of bits per sample.'''
return self.byte_rate * 8
def write_frames(self, data) -> int:
'''Add some data to the file.
Parameters
----------
data : bytes-like object
The user must ensure that the data's format matches the file's!
Returns
-------
int : the number of frames written
'''
pos = self._filehandle.tell()
self._filehandle.writeframes(data)
return self._filehandle.tell() - pos
@property
def frame_size(self) -> int:
'''The number of bytes per frame.'''
return self.byte_rate * self.channels
def __enter__(self):
self._filehandle.__enter__()
return self
def __exit__(self, *args, **kwargs):
return self._filehandle.__exit__(*args, **kwargs)
if __name__ == '__main__':
import array
import sys
wvf = WaveFile(sys.argv[1], 44100, 28, 3)
a = array.array('b')
a.extend([0 for i in range(12000 * wvf.frame_size)])
N = wvf.write_frames(a)
print(f'Wrote {N} frames in {wvf.channels} {wvf.bit_rate}-bit channels.')
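A small usage sketch of the class above (hypothetical output file name; one second of 16-bit mono silence), leaning on the context-manager support so the handle is closed automatically:
import array

with WaveFile('tone.wav', sample_rate=44100, bit_rate=16, channels=1) as wf:
    second = array.array('h', [0] * 44100)  # 44100 16-bit samples of silence
    written = wf.write_frames(second)       # array objects satisfy the bytes-like contract
print(f'wrote {written} frames')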
| 25.932039
| 77
| 0.592662
|
import wave
class WaveFile:
def __init__(self,
name: str,
sample_rate: int,
bit_rate: int,
channels: int) -> None:
self._channels = channels
self._sample_rate = sample_rate
self._byte_rate = bit_rate // 8
self._filehandle = wave.open(name, 'wb')
self._filehandle.setnchannels(self.channels)
self._filehandle.setsampwidth(self.byte_rate)
self._filehandle.setframerate(self.sample_rate)
@property
def channels(self) -> int:
return self._channels
@property
def sample_rate(self) -> int:
return self._sample_rate
@property
def byte_rate(self) -> int:
return self._byte_rate
@property
def bit_rate(self) -> int:
return self.byte_rate * 8
def write_frames(self, data) -> int:
pos = self._filehandle.tell()
self._filehandle.writeframes(data)
return self._filehandle.tell() - pos
@property
def frame_size(self) -> int:
return self.byte_rate * self.channels
def __enter__(self):
self._filehandle.__enter__()
return self
def __exit__(self, *args, **kwargs):
return self._filehandle.__exit__(*args, **kwargs)
if __name__ == '__main__':
import array
import sys
wvf = WaveFile(sys.argv[1], 44100, 28, 3)
a = array.array('b')
a.extend([0 for i in range(12000 * wvf.frame_size)])
N = wvf.write_frames(a)
print(f'Wrote {N} frames in {wvf.channels} {wvf.bit_rate}-bit channels.')
| true
| true
|
790863fed04ec75f17df4b4acdbdb2b1ea5aed25
| 882
|
py
|
Python
|
var/spack/repos/builtin/packages/r-rrblup/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/r-rrblup/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/r-rrblup/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRrblup(RPackage):
"""Ridge Regression and Other Kernels for Genomic Selection.
Software for genomic prediction with the RR-BLUP mixed model (Endelman
2011, <doi:10.3835/plantgenome2011.08.0024>). One application is to
estimate marker effects by ridge regression; alternatively, BLUPs can be
calculated based on an additive relationship matrix or a Gaussian
kernel."""
cran = "rrBLUP"
version('4.6.1', sha256='e9230e74cc430a83ac5567071cb1c7f00b35c368f7d79bcc1cfde7225446c4db')
version('4.6', sha256='28b475a1466fcdc1780caace75cf34155338fda496cebd5799315598a4bc84af')
depends_on('r@2.14:', type=('build', 'run'))
| 36.75
| 95
| 0.756236
|
from spack import *
class RRrblup(RPackage):
cran = "rrBLUP"
version('4.6.1', sha256='e9230e74cc430a83ac5567071cb1c7f00b35c368f7d79bcc1cfde7225446c4db')
version('4.6', sha256='28b475a1466fcdc1780caace75cf34155338fda496cebd5799315598a4bc84af')
depends_on('r@2.14:', type=('build', 'run'))
| true
| true
|
790864759a4b957bdc3d35e7e37654553a7be404
| 405
|
py
|
Python
|
backend/withposh_34166/wsgi.py
|
crowdbotics-apps/withposh-34166
|
429203738310d89abbeb5ff49385446ab5a791cf
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/withposh_34166/wsgi.py
|
crowdbotics-apps/withposh-34166
|
429203738310d89abbeb5ff49385446ab5a791cf
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/withposh_34166/wsgi.py
|
crowdbotics-apps/withposh-34166
|
429203738310d89abbeb5ff49385446ab5a791cf
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for withposh_34166 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'withposh_34166.settings')
application = get_wsgi_application()
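A minimal local-run sketch using the standard library's wsgiref server (assumes the project package is importable and its settings are valid; production deployments should use a real WSGI server):
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    # Serve the application object defined above on localhost:8000
    with make_server('127.0.0.1', 8000, application) as httpd:
        httpd.serve_forever()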
| 23.823529
| 78
| 0.792593
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'withposh_34166.settings')
application = get_wsgi_application()
| true
| true
|
790864e9db53c51cd3c475e3ea5632f7442923c7
| 302
|
py
|
Python
|
src/stump_data_pipeline/mongo_client.py
|
stump-vote/stump-data-pipeline
|
7a8a27577f20cc4f4bdcbf20fe9664b69efa6953
|
[
"MIT"
] | null | null | null |
src/stump_data_pipeline/mongo_client.py
|
stump-vote/stump-data-pipeline
|
7a8a27577f20cc4f4bdcbf20fe9664b69efa6953
|
[
"MIT"
] | null | null | null |
src/stump_data_pipeline/mongo_client.py
|
stump-vote/stump-data-pipeline
|
7a8a27577f20cc4f4bdcbf20fe9664b69efa6953
|
[
"MIT"
] | 1
|
2020-04-05T02:45:37.000Z
|
2020-04-05T02:45:37.000Z
|
import pymongo
def connect_to_mongo(username="", password="", host="localhost", port=27017):
credentials = ""
if username and password:
credentials = f"{username}:{password}@"
connection_url = f"mongodb://{credentials}{host}:{port}"
return pymongo.MongoClient(connection_url)
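A brief usage sketch (assumes a MongoDB server reachable on localhost:27017):
client = connect_to_mongo()
client.admin.command('ping')  # raises if the server is unreachable
print(client.list_database_names())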
| 30.2
| 77
| 0.692053
|
import pymongo
def connect_to_mongo(username="", password="", host="localhost", port=27017):
credentials = ""
if username and password:
credentials = f"{username}:{password}@"
connection_url = f"mongodb://{credentials}{host}:{port}"
return pymongo.MongoClient(connection_url)
| true
| true
|
790865bb249f2d32e173e8e186585acbc7d6f77e
| 3,179
|
py
|
Python
|
examples/python/image_transfer.py
|
seatable/seatable-scripts-cn
|
2d1637207e1a7339406eeee33be62801f97e2566
|
[
"Apache-2.0"
] | 12
|
2020-09-24T09:52:18.000Z
|
2022-02-16T00:53:23.000Z
|
examples/python/image_transfer.py
|
seatable/seatable-scripts-cn
|
2d1637207e1a7339406eeee33be62801f97e2566
|
[
"Apache-2.0"
] | 1
|
2021-04-06T18:32:11.000Z
|
2021-11-18T06:39:45.000Z
|
examples/python/image_transfer.py
|
seatable/seatable-scripts-cn
|
2d1637207e1a7339406eeee33be62801f97e2566
|
[
"Apache-2.0"
] | 6
|
2020-08-27T03:39:49.000Z
|
2021-12-16T06:03:41.000Z
|
from seatable_api import Base, context
import requests
import time
import os
"""
该脚本用于从图片链接下载图片到图片列。你可以在一个文本列中记录图片的地址,然后用这个
脚本自动下载图片并上传到图片列中。
"""
###################---基本信息配置---###################
SERVER_URL = context.server_url or 'https://cloud.seatable.cn/'
API_TOKEN = context.api_token or 'cacc42497886e4d0aa8ac0531bdcccb1c93bd0f5'
TABLE_NAME = 'Table1'
IMAGE_FILE_TYPE = ['jpg', 'png', 'jpeg', 'bmp', 'gif'] # 图片的格式
IMG_URL_COL = '图片链接' # 包含图片链接的列名,需要是 URL 或者文本类型
IMG_COL = 'img' # 用于存储图片的列名,需要是图片类型
IMG_NAME_PRE = 'image' # 图片上传后使用的文件名称前缀
###################---基本信息配置---###################
def get_time_stamp():
return str(int(time.time()*100000))
def img_transfer():
# 1. Create the Base object and authenticate
base = Base(API_TOKEN, SERVER_URL)
base.auth()
# 2. Fetch the rows; the returned data structure is a list of dicts
"""
Example of the data structure ('img' and '图片链接' are user-defined column names):
[{
'_id': 'RNn2isDfRnSPWq5HIwRT0w',
'_mtime': '2020-11-10T03:02:55.549+00:00',
'Name': '冉继伟0',
'img': [{
'name': 'cut.png',
'size': 2778797,
'type': 'file',
'url': 'https://dev.seafile.com/dtable-web/workspace/104/asset/1d50c674-ca45-4acf-85b8-19d6e10ca5f0/files/2020-11/cut.png'
}],
'图片链接': 'https://timgsa.baidu.com/timg?image&quality=80xxx.jpg'
}, {
'_id': 'b2lrBxnDSGm1LsZDQTVGhw',
'_mtime': '2020-11-04T08:47:51.562+00:00',
'Name': '冉继伟1'
}, {
'_id': 'RBUZ_g6qS_KER0EjaSclFA',
'_mtime': '2020-11-04T09:26:45.961+00:00',
'Name': '冉继伟2',
'img': None
}, ......]
"""
rows = base.list_rows(TABLE_NAME)
count = 0
# 3. Iterate over each row and read the image-link column
for row in rows:
time_stamp = get_time_stamp()
img_url = row.get(IMG_URL_COL, None)
img = row.get(IMG_COL, None)
try:
# Skip when there is no image link or the image column already has data, to avoid duplicates
if (not img_url) or img:
continue
# Get the file extension from the URL
img_name_extend = img_url.strip().split('.')[-1]
img_name_extend = img_name_extend in IMAGE_FILE_TYPE and img_name_extend or 'jpg'
# Name the downloaded file as IMG_NAME_PRE + timestamp + extension
img_name = "/tmp/%s-%s.%s" % (IMG_NAME_PRE, time_stamp, img_name_extend)
# Download the file
response = requests.get(img_url)
if response.status_code != 200:
raise Exception('download file error')
with open(img_name, 'wb') as f:
f.write(response.content)
# Upload the file
info_dict = base.upload_local_file(img_name, name=None, relative_path=None, file_type='image', replace=True)
row[IMG_COL] = [info_dict.get('url')]
base.update_row(TABLE_NAME, row['_id'], row)
# Delete the local file once the upload completes
os.remove(img_name)
except Exception as err_msg:
print('count%s-%s-%s-message: %s' % (count, row['_id'], img_url, err_msg))  # on error, print the row info for easier troubleshooting
continue
count += 1
if __name__ == "__main__":
img_transfer()
| 34.182796
| 134
| 0.538534
|
from seatable_api import Base, context
import requests
import time
import os
if __name__ == "__main__":
img_transfer()
| true
| true
|
7908669448f59ba23183ec8280e62b4adee09b21
| 494
|
py
|
Python
|
telegramsLibs/api_telega_intro.py
|
keys4words/bots
|
4f5c4342ad24d1fc9d39947c9940346d2ac2ec8b
|
[
"Apache-2.0"
] | null | null | null |
telegramsLibs/api_telega_intro.py
|
keys4words/bots
|
4f5c4342ad24d1fc9d39947c9940346d2ac2ec8b
|
[
"Apache-2.0"
] | 7
|
2020-11-09T13:35:15.000Z
|
2021-10-22T04:58:07.000Z
|
telegramsLibs/api_telega_intro.py
|
keys4words/bots
|
4f5c4342ad24d1fc9d39947c9940346d2ac2ec8b
|
[
"Apache-2.0"
] | null | null | null |
import requests
import pprint
from config import API_KEY
base_url = f'https://api.telegram.org/bot{API_KEY}/'
api_response = requests.get(base_url + 'getUpdates').json()
for update in api_response['result']:
message = update['message']
chat_id = message['chat']['id']
text = message['text']
reply_message = {
'chat_id': chat_id,
'text': text
}
requests.post(base_url + 'sendMessage', json=reply_message)
# pprint.pprint(api_response['result'][0])
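The loop above replays every stored update each time the script runs; a hedged long-polling sketch (same base_url as above; offset and timeout are standard Bot API parameters of getUpdates) acknowledges updates so each is handled only once:
offset = None
while True:
    resp = requests.get(base_url + 'getUpdates',
                        params={'offset': offset, 'timeout': 30}).json()
    for update in resp.get('result', []):
        # Acknowledge this update so it is not delivered again
        offset = update['update_id'] + 1
        msg = update.get('message')
        if msg and 'text' in msg:
            reply = {'chat_id': msg['chat']['id'], 'text': msg['text']}
            requests.post(base_url + 'sendMessage', json=reply)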
| 24.7
| 63
| 0.668016
|
import requests
import pprint
from config import API_KEY
base_url = f'https://api.telegram.org/bot{API_KEY}/'
api_response = requests.get(base_url + 'getUpdates').json()
for update in api_response['result']:
message = update['message']
chat_id = message['chat']['id']
text = message['text']
reply_message = {
'chat_id': chat_id,
'text': text
}
requests.post(base_url + 'sendMessage', json=reply_message)
| true
| true
|
7908669c40a9de8ad57713317cc128f2f11fa349
| 37,793
|
py
|
Python
|
qlib/data/data.py
|
Tirbo06/qlib
|
ad0afc111cf27777bc05d712006ee5b14cc77840
|
[
"MIT"
] | 1
|
2020-10-15T20:18:09.000Z
|
2020-10-15T20:18:09.000Z
|
qlib/data/data.py
|
Tirbo06/qlib
|
ad0afc111cf27777bc05d712006ee5b14cc77840
|
[
"MIT"
] | null | null | null |
qlib/data/data.py
|
Tirbo06/qlib
|
ad0afc111cf27777bc05d712006ee5b14cc77840
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import abc
import six
import time
import queue
import bisect
import logging
import importlib
import traceback
import numpy as np
import pandas as pd
from multiprocessing import Pool
from .cache import H
from ..config import C
from .ops import *
from ..log import get_module_logger
from ..utils import parse_field, read_bin, hash_args, normalize_cache_fields
from .base import Feature
from .cache import DiskDatasetCache, DiskExpressionCache
@six.add_metaclass(abc.ABCMeta)
class CalendarProvider(object):
"""Calendar provider base class
Provide calendar data.
"""
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
"""Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calendar list
"""
raise NotImplementedError("Subclass of CalendarProvider must implement `calendar` method")
def locate_index(self, start_time, end_time, freq, future):
"""Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time
"""
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
calendar, calendar_index = self._get_calendar(freq=freq, future=future)
if start_time not in calendar_index:
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError(
"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
)
start_index = calendar_index[start_time]
if end_time not in calendar_index:
end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
end_index = calendar_index[end_time]
return start_time, end_time, start_index, end_index
def _get_calendar(self, freq, future):
"""Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search
"""
flag = f"{freq}_future_{future}"
if flag in H["c"]:
_calendar, _calendar_index = H["c"][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for i, x in enumerate(_calendar)} # for fast search
H["c"][flag] = _calendar, _calendar_index
return _calendar, _calendar_index
def _uri(self, start_time, end_time, freq, future=False):
"""Get the uri of calendar generation task."""
return hash_args(start_time, end_time, freq, future)
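A standalone sketch of the bisect snapping that locate_index performs above (toy calendar values; real calendars come from _load_calendar):
import bisect
import pandas as pd

calendar = [pd.Timestamp(d) for d in ('2020-01-02', '2020-01-03', '2020-01-06')]
start = pd.Timestamp('2020-01-04')                     # not a trading day
start = calendar[bisect.bisect_left(calendar, start)]  # snaps forward to 2020-01-06
print(start)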
@six.add_metaclass(abc.ABCMeta)
class InstrumentProvider(object):
"""Instrument provider base class
Provide instrument data.
"""
@staticmethod
def instruments(market="all", filter_pipe=None):
"""Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
"""
if filter_pipe is None:
filter_pipe = []
config = {"market": market, "filter_pipe": []}
# the order of the filters will affect the result, so we need to keep
# the order
for filter_t in filter_pipe:
config["filter_pipe"].append(filter_t.to_config())
return config
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
"""List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dictionary with time spans
"""
raise NotImplementedError("Subclass of InstrumentProvider must implement `list_instruments` method")
def _uri(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return hash_args(instruments, start_time, end_time, freq, as_list)
# instruments type
LIST = "LIST"
DICT = "DICT"
CONF = "CONF"
@classmethod
def get_inst_type(cls, inst):
if "market" in inst:
return cls.CONF
if isinstance(inst, dict):
return cls.DICT
if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):
return cls.LIST
raise ValueError(f"Unknown instrument type {inst}")
@six.add_metaclass(abc.ABCMeta)
class FeatureProvider(object):
"""Feature provider class
Provide feature data.
"""
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
"""Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain feature
"""
raise NotImplementedError("Subclass of FeatureProvider must implement `feature` method")
@six.add_metaclass(abc.ABCMeta)
class ExpressionProvider(object):
"""Expression provider class
Provide Expression data.
"""
def __init__(self):
self.expression_instance_cache = {}
def get_expression_instance(self, field):
try:
if field in self.expression_instance_cache:
expression = self.expression_instance_cache[field]
else:
expression = eval(parse_field(field))
self.expression_instance_cache[field] = expression
except NameError as e:
get_module_logger("data").exception(
"ERROR: field [%s] contains invalid operator/variable [%s]" % (str(field), str(e).split()[1])
)
raise
except SyntaxError:
get_module_logger("data").exception("ERROR: field [%s] contains invalid syntax" % str(field))
raise
return expression
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
"""Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain expression
"""
raise NotImplementedError("Subclass of ExpressionProvider must implement `Expression` method")
@six.add_metaclass(abc.ABCMeta)
class DatasetProvider(object):
"""Dataset provider class
Provide Dataset data.
"""
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
"""Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
a pandas dataframe with <instrument, datetime> index
"""
raise NotImplementedError("Subclass of DatasetProvider must implement `Dataset` method")
def _uri(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=1,
**kwargs,
):
"""Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
"""
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)
@staticmethod
def get_instruments_d(instruments, freq):
"""
Parse different types of input instruments into an output instruments_d.
A wrongly formatted input will raise an exception.
"""
if isinstance(instruments, dict):
if "market" in instruments:
# dict of stockpool config
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
# dict of instruments and timestamp
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
# list or tuple of a group of instruments
instruments_d = list(instruments)
else:
raise ValueError("Unsupported input type for param `instrument`")
return instruments_d
@staticmethod
def get_column_names(fields):
"""
Get column names from input fields
"""
if len(fields) == 0:
raise ValueError("fields cannot be empty")
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names
@staticmethod
def parse_fields(fields):
# parse and check the input fields
return [ExpressionD.get_expression_instance(f) for f in fields]
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
"""
Load and process the data, return the data set.
- default using multi-kernel method.
"""
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
# One process for one task, so that the memory will be freed quicker.
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for inst, spans in instruments_d.items():
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
spans,
C,
),
)
else:
for inst in instruments_d:
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
None,
C,
),
)
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if len(data[inst].get()) > 0:
# NOTE: requires Python >= 3.6; dict preserves insertion order (guaranteed from 3.7, an implementation detail in 3.6)
new_data[inst] = data[inst].get()
if len(new_data) > 0:
data = pd.concat(new_data, names=["instrument"], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"""
Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
"""
# NOTE: This place is compatible with windows, windows multi-process is spawn
if getattr(ExpressionD, "_provider", None) is None:
register_all_wrappers()
obj = dict()
for field in column_names:
# The client does not have expression provider, the data will be loaded from cache using static method.
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
data.index = _calendar[data.index.values.astype(int)]  # np.int was removed in NumPy 1.24
data.index.names = ["datetime"]
if spans is None:
return data
else:
mask = np.zeros(len(data), dtype=bool)
for begin, end in spans:
mask |= (data.index >= begin) & (data.index <= end)
return data[mask]
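A condensed sketch of the span masking done at the end of expression_calculator (synthetic daily index and a single span):
import numpy as np
import pandas as pd

idx = pd.date_range('2020-01-01', periods=5)
data = pd.DataFrame({'x': range(5)}, index=idx)
mask = np.zeros(len(data), dtype=bool)
for begin, end in [(idx[1], idx[3])]:
    mask |= (data.index >= begin) & (data.index <= end)
print(data[mask])  # keeps rows 2020-01-02 through 2020-01-04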
class LocalCalendarProvider(CalendarProvider):
"""Local calendar data provider class
Provide calendar data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_cal(self):
"""Calendar file uri."""
if self.remote:
return os.path.join(C.mount_path, "calendars", "{}.txt")
else:
return os.path.join(C.provider_uri, "calendars", "{}.txt")
def _load_calendar(self, freq, future):
"""Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps
"""
if future:
fname = self._uri_cal.format(freq + "_future")
# if future calendar not exists, return current calendar
if not os.path.exists(fname):
get_module_logger("data").warning(f"{freq}_future.txt not exists, return current calendar!")
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if not os.path.exists(fname):
raise ValueError("calendar not exists for freq " + freq)
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f]
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
_calendar, _calendar_index = self._get_calendar(freq, future)
if start_time == "None":
start_time = None
if end_time == "None":
end_time = None
# strip
if start_time:
start_time = pd.Timestamp(start_time)
if start_time > _calendar[-1]:
return np.array([])
else:
start_time = _calendar[0]
if end_time:
end_time = pd.Timestamp(end_time)
if end_time < _calendar[0]:
return np.array([])
else:
end_time = _calendar[-1]
_, _, si, ei = self.locate_index(start_time, end_time, freq, future)
return _calendar[si : ei + 1]
class LocalInstrumentProvider(InstrumentProvider):
"""Local instrument data provider class
Provide instrument data from local data source.
"""
def __init__(self):
pass
@property
def _uri_inst(self):
"""Instrument file uri."""
return os.path.join(C.provider_uri, "instruments", "{}.txt")
def _load_instruments(self, market):
fname = self._uri_inst.format(market)
if not os.path.exists(fname):
raise ValueError("instruments not exists for market " + market)
_instruments = dict()
with open(fname) as f:
for line in f:
inst_time = line.strip().split()
inst = inst_time[0]
if len(inst_time) == 3:
# `day`
begin = inst_time[1]
end = inst_time[2]
elif len(inst_time) == 5:
# `1min`
begin = inst_time[1] + " " + inst_time[2]
end = inst_time[3] + " " + inst_time[4]
_instruments.setdefault(inst, []).append((pd.Timestamp(begin), pd.Timestamp(end)))
return _instruments
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
market = instruments["market"]
if market in H["i"]:
_instruments = H["i"][market]
else:
_instruments = self._load_instruments(market)
H["i"][market] = _instruments
# strip
# use calendar boundary
cal = Cal.calendar(freq=freq)
start_time = pd.Timestamp(start_time or cal[0])
end_time = pd.Timestamp(end_time or cal[-1])
_instruments_filtered = {
inst: list(
filter(
lambda x: x[0] <= x[1],
[(max(start_time, x[0]), min(end_time, x[1])) for x in spans],
)
)
for inst, spans in _instruments.items()
}
_instruments_filtered = {key: value for key, value in _instruments_filtered.items() if value}
# filter
filter_pipe = instruments["filter_pipe"]
for filter_config in filter_pipe:
from . import filter as F
filter_t = getattr(F, filter_config["filter_type"]).from_config(filter_config)
_instruments_filtered = filter_t(_instruments_filtered, start_time, end_time, freq)
# as list
if as_list:
return list(_instruments_filtered)
return _instruments_filtered
class LocalFeatureProvider(FeatureProvider):
"""Local feature data provider class
Provide feature data from local data source.
"""
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_data(self):
"""Static feature file uri."""
if self.remote:
return os.path.join(C.mount_path, "features", "{}", "{}.{}.bin")
else:
return os.path.join(C.provider_uri, "features", "{}", "{}.{}.bin")
def feature(self, instrument, field, start_index, end_index, freq):
# validate
field = str(field).lower()[1:]
uri_data = self._uri_data.format(instrument.lower(), field, freq)
if not os.path.exists(uri_data):
get_module_logger("data").warning("WARN: data not found for %s.%s" % (instrument, field))
return pd.Series()
# raise ValueError('uri_data not found: ' + uri_data)
# load
series = read_bin(uri_data, start_index, end_index)
return series
class LocalExpressionProvider(ExpressionProvider):
"""Local expression data provider class
Provide expression data from local data source.
"""
def __init__(self):
super().__init__()
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
expression = self.get_expression_instance(field)
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
_, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)
lft_etd, rght_etd = expression.get_extended_window_size()
series = expression.load(instrument, max(0, start_index - lft_etd), end_index + rght_etd, freq)
# Ensure that each column type is consistent
# FIXME: The stock data is currently float. If there is other types of data, this part needs to be re-implemented.
try:
series = series.astype(float)
except ValueError:
pass
if not series.empty:
series = series.loc[start_index:end_index]
return series
class LocalDatasetProvider(DatasetProvider):
"""Local dataset data provider class
Provide dataset data from local data source.
"""
def __init__(self):
pass
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
return data
@staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq="day"):
"""
This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself.
"""
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return
start_time = cal[0]
end_time = cal[-1]
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
for inst in instruments_d:
p.apply_async(
LocalDatasetProvider.cache_walker,
args=(
inst,
start_time,
end_time,
freq,
column_names,
),
)
p.close()
p.join()
@staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
"""
If the expressions of one instrument haven't been calculated before,
calculate it and write it into expression cache.
"""
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq)
class ClientCalendarProvider(CalendarProvider):
"""Client calendar data provider class
Provide calendar data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
self.conn.send_request(
request_type="calendar",
request_content={
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"future": future,
},
msg_queue=self.queue,
msg_proc_func=lambda response_content: [pd.Timestamp(c) for c in response_content],
)
result = self.queue.get(timeout=C["timeout"])
return result
class ClientInstrumentProvider(InstrumentProvider):
"""Client instrument data provider class
Provide instrument data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
def inst_msg_proc_func(response_content):
if isinstance(response_content, dict):
instrument = {
i: [(pd.Timestamp(s), pd.Timestamp(e)) for s, e in t] for i, t in response_content.items()
}
else:
instrument = response_content
return instrument
self.conn.send_request(
request_type="instrument",
request_content={
"instruments": instruments,
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"as_list": as_list,
},
msg_queue=self.queue,
msg_proc_func=inst_msg_proc_func,
)
result = self.queue.get(timeout=C["timeout"])
if isinstance(result, Exception):
raise result
get_module_logger("data").debug("get result")
return result
class ClientDatasetProvider(DatasetProvider):
"""Client dataset data provider class
Provide dataset data by requesting data from server as a client.
"""
def __init__(self):
self.conn = None
def set_conn(self, conn):
self.conn = conn
self.queue = queue.Queue()
def dataset(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=0,
return_uri=False,
):
if Inst.get_inst_type(instruments) == Inst.DICT:
get_module_logger("data").warning(
"Getting features from a dict of instruments is not recommended because the features will not be "
"cached! "
"The dict of instruments will be cleaned every day."
)
if disk_cache == 0:
"""
Call the server to generate the expression cache.
Then load the data from the expression cache directly.
- default using multi-kernel method.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 0,
},
msg_queue=self.queue,
)
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
else:
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
if return_uri:
return data, feature_uri
else:
return data
else:
"""
Call the server to generate the data-set cache, get the uri of the cache file.
Then load the data from the file on NFS directly.
- using single-process implementation.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 1,
},
msg_queue=self.queue,
)
# - Done in callback
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
get_module_logger("data").debug("get result")
try:
# pre-mound nfs, used for demo
mnt_feature_uri = os.path.join(C.mount_path, C.dataset_cache_dir_name, feature_uri)
df = DiskDatasetCache.read_data_from_cache(mnt_feature_uri, start_time, end_time, fields)
get_module_logger("data").debug("finish slicing data")
if return_uri:
return df, feature_uri
return df
except AttributeError:
raise IOError("Unable to fetch instruments from remote server!")
class BaseProvider:
"""Local provider class
To keep compatible with old qlib provider.
"""
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
return Cal.calendar(start_time, end_time, freq, future=future)
def instruments(self, market="all", filter_pipe=None, start_time=None, end_time=None):
if start_time is not None or end_time is not None:
get_module_logger("Provider").warning(
"The instruments corresponds to a stock pool. "
"Parameters `start_time` and `end_time` does not take effect now."
)
return InstrumentProvider.instruments(market, filter_pipe)
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return Inst.list_instruments(instruments, start_time, end_time, freq, as_list)
def features(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=None,
):
"""
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
This function will try to use cache method which has a keyword `disk_cache`,
and will use provider method if a type error is raised because the DatasetD instance
is a provider class.
"""
disk_cache = C.default_disk_cache if disk_cache is None else disk_cache
if C.disable_disk_cache:
disk_cache = False
try:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq)
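A toy illustration of the try/TypeError dispatch used by features above (hypothetical callables standing in for the cache layer and the plain provider):
def cache_layer(fields, disk_cache):
    return ('cache', fields, disk_cache)

def plain_provider(fields):
    return ('provider', fields)

for dataset in (cache_layer, plain_provider):
    try:
        print(dataset('$close', disk_cache=1))  # succeeds only for the cache layer
    except TypeError:
        print(dataset('$close'))                # fall back to the provider signature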
class LocalProvider(BaseProvider):
def _uri(self, type, **kwargs):
"""_uri
The server hopes to get the uri of the request. The uri will be decided
by the data provider. For example, different cache layers have different uris.
:param type: The type of resource for the uri
:param **kwargs:
"""
if type == "calendar":
return Cal._uri(**kwargs)
elif type == "instrument":
return Inst._uri(**kwargs)
elif type == "feature":
return DatasetD._uri(**kwargs)
def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
"""features_uri
Return the uri of the generated cache of features/dataset
:param disk_cache:
:param instruments:
:param fields:
:param start_time:
:param end_time:
:param freq:
"""
return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache)
class ClientProvider(BaseProvider):
"""Client Provider
Requesting data from server as a client. Can propose requests:
- Calendar : Directly respond a list of calendars
- Instruments (without filter): Directly respond a list/dict of instruments
- Instruments (with filters): Respond a list/dict of instruments
- Features : Respond a cache uri
The general workflow is described as follows:
When the user uses the client provider to propose a request, the client provider will connect to the server and send the request. The client will then wait for the response. The response is made instantly, indicating whether the cache is available. The waiting procedure terminates only when the client gets a response saying `feature_available` is true.
`BUG` : Every time we make a request for certain data we need to connect to the server, wait for the response and disconnect from it. We can't make a sequence of requests within one connection. You can refer to https://python-socketio.readthedocs.io/en/latest/client.html for documentation of the python-socketIO client.
"""
def __init__(self):
from .client import Client
self.client = Client(C.flask_server, C.flask_port)
self.logger = get_module_logger(self.__class__.__name__)
if isinstance(Cal, ClientCalendarProvider):
Cal.set_conn(self.client)
Inst.set_conn(self.client)
if hasattr(DatasetD, "provider"):
DatasetD.provider.set_conn(self.client)
else:
DatasetD.set_conn(self.client)
class Wrapper(object):
"""Data Provider Wrapper"""
def __init__(self):
self._provider = None
def register(self, provider):
self._provider = provider
def __getattr__(self, key):
if self._provider is None:
raise AttributeError("Please run qlib.init() first using qlib")
return getattr(self._provider, key)
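A tiny illustration of the delegation performed by Wrapper.__getattr__ (hypothetical provider class):
w = Wrapper()

class DummyCal:
    def calendar(self):
        return ['2020-01-02']

w.register(DummyCal())
print(w.calendar())  # attribute lookup is forwarded to the registered provider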
def get_cls_from_name(cls_name):
return getattr(importlib.import_module(".data", package="qlib"), cls_name)
def get_provider_obj(config, **params):
if isinstance(config, dict):
params.update(config["kwargs"])
config = config["class"]
return get_cls_from_name(config)(**params)
def register_wrapper(wrapper, cls_or_obj):
"""register_wrapper
:param wrapper: A wrapper of all kinds of providers
:param cls_or_obj: A class or class name or object instance in data/data.py
"""
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj
wrapper.register(obj)
Cal = Wrapper()
Inst = Wrapper()
FeatureD = Wrapper()
ExpressionD = Wrapper()
DatasetD = Wrapper()
D = Wrapper()
def register_all_wrappers():
"""register_all_wrappers"""
logger = get_module_logger("data")
_calendar_provider = get_provider_obj(C.calendar_provider)
if getattr(C, "calendar_cache", None) is not None:
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
logger.debug(f"registering Cal {C.calendar_provider}-{C.calenar_cache}")
register_wrapper(Inst, C.instrument_provider)
logger.debug(f"registering Inst {C.instrument_provider}")
if getattr(C, "feature_provider", None) is not None:
feature_provider = get_provider_obj(C.feature_provider)
register_wrapper(FeatureD, feature_provider)
logger.debug(f"registering FeatureD {C.feature_provider}")
if getattr(C, "expression_provider", None) is not None:
# This provider is unnecessary in client provider
_eprovider = get_provider_obj(C.expression_provider)
if getattr(C, "expression_cache", None) is not None:
_eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
register_wrapper(ExpressionD, _eprovider)
logger.debug(f"registering ExpressioneD {C.expression_provider}-{C.expression_cache}")
_dprovider = get_provider_obj(C.dataset_provider)
if getattr(C, "dataset_cache", None) is not None:
_dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
register_wrapper(DatasetD, _dprovider)
logger.debug(f"registering DataseteD {C.dataset_provider}-{C.dataset_cache}")
register_wrapper(D, C.provider)
logger.debug(f"registering D {C.provider}")
| 34.017102
| 362
| 0.594872
|
from __future__ import division
from __future__ import print_function
import os
import abc
import six
import time
import queue
import bisect
import logging
import importlib
import traceback
import numpy as np
import pandas as pd
from multiprocessing import Pool
from .cache import H
from ..config import C
from .ops import *
from ..log import get_module_logger
from ..utils import parse_field, read_bin, hash_args, normalize_cache_fields
from .base import Feature
from .cache import DiskDatasetCache, DiskExpressionCache
@six.add_metaclass(abc.ABCMeta)
class CalendarProvider(object):
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
raise NotImplementedError("Subclass of CalendarProvider must implement `calendar` method")
def locate_index(self, start_time, end_time, freq, future):
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
calendar, calendar_index = self._get_calendar(freq=freq, future=future)
if start_time not in calendar_index:
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError:
raise IndexError(
"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
)
start_index = calendar_index[start_time]
if end_time not in calendar_index:
end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
end_index = calendar_index[end_time]
return start_time, end_time, start_index, end_index
def _get_calendar(self, freq, future):
flag = f"{freq}_future_{future}"
if flag in H["c"]:
_calendar, _calendar_index = H["c"][flag]
else:
_calendar = np.array(self._load_calendar(freq, future))
_calendar_index = {x: i for i, x in enumerate(_calendar)}
H["c"][flag] = _calendar, _calendar_index
return _calendar, _calendar_index
def _uri(self, start_time, end_time, freq, future=False):
return hash_args(start_time, end_time, freq, future)
@six.add_metaclass(abc.ABCMeta)
class InstrumentProvider(object):
@staticmethod
def instruments(market="all", filter_pipe=None):
if filter_pipe is None:
filter_pipe = []
config = {"market": market, "filter_pipe": []}
for filter_t in filter_pipe:
config["filter_pipe"].append(filter_t.to_config())
return config
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
raise NotImplementedError("Subclass of InstrumentProvider must implement `list_instruments` method")
def _uri(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return hash_args(instruments, start_time, end_time, freq, as_list)
LIST = "LIST"
DICT = "DICT"
CONF = "CONF"
@classmethod
def get_inst_type(cls, inst):
if "market" in inst:
return cls.CONF
if isinstance(inst, dict):
return cls.DICT
if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):
return cls.LIST
raise ValueError(f"Unknown instrument type {inst}")
@six.add_metaclass(abc.ABCMeta)
class FeatureProvider(object):
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
raise NotImplementedError("Subclass of FeatureProvider must implement `feature` method")
@six.add_metaclass(abc.ABCMeta)
class ExpressionProvider(object):
def __init__(self):
self.expression_instance_cache = {}
def get_expression_instance(self, field):
try:
if field in self.expression_instance_cache:
expression = self.expression_instance_cache[field]
else:
expression = eval(parse_field(field))
self.expression_instance_cache[field] = expression
except NameError as e:
get_module_logger("data").exception(
"ERROR: field [%s] contains invalid operator/variable [%s]" % (str(field), str(e).split()[1])
)
raise
except SyntaxError:
get_module_logger("data").exception("ERROR: field [%s] contains invalid syntax" % str(field))
raise
return expression
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
raise NotImplementedError("Subclass of ExpressionProvider must implement `Expression` method")
@six.add_metaclass(abc.ABCMeta)
class DatasetProvider(object):
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
raise NotImplementedError("Subclass of DatasetProvider must implement `Dataset` method")
def _uri(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=1,
**kwargs,
):
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)
@staticmethod
def get_instruments_d(instruments, freq):
if isinstance(instruments, dict):
if "market" in instruments:
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
instruments_d = list(instruments)
else:
raise ValueError("Unsupported input type for param `instrument`")
return instruments_d
@staticmethod
def get_column_names(fields):
if len(fields) == 0:
raise ValueError("fields cannot be empty")
fields = fields.copy()
column_names = [str(f) for f in fields]
return column_names
@staticmethod
def parse_fields(fields):
return [ExpressionD.get_expression_instance(f) for f in fields]
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
if isinstance(instruments_d, dict):
for inst, spans in instruments_d.items():
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
spans,
C,
),
)
else:
for inst in instruments_d:
data[inst] = p.apply_async(
DatasetProvider.expression_calculator,
args=(
inst,
start_time,
end_time,
freq,
normalize_column_names,
None,
C,
),
)
p.close()
p.join()
new_data = dict()
for inst in sorted(data.keys()):
if len(data[inst].get()) > 0:
new_data[inst] = data[inst].get()
if len(new_data) > 0:
data = pd.concat(new_data, names=["instrument"], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(columns=column_names)
return data
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
if getattr(ExpressionD, "_provider", None) is None:
register_all_wrappers()
obj = dict()
for field in column_names:
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
_calendar = Cal.calendar(freq=freq)
data.index = _calendar[data.index.values.astype(int)]
data.index.names = ["datetime"]
if spans is None:
return data
else:
mask = np.zeros(len(data), dtype=bool)
for begin, end in spans:
mask |= (data.index >= begin) & (data.index <= end)
return data[mask]
class LocalCalendarProvider(CalendarProvider):
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_cal(self):
if self.remote:
return os.path.join(C.mount_path, "calendars", "{}.txt")
else:
return os.path.join(C.provider_uri, "calendars", "{}.txt")
def _load_calendar(self, freq, future):
if future:
fname = self._uri_cal.format(freq + "_future")
if not os.path.exists(fname):
get_module_logger("data").warning(f"{freq}_future.txt not exists, return current calendar!")
fname = self._uri_cal.format(freq)
else:
fname = self._uri_cal.format(freq)
if not os.path.exists(fname):
raise ValueError("calendar not exists for freq " + freq)
with open(fname) as f:
return [pd.Timestamp(x.strip()) for x in f]
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
_calendar, _calendar_index = self._get_calendar(freq, future)
if start_time == "None":
start_time = None
if end_time == "None":
end_time = None
if start_time:
start_time = pd.Timestamp(start_time)
if start_time > _calendar[-1]:
return np.array([])
else:
start_time = _calendar[0]
if end_time:
end_time = pd.Timestamp(end_time)
if end_time < _calendar[0]:
return np.array([])
else:
end_time = _calendar[-1]
_, _, si, ei = self.locate_index(start_time, end_time, freq, future)
return _calendar[si : ei + 1]
class LocalInstrumentProvider(InstrumentProvider):
def __init__(self):
pass
@property
def _uri_inst(self):
return os.path.join(C.provider_uri, "instruments", "{}.txt")
def _load_instruments(self, market):
fname = self._uri_inst.format(market)
if not os.path.exists(fname):
raise ValueError("instruments not exists for market " + market)
_instruments = dict()
with open(fname) as f:
for line in f:
inst_time = line.strip().split()
inst = inst_time[0]
if len(inst_time) == 3:
begin = inst_time[1]
end = inst_time[2]
elif len(inst_time) == 5:
begin = inst_time[1] + " " + inst_time[2]
end = inst_time[3] + " " + inst_time[4]
_instruments.setdefault(inst, []).append((pd.Timestamp(begin), pd.Timestamp(end)))
return _instruments
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
market = instruments["market"]
if market in H["i"]:
_instruments = H["i"][market]
else:
_instruments = self._load_instruments(market)
H["i"][market] = _instruments
cal = Cal.calendar(freq=freq)
start_time = pd.Timestamp(start_time or cal[0])
end_time = pd.Timestamp(end_time or cal[-1])
_instruments_filtered = {
inst: list(
filter(
lambda x: x[0] <= x[1],
[(max(start_time, x[0]), min(end_time, x[1])) for x in spans],
)
)
for inst, spans in _instruments.items()
}
_instruments_filtered = {key: value for key, value in _instruments_filtered.items() if value}
filter_pipe = instruments["filter_pipe"]
for filter_config in filter_pipe:
from . import filter as F
filter_t = getattr(F, filter_config["filter_type"]).from_config(filter_config)
_instruments_filtered = filter_t(_instruments_filtered, start_time, end_time, freq)
if as_list:
return list(_instruments_filtered)
return _instruments_filtered
class LocalFeatureProvider(FeatureProvider):
def __init__(self, **kwargs):
self.remote = kwargs.get("remote", False)
@property
def _uri_data(self):
if self.remote:
return os.path.join(C.mount_path, "features", "{}", "{}.{}.bin")
else:
return os.path.join(C.provider_uri, "features", "{}", "{}.{}.bin")
def feature(self, instrument, field, start_index, end_index, freq):
field = str(field).lower()[1:]
uri_data = self._uri_data.format(instrument.lower(), field, freq)
if not os.path.exists(uri_data):
get_module_logger("data").warning("WARN: data not found for %s.%s" % (instrument, field))
return pd.Series()
series = read_bin(uri_data, start_index, end_index)
return series
class LocalExpressionProvider(ExpressionProvider):
def __init__(self):
super().__init__()
def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
expression = self.get_expression_instance(field)
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
_, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)
lft_etd, rght_etd = expression.get_extended_window_size()
series = expression.load(instrument, max(0, start_index - lft_etd), end_index + rght_etd, freq)
try:
series = series.astype(float)
except ValueError:
pass
if not series.empty:
series = series.loc[start_index:end_index]
return series
class LocalDatasetProvider(DatasetProvider):
def __init__(self):
pass
def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
return data
@staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq="day"):
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return
start_time = cal[0]
end_time = cal[-1]
if C.maxtasksperchild is None:
p = Pool(processes=C.kernels)
else:
p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)
for inst in instruments_d:
p.apply_async(
LocalDatasetProvider.cache_walker,
args=(
inst,
start_time,
end_time,
freq,
column_names,
),
)
p.close()
p.join()
@staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq)
class ClientCalendarProvider(CalendarProvider):
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
self.conn.send_request(
request_type="calendar",
request_content={
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"future": future,
},
msg_queue=self.queue,
msg_proc_func=lambda response_content: [pd.Timestamp(c) for c in response_content],
)
result = self.queue.get(timeout=C["timeout"])
return result
class ClientInstrumentProvider(InstrumentProvider):
def __init__(self):
self.conn = None
self.queue = queue.Queue()
def set_conn(self, conn):
self.conn = conn
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
def inst_msg_proc_func(response_content):
if isinstance(response_content, dict):
instrument = {
i: [(pd.Timestamp(s), pd.Timestamp(e)) for s, e in t] for i, t in response_content.items()
}
else:
instrument = response_content
return instrument
self.conn.send_request(
request_type="instrument",
request_content={
"instruments": instruments,
"start_time": str(start_time),
"end_time": str(end_time),
"freq": freq,
"as_list": as_list,
},
msg_queue=self.queue,
msg_proc_func=inst_msg_proc_func,
)
result = self.queue.get(timeout=C["timeout"])
if isinstance(result, Exception):
raise result
get_module_logger("data").debug("get result")
return result
class ClientDatasetProvider(DatasetProvider):
def __init__(self):
self.conn = None
def set_conn(self, conn):
self.conn = conn
self.queue = queue.Queue()
def dataset(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=0,
return_uri=False,
):
if Inst.get_inst_type(instruments) == Inst.DICT:
get_module_logger("data").warning(
"Getting features from a dict of instruments is not recommended because the features will not be "
"cached! "
"The dict of instruments will be cleaned every day."
)
if disk_cache == 0:
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 0,
},
msg_queue=self.queue,
)
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
else:
instruments_d = self.get_instruments_d(instruments, freq)
column_names = self.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return pd.DataFrame(columns=column_names)
start_time = cal[0]
end_time = cal[-1]
data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
if return_uri:
return data, feature_uri
else:
return data
else:
"""
Call the server to generate the data-set cache, get the uri of the cache file.
Then load the data from the file on NFS directly.
- using single-process implementation.
"""
self.conn.send_request(
request_type="feature",
request_content={
"instruments": instruments,
"fields": fields,
"start_time": start_time,
"end_time": end_time,
"freq": freq,
"disk_cache": 1,
},
msg_queue=self.queue,
)
feature_uri = self.queue.get(timeout=C["timeout"])
if isinstance(feature_uri, Exception):
raise feature_uri
get_module_logger("data").debug("get result")
try:
mnt_feature_uri = os.path.join(C.mount_path, C.dataset_cache_dir_name, feature_uri)
df = DiskDatasetCache.read_data_from_cache(mnt_feature_uri, start_time, end_time, fields)
get_module_logger("data").debug("finish slicing data")
if return_uri:
return df, feature_uri
return df
except AttributeError:
raise IOError("Unable to fetch instruments from remote server!")
class BaseProvider:
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
return Cal.calendar(start_time, end_time, freq, future=future)
def instruments(self, market="all", filter_pipe=None, start_time=None, end_time=None):
if start_time is not None or end_time is not None:
get_module_logger("Provider").warning(
"The instruments corresponds to a stock pool. "
"Parameters `start_time` and `end_time` does not take effect now."
)
return InstrumentProvider.instruments(market, filter_pipe)
def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
return Inst.list_instruments(instruments, start_time, end_time, freq, as_list)
def features(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=None,
):
disk_cache = C.default_disk_cache if disk_cache is None else disk_cache
if C.disable_disk_cache:
disk_cache = False
try:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq)
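# Editor's note (hedged): the TypeError fallback above keeps `features`
# compatible with dataset providers whose `dataset()` signature does not
# accept a `disk_cache` argument. An illustrative call through the `D`
# wrapper, assuming qlib.init() has been run:
#
#   D.features(["SH600000"], ["$close"], "2020-01-01", "2020-02-01")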
class LocalProvider(BaseProvider):
def _uri(self, type, **kwargs):
if type == "calendar":
return Cal._uri(**kwargs)
elif type == "instrument":
return Inst._uri(**kwargs)
elif type == "feature":
return DatasetD._uri(**kwargs)
def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache)
class ClientProvider(BaseProvider):
def __init__(self):
from .client import Client
self.client = Client(C.flask_server, C.flask_port)
self.logger = get_module_logger(self.__class__.__name__)
if isinstance(Cal, ClientCalendarProvider):
Cal.set_conn(self.client)
Inst.set_conn(self.client)
if hasattr(DatasetD, "provider"):
DatasetD.provider.set_conn(self.client)
else:
DatasetD.set_conn(self.client)
class Wrapper(object):
def __init__(self):
self._provider = None
def register(self, provider):
self._provider = provider
def __getattr__(self, key):
if self._provider is None:
raise AttributeError("Please run qlib.init() first using qlib")
return getattr(self._provider, key)
def get_cls_from_name(cls_name):
return getattr(importlib.import_module(".data", package="qlib"), cls_name)
def get_provider_obj(config, **params):
if isinstance(config, dict):
params.update(config["kwargs"])
config = config["class"]
return get_cls_from_name(config)(**params)
def register_wrapper(wrapper, cls_or_obj):
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj
wrapper.register(obj)
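# Editor's sketch (not original code): `Wrapper` is a lazy singleton facade.
# The module-level names below stay importable before initialisation, and
# attribute access is forwarded to the registered provider via
# `Wrapper.__getattr__`; before `register`, any access raises AttributeError.
# Hedged example, assuming a provider class named "LocalCalendarProvider"
# exists in this package (hypothetical here):
#
#   register_wrapper(Cal, "LocalCalendarProvider")
#   Cal.calendar("2020-01-01", "2020-12-31", freq="day")  # delegated call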
Cal = Wrapper()
Inst = Wrapper()
FeatureD = Wrapper()
ExpressionD = Wrapper()
DatasetD = Wrapper()
D = Wrapper()
def register_all_wrappers():
logger = get_module_logger("data")
_calendar_provider = get_provider_obj(C.calendar_provider)
if getattr(C, "calendar_cache", None) is not None:
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
logger.debug(f"registering Cal {C.calendar_provider}-{C.calenar_cache}")
register_wrapper(Inst, C.instrument_provider)
logger.debug(f"registering Inst {C.instrument_provider}")
if getattr(C, "feature_provider", None) is not None:
feature_provider = get_provider_obj(C.feature_provider)
register_wrapper(FeatureD, feature_provider)
logger.debug(f"registering FeatureD {C.feature_provider}")
if getattr(C, "expression_provider", None) is not None:
_eprovider = get_provider_obj(C.expression_provider)
if getattr(C, "expression_cache", None) is not None:
_eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
register_wrapper(ExpressionD, _eprovider)
logger.debug(f"registering ExpressioneD {C.expression_provider}-{C.expression_cache}")
_dprovider = get_provider_obj(C.dataset_provider)
if getattr(C, "dataset_cache", None) is not None:
_dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
register_wrapper(DatasetD, _dprovider)
logger.debug(f"registering DataseteD {C.dataset_provider}-{C.dataset_cache}")
register_wrapper(D, C.provider)
logger.debug(f"registering D {C.provider}")
| true
| true
|
790866a68399b005c227ee8b532afb93d8a5e92f
| 324
|
py
|
Python
|
workshops/migrations/0063_merge.py
|
tracykteal/amy
|
cb19e318d36b880b1c3be2104efff42ef776118a
|
[
"MIT"
] | 1
|
2015-04-03T20:26:56.000Z
|
2015-04-03T20:26:56.000Z
|
workshops/migrations/0063_merge.py
|
tracykteal/amy
|
cb19e318d36b880b1c3be2104efff42ef776118a
|
[
"MIT"
] | 1
|
2019-12-13T11:22:47.000Z
|
2019-12-13T11:22:47.000Z
|
workshops/migrations/0063_merge.py
|
tracykteal/amy
|
cb19e318d36b880b1c3be2104efff42ef776118a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('workshops', '0062_no_invoice_for_historic_events'),
('workshops', '0062_add_stalled_unresponsive_tags'),
]
operations = [
]
| 20.25
| 61
| 0.685185
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('workshops', '0062_no_invoice_for_historic_events'),
('workshops', '0062_add_stalled_unresponsive_tags'),
]
operations = [
]
| true
| true
|
790867292a1e779c8cdc2a8421fe4de1306821b9
| 5,993
|
py
|
Python
|
magenta/common/sequence_example_lib.py
|
sandutsar/magenta
|
77ed668af96edea7c993d38973b9da342bd31e82
|
[
"Apache-2.0"
] | 16,143
|
2016-05-14T04:44:54.000Z
|
2020-06-04T06:48:38.000Z
|
magenta/common/sequence_example_lib.py
|
sandutsar/magenta
|
77ed668af96edea7c993d38973b9da342bd31e82
|
[
"Apache-2.0"
] | 1,076
|
2016-05-19T14:13:43.000Z
|
2020-06-04T16:36:51.000Z
|
magenta/common/sequence_example_lib.py
|
sandutsar/magenta
|
77ed668af96edea7c993d38973b9da342bd31e82
|
[
"Apache-2.0"
] | 3,584
|
2016-05-14T05:55:19.000Z
|
2020-06-04T17:53:50.000Z
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with tf.train.SequenceExamples."""
import math
import tensorflow.compat.v1 as tf
QUEUE_CAPACITY = 500
SHUFFLE_MIN_AFTER_DEQUEUE = QUEUE_CAPACITY // 5
def _shuffle_inputs(input_tensors, capacity, min_after_dequeue, num_threads):
"""Shuffles tensors in `input_tensors`, maintaining grouping."""
shuffle_queue = tf.RandomShuffleQueue(
capacity, min_after_dequeue, dtypes=[t.dtype for t in input_tensors])
enqueue_op = shuffle_queue.enqueue(input_tensors)
runner = tf.train.QueueRunner(shuffle_queue, [enqueue_op] * num_threads)
tf.train.add_queue_runner(runner)
output_tensors = shuffle_queue.dequeue()
for i in range(len(input_tensors)):
output_tensors[i].set_shape(input_tensors[i].shape)
return output_tensors
def get_padded_batch(file_list, batch_size, input_size, label_shape=None,
num_enqueuing_threads=4, shuffle=False):
"""Reads batches of SequenceExamples from TFRecords and pads them.
Can deal with variable length SequenceExamples by padding each batch to the
length of the longest sequence with zeros.
Args:
file_list: A list of paths to TFRecord files containing SequenceExamples.
batch_size: The number of SequenceExamples to include in each batch.
input_size: The size of each input vector. The returned batch of inputs
will have a shape [batch_size, num_steps, input_size].
label_shape: Shape for labels. If not specified, will use [].
num_enqueuing_threads: The number of threads to use for enqueuing
SequenceExamples.
shuffle: Whether to shuffle the batches.
Returns:
    inputs: A tensor of shape [batch_size, num_steps, input_size] of float32s.
labels: A tensor of shape [batch_size, num_steps] of int64s.
lengths: A tensor of shape [batch_size] of int32s. The lengths of each
SequenceExample before padding.
Raises:
ValueError: If `shuffle` is True and `num_enqueuing_threads` is less than 2.
"""
file_queue = tf.train.string_input_producer(file_list)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(file_queue)
sequence_features = {
'inputs': tf.FixedLenSequenceFeature(shape=[input_size],
dtype=tf.float32),
'labels': tf.FixedLenSequenceFeature(shape=label_shape or [],
dtype=tf.int64)}
_, sequence = tf.parse_single_sequence_example(
serialized_example, sequence_features=sequence_features)
length = tf.shape(sequence['inputs'])[0]
input_tensors = [sequence['inputs'], sequence['labels'], length]
if shuffle:
if num_enqueuing_threads < 2:
raise ValueError(
'`num_enqueuing_threads` must be at least 2 when shuffling.')
    shuffle_threads = int(math.ceil(num_enqueuing_threads / 2.))
# Since there may be fewer records than SHUFFLE_MIN_AFTER_DEQUEUE, take the
# minimum of that number and the number of records.
min_after_dequeue = count_records(
file_list, stop_at=SHUFFLE_MIN_AFTER_DEQUEUE)
input_tensors = _shuffle_inputs(
input_tensors, capacity=QUEUE_CAPACITY,
min_after_dequeue=min_after_dequeue,
num_threads=shuffle_threads)
num_enqueuing_threads -= shuffle_threads
tf.logging.info(input_tensors)
return tf.train.batch(
input_tensors,
batch_size=batch_size,
capacity=QUEUE_CAPACITY,
num_threads=num_enqueuing_threads,
dynamic_pad=True,
allow_smaller_final_batch=False)
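# Editor's hedged usage sketch (not part of the original library): consuming
# the padded batches requires the TF1 queue-runner machinery, e.g. with
# `files` being a list of TFRecord paths of SequenceExamples (hypothetical):
#
#   inputs, labels, lengths = get_padded_batch(files, batch_size=32,
#                                              input_size=38, shuffle=True)
#   with tf.Session() as sess:
#     coord = tf.train.Coordinator()
#     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#     batch_inputs, batch_lengths = sess.run([inputs, lengths])
#     coord.request_stop()
#     coord.join(threads)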
def count_records(file_list, stop_at=None):
"""Counts number of records in files from `file_list` up to `stop_at`.
Args:
file_list: List of TFRecord files to count records in.
stop_at: Optional number of records to stop counting at.
Returns:
Integer number of records in files from `file_list` up to `stop_at`.
"""
num_records = 0
for tfrecord_file in file_list:
tf.logging.info('Counting records in %s.', tfrecord_file)
for _ in tf.python_io.tf_record_iterator(tfrecord_file):
num_records += 1
if stop_at and num_records >= stop_at:
tf.logging.info('Number of records is at least %d.', num_records)
return num_records
tf.logging.info('Total records: %d', num_records)
return num_records
def flatten_maybe_padded_sequences(maybe_padded_sequences, lengths=None):
"""Flattens the batch of sequences, removing padding (if applicable).
Args:
maybe_padded_sequences: A tensor of possibly padded sequences to flatten,
sized `[N, M, ...]` where M = max(lengths).
lengths: Optional length of each sequence, sized `[N]`. If None, assumes no
padding.
Returns:
flatten_maybe_padded_sequences: The flattened sequence tensor, sized
`[sum(lengths), ...]`.
"""
def flatten_unpadded_sequences():
# The sequences are equal length, so we should just flatten over the first
# two dimensions.
return tf.reshape(maybe_padded_sequences,
[-1] + maybe_padded_sequences.shape.as_list()[2:])
if lengths is None:
return flatten_unpadded_sequences()
def flatten_padded_sequences():
indices = tf.where(tf.sequence_mask(lengths))
return tf.gather_nd(maybe_padded_sequences, indices)
return tf.cond(
tf.equal(tf.reduce_min(lengths), tf.shape(maybe_padded_sequences)[1]),
flatten_unpadded_sequences,
flatten_padded_sequences)
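# Editor's worked example (illustrative): for a batch sized [2, 3, 4] with
# lengths=[3, 1], tf.sequence_mask yields [[T, T, T], [T, F, F]], so
# tf.gather_nd keeps 4 of the 6 rows and the flattened result is sized
# [4, 4]; the two padding steps of the second sequence are dropped. When
# every length equals the max, the cheaper reshape branch is taken instead.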
| 37.223602
| 80
| 0.724345
|
import math
import tensorflow.compat.v1 as tf
QUEUE_CAPACITY = 500
SHUFFLE_MIN_AFTER_DEQUEUE = QUEUE_CAPACITY // 5
def _shuffle_inputs(input_tensors, capacity, min_after_dequeue, num_threads):
shuffle_queue = tf.RandomShuffleQueue(
capacity, min_after_dequeue, dtypes=[t.dtype for t in input_tensors])
enqueue_op = shuffle_queue.enqueue(input_tensors)
runner = tf.train.QueueRunner(shuffle_queue, [enqueue_op] * num_threads)
tf.train.add_queue_runner(runner)
output_tensors = shuffle_queue.dequeue()
for i in range(len(input_tensors)):
output_tensors[i].set_shape(input_tensors[i].shape)
return output_tensors
def get_padded_batch(file_list, batch_size, input_size, label_shape=None,
num_enqueuing_threads=4, shuffle=False):
file_queue = tf.train.string_input_producer(file_list)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(file_queue)
sequence_features = {
'inputs': tf.FixedLenSequenceFeature(shape=[input_size],
dtype=tf.float32),
'labels': tf.FixedLenSequenceFeature(shape=label_shape or [],
dtype=tf.int64)}
_, sequence = tf.parse_single_sequence_example(
serialized_example, sequence_features=sequence_features)
length = tf.shape(sequence['inputs'])[0]
input_tensors = [sequence['inputs'], sequence['labels'], length]
if shuffle:
if num_enqueuing_threads < 2:
raise ValueError(
'`num_enqueuing_threads` must be at least 2 when shuffling.')
    shuffle_threads = int(math.ceil(num_enqueuing_threads / 2.))
min_after_dequeue = count_records(
file_list, stop_at=SHUFFLE_MIN_AFTER_DEQUEUE)
input_tensors = _shuffle_inputs(
input_tensors, capacity=QUEUE_CAPACITY,
min_after_dequeue=min_after_dequeue,
num_threads=shuffle_threads)
num_enqueuing_threads -= shuffle_threads
tf.logging.info(input_tensors)
return tf.train.batch(
input_tensors,
batch_size=batch_size,
capacity=QUEUE_CAPACITY,
num_threads=num_enqueuing_threads,
dynamic_pad=True,
allow_smaller_final_batch=False)
def count_records(file_list, stop_at=None):
num_records = 0
for tfrecord_file in file_list:
tf.logging.info('Counting records in %s.', tfrecord_file)
for _ in tf.python_io.tf_record_iterator(tfrecord_file):
num_records += 1
if stop_at and num_records >= stop_at:
tf.logging.info('Number of records is at least %d.', num_records)
return num_records
tf.logging.info('Total records: %d', num_records)
return num_records
def flatten_maybe_padded_sequences(maybe_padded_sequences, lengths=None):
def flatten_unpadded_sequences():
return tf.reshape(maybe_padded_sequences,
[-1] + maybe_padded_sequences.shape.as_list()[2:])
if lengths is None:
return flatten_unpadded_sequences()
def flatten_padded_sequences():
indices = tf.where(tf.sequence_mask(lengths))
return tf.gather_nd(maybe_padded_sequences, indices)
return tf.cond(
tf.equal(tf.reduce_min(lengths), tf.shape(maybe_padded_sequences)[1]),
flatten_unpadded_sequences,
flatten_padded_sequences)
| true
| true
|
790867d546611cbc16c04826a8f2387d1d94abf9
| 9,998
|
py
|
Python
|
statsmodels/sandbox/tests/test_formula.py
|
jarvmiller/statsmodels
|
15e90a99c81dd0b61c1aa76ebda2df008e88870d
|
[
"BSD-3-Clause"
] | 6
|
2017-08-23T12:43:44.000Z
|
2021-08-18T08:20:15.000Z
|
statsmodels/sandbox/tests/test_formula.py
|
jarvmiller/statsmodels
|
15e90a99c81dd0b61c1aa76ebda2df008e88870d
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/sandbox/tests/test_formula.py
|
jarvmiller/statsmodels
|
15e90a99c81dd0b61c1aa76ebda2df008e88870d
|
[
"BSD-3-Clause"
] | 3
|
2017-08-23T12:43:49.000Z
|
2018-04-24T02:27:33.000Z
|
"""
Test functions for models.formula
"""
import string
import numpy as np
import numpy.random as R
import numpy.linalg as L
from numpy.testing import assert_almost_equal, assert_equal, assert_, \
assert_raises
from statsmodels.sandbox import formula #, contrast #, utils
from statsmodels.sandbox import contrast_old as contrast
class TestTerm(object):
def test_init(self):
t1 = formula.Term("trivial")
sqr = lambda x: x*x
t2 = formula.Term("not_so_trivial", sqr, "sqr")
assert_raises(ValueError, formula.Term, "name", termname=0)
def test_str(self):
t = formula.Term("name")
s = str(t)
def test_add(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 + t2
assert_(isinstance(f, formula.Formula))
assert_(f.hasterm(t1))
assert_(f.hasterm(t2))
def test_mul(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 * t2
assert_(isinstance(f, formula.Formula))
intercept = formula.Term("intercept")
f = t1 * intercept
assert_equal(str(f), str(formula.Formula(t1)))
f = intercept * t1
assert_equal(str(f), str(formula.Formula(t1)))
class TestFormula(object):
def setup(self):
self.X = R.standard_normal((40,10))
self.namespace = {}
self.terms = []
for i in range(10):
name = '%s' % string.ascii_uppercase[i]
self.namespace[name] = self.X[:,i]
self.terms.append(formula.Term(name))
self.formula = self.terms[0]
for i in range(1, 10):
self.formula += self.terms[i]
self.formula.namespace = self.namespace
def test_namespace(self):
space1 = {'X':np.arange(50), 'Y':np.arange(50)*2}
space2 = {'X':np.arange(20), 'Y':np.arange(20)*2}
space3 = {'X':np.arange(30), 'Y':np.arange(30)*2}
X = formula.Term('X')
Y = formula.Term('Y')
X.namespace = space1
assert_almost_equal(X(), np.arange(50))
Y.namespace = space2
assert_almost_equal(Y(), np.arange(20)*2)
f = X + Y
f.namespace = space1
assert_equal(f().shape, (2,50))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space2
assert_equal(f().shape, (2,20))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space3
assert_equal(f().shape, (2,30))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
xx = X**2
assert_equal(xx().shape, (50,))
xx.namespace = space3
assert_equal(xx().shape, (30,))
xx = X * formula.I
assert_equal(xx().shape, (50,))
xx.namespace = space3
assert_equal(xx().shape, (30,))
xx = X * X
assert_equal(xx.namespace, X.namespace)
xx = X + Y
assert_equal(xx.namespace, {})
Y.namespace = {'X':np.arange(50), 'Y':np.arange(50)*2}
xx = X + Y
assert_equal(xx.namespace, {})
Y.namespace = X.namespace
xx = X+Y
assert_equal(xx.namespace, Y.namespace)
def test_termcolumns(self):
t1 = formula.Term("A")
t2 = formula.Term("B")
f = t1 + t2 + t1 * t2
def other(val):
return np.array([3.2*val,4.342*val**2, 5.234*val**3])
q = formula.Quantitative(['other%d' % i for i in range(1,4)], termname='other', func=t1, transform=other)
f += q
q.namespace = f.namespace = self.formula.namespace
a = q()
b = f()
c = f.termcolumns(q)
b = b[c]
assert_almost_equal(a,b)
def test_str(self):
s = str(self.formula)
def test_call(self):
x = self.formula()
assert_equal(np.array(x).shape, (10, 40))
def test_design(self):
x = self.formula.design()
assert_equal(x.shape, (40, 10))
def test_product(self):
prod = self.formula['A'] * self.formula['C']
f = self.formula + prod
f.namespace = self.namespace
x = f.design()
p = f['A*C']
p.namespace = self.namespace
col = f.termcolumns(prod, dict=False)
assert_almost_equal(np.squeeze(x[:,col]), self.X[:,0] * self.X[:,2])
assert_almost_equal(np.squeeze(p()), self.X[:,0] * self.X[:,2])
def test_intercept1(self):
prod = self.terms[0] * self.terms[2]
f = self.formula + formula.I
icol = f.names().index('intercept')
f.namespace = self.namespace
assert_almost_equal(f()[icol], np.ones((40,)))
def test_intercept3(self):
t = self.formula['A']
t.namespace = self.namespace
prod = t * formula.I
prod.namespace = self.formula.namespace
assert_almost_equal(np.squeeze(prod()), t())
def test_contrast1(self):
term = self.terms[0] + self.terms[2]
c = contrast.Contrast(term, self.formula)
col1 = self.formula.termcolumns(self.terms[0], dict=False)
col2 = self.formula.termcolumns(self.terms[1], dict=False)
test = [[1] + [0]*9, [0]*2 + [1] + [0]*7]
assert_almost_equal(c.matrix, test)
def test_contrast2(self):
dummy = formula.Term('zero')
self.namespace['zero'] = np.zeros((40,), np.float64)
term = dummy + self.terms[2]
c = contrast.Contrast(term, self.formula)
test = [0]*2 + [1] + [0]*7
assert_almost_equal(c.matrix, test)
def test_contrast3(self):
X = self.formula.design()
P = np.dot(X, L.pinv(X))
dummy = formula.Term('noise')
resid = np.identity(40) - P
self.namespace['noise'] = np.transpose(np.dot(resid, R.standard_normal((40,5))))
terms = dummy + self.terms[2]
terms.namespace = self.formula.namespace
c = contrast.Contrast(terms, self.formula)
assert_equal(c.matrix.shape, (10,))
def test_power(self):
t = self.terms[2]
t2 = t**2
t.namespace = t2.namespace = self.formula.namespace
assert_almost_equal(t()**2, t2())
def test_quantitative(self):
t = self.terms[2]
sint = formula.Quantitative('t', func=t, transform=np.sin)
t.namespace = sint.namespace = self.formula.namespace
assert_almost_equal(np.sin(t()), sint())
def test_factor1(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(list(fac.values()), f)
def test_factor2(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(fac().shape, (3,30))
def test_factor3(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
m = fac.main_effect(reference=1)
m.namespace = fac.namespace
assert_equal(m().shape, (2,30))
def test_factor4(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
m = fac.main_effect(reference=2)
m.namespace = fac.namespace
r = np.array([np.identity(3)]*10)
r.shape = (30,3)
r = r.T
_m = np.array([r[0]-r[2],r[1]-r[2]])
assert_almost_equal(_m, m())
def test_factor5(self):
f = ['a','b','c']*3
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(fac(), [[1,0,0]*3,
[0,1,0]*3,
[0,0,1]*3])
assert_equal(fac['a'], [1,0,0]*3)
assert_equal(fac['b'], [0,1,0]*3)
assert_equal(fac['c'], [0,0,1]*3)
def test_ordinal_factor(self):
f = ['a','b','c']*3
fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
fac.namespace = {'ff':f}
assert_equal(fac(), [0,1,2]*3)
assert_equal(fac['a'], [1,0,0]*3)
assert_equal(fac['b'], [0,1,0]*3)
assert_equal(fac['c'], [0,0,1]*3)
def test_ordinal_factor2(self):
f = ['b','c', 'a']*3
fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
fac.namespace = {'ff':f}
assert_equal(fac(), [1,2,0]*3)
assert_equal(fac['a'], [0,0,1]*3)
assert_equal(fac['b'], [1,0,0]*3)
assert_equal(fac['c'], [0,1,0]*3)
def test_contrast4(self):
f = self.formula + self.terms[5] + self.terms[5]
f.namespace = self.namespace
estimable = False
c = contrast.Contrast(self.terms[5], f)
assert_equal(estimable, False)
def test_interactions(self):
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'a*b', 'a*c', 'b*c']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=3)
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[1,2,3])
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[3])
assert_equal(set(f.termnames()), set(['a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
def test_subtract(self):
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
ff = f - f['a*b']
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'a*c', 'b*c']))
ff = f - f['a*b'] - f['a*c']
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
ff = f - (f['a*b'] + f['a*c'])
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
| 31.639241
| 145
| 0.534407
|
import string
import numpy as np
import numpy.random as R
import numpy.linalg as L
from numpy.testing import assert_almost_equal, assert_equal, assert_, \
assert_raises
from statsmodels.sandbox import formula
from statsmodels.sandbox import contrast_old as contrast
class TestTerm(object):
def test_init(self):
t1 = formula.Term("trivial")
sqr = lambda x: x*x
t2 = formula.Term("not_so_trivial", sqr, "sqr")
assert_raises(ValueError, formula.Term, "name", termname=0)
def test_str(self):
t = formula.Term("name")
s = str(t)
def test_add(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 + t2
assert_(isinstance(f, formula.Formula))
assert_(f.hasterm(t1))
assert_(f.hasterm(t2))
def test_mul(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 * t2
assert_(isinstance(f, formula.Formula))
intercept = formula.Term("intercept")
f = t1 * intercept
assert_equal(str(f), str(formula.Formula(t1)))
f = intercept * t1
assert_equal(str(f), str(formula.Formula(t1)))
class TestFormula(object):
def setup(self):
self.X = R.standard_normal((40,10))
self.namespace = {}
self.terms = []
for i in range(10):
name = '%s' % string.ascii_uppercase[i]
self.namespace[name] = self.X[:,i]
self.terms.append(formula.Term(name))
self.formula = self.terms[0]
for i in range(1, 10):
self.formula += self.terms[i]
self.formula.namespace = self.namespace
def test_namespace(self):
space1 = {'X':np.arange(50), 'Y':np.arange(50)*2}
space2 = {'X':np.arange(20), 'Y':np.arange(20)*2}
space3 = {'X':np.arange(30), 'Y':np.arange(30)*2}
X = formula.Term('X')
Y = formula.Term('Y')
X.namespace = space1
assert_almost_equal(X(), np.arange(50))
Y.namespace = space2
assert_almost_equal(Y(), np.arange(20)*2)
f = X + Y
f.namespace = space1
assert_equal(f().shape, (2,50))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space2
assert_equal(f().shape, (2,20))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space3
assert_equal(f().shape, (2,30))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
xx = X**2
assert_equal(xx().shape, (50,))
xx.namespace = space3
assert_equal(xx().shape, (30,))
xx = X * formula.I
assert_equal(xx().shape, (50,))
xx.namespace = space3
assert_equal(xx().shape, (30,))
xx = X * X
assert_equal(xx.namespace, X.namespace)
xx = X + Y
assert_equal(xx.namespace, {})
Y.namespace = {'X':np.arange(50), 'Y':np.arange(50)*2}
xx = X + Y
assert_equal(xx.namespace, {})
Y.namespace = X.namespace
xx = X+Y
assert_equal(xx.namespace, Y.namespace)
def test_termcolumns(self):
t1 = formula.Term("A")
t2 = formula.Term("B")
f = t1 + t2 + t1 * t2
def other(val):
return np.array([3.2*val,4.342*val**2, 5.234*val**3])
q = formula.Quantitative(['other%d' % i for i in range(1,4)], termname='other', func=t1, transform=other)
f += q
q.namespace = f.namespace = self.formula.namespace
a = q()
b = f()
c = f.termcolumns(q)
b = b[c]
assert_almost_equal(a,b)
def test_str(self):
s = str(self.formula)
def test_call(self):
x = self.formula()
assert_equal(np.array(x).shape, (10, 40))
def test_design(self):
x = self.formula.design()
assert_equal(x.shape, (40, 10))
def test_product(self):
prod = self.formula['A'] * self.formula['C']
f = self.formula + prod
f.namespace = self.namespace
x = f.design()
p = f['A*C']
p.namespace = self.namespace
col = f.termcolumns(prod, dict=False)
assert_almost_equal(np.squeeze(x[:,col]), self.X[:,0] * self.X[:,2])
assert_almost_equal(np.squeeze(p()), self.X[:,0] * self.X[:,2])
def test_intercept1(self):
prod = self.terms[0] * self.terms[2]
f = self.formula + formula.I
icol = f.names().index('intercept')
f.namespace = self.namespace
assert_almost_equal(f()[icol], np.ones((40,)))
def test_intercept3(self):
t = self.formula['A']
t.namespace = self.namespace
prod = t * formula.I
prod.namespace = self.formula.namespace
assert_almost_equal(np.squeeze(prod()), t())
def test_contrast1(self):
term = self.terms[0] + self.terms[2]
c = contrast.Contrast(term, self.formula)
col1 = self.formula.termcolumns(self.terms[0], dict=False)
col2 = self.formula.termcolumns(self.terms[1], dict=False)
test = [[1] + [0]*9, [0]*2 + [1] + [0]*7]
assert_almost_equal(c.matrix, test)
def test_contrast2(self):
dummy = formula.Term('zero')
self.namespace['zero'] = np.zeros((40,), np.float64)
term = dummy + self.terms[2]
c = contrast.Contrast(term, self.formula)
test = [0]*2 + [1] + [0]*7
assert_almost_equal(c.matrix, test)
def test_contrast3(self):
X = self.formula.design()
P = np.dot(X, L.pinv(X))
dummy = formula.Term('noise')
resid = np.identity(40) - P
self.namespace['noise'] = np.transpose(np.dot(resid, R.standard_normal((40,5))))
terms = dummy + self.terms[2]
terms.namespace = self.formula.namespace
c = contrast.Contrast(terms, self.formula)
assert_equal(c.matrix.shape, (10,))
def test_power(self):
t = self.terms[2]
t2 = t**2
t.namespace = t2.namespace = self.formula.namespace
assert_almost_equal(t()**2, t2())
def test_quantitative(self):
t = self.terms[2]
sint = formula.Quantitative('t', func=t, transform=np.sin)
t.namespace = sint.namespace = self.formula.namespace
assert_almost_equal(np.sin(t()), sint())
def test_factor1(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(list(fac.values()), f)
def test_factor2(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(fac().shape, (3,30))
def test_factor3(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
m = fac.main_effect(reference=1)
m.namespace = fac.namespace
assert_equal(m().shape, (2,30))
def test_factor4(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
m = fac.main_effect(reference=2)
m.namespace = fac.namespace
r = np.array([np.identity(3)]*10)
r.shape = (30,3)
r = r.T
_m = np.array([r[0]-r[2],r[1]-r[2]])
assert_almost_equal(_m, m())
def test_factor5(self):
f = ['a','b','c']*3
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(fac(), [[1,0,0]*3,
[0,1,0]*3,
[0,0,1]*3])
assert_equal(fac['a'], [1,0,0]*3)
assert_equal(fac['b'], [0,1,0]*3)
assert_equal(fac['c'], [0,0,1]*3)
def test_ordinal_factor(self):
f = ['a','b','c']*3
fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
fac.namespace = {'ff':f}
assert_equal(fac(), [0,1,2]*3)
assert_equal(fac['a'], [1,0,0]*3)
assert_equal(fac['b'], [0,1,0]*3)
assert_equal(fac['c'], [0,0,1]*3)
def test_ordinal_factor2(self):
f = ['b','c', 'a']*3
fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
fac.namespace = {'ff':f}
assert_equal(fac(), [1,2,0]*3)
assert_equal(fac['a'], [0,0,1]*3)
assert_equal(fac['b'], [1,0,0]*3)
assert_equal(fac['c'], [0,1,0]*3)
def test_contrast4(self):
f = self.formula + self.terms[5] + self.terms[5]
f.namespace = self.namespace
estimable = False
c = contrast.Contrast(self.terms[5], f)
assert_equal(estimable, False)
def test_interactions(self):
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'a*b', 'a*c', 'b*c']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=3)
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[1,2,3])
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[3])
assert_equal(set(f.termnames()), set(['a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
def test_subtract(self):
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
ff = f - f['a*b']
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'a*c', 'b*c']))
ff = f - f['a*b'] - f['a*c']
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
ff = f - (f['a*b'] + f['a*c'])
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
| true
| true
|
7908689ce4a719ab15bd49a368a87f9cad7c6d61
| 11,740
|
py
|
Python
|
tensorflow/contrib/lite/python/op_hint.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 71
|
2017-05-25T16:02:15.000Z
|
2021-06-09T16:08:08.000Z
|
tensorflow/contrib/lite/python/op_hint.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 133
|
2017-04-26T16:49:49.000Z
|
2019-10-15T11:39:26.000Z
|
tensorflow/contrib/lite/python/op_hint.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 31
|
2018-09-11T02:17:17.000Z
|
2021-12-15T10:33:35.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.contrib.lite.OpHint("cool_activation")
input = custom.add_inputs(input)
output = tf.sigmoid(input) * input
custom.add_outputs(output)
return output
image = tf.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))
session = tf.Session()
graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)
tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,
[image], [output])
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity(input)
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity(result)
a_input and output are identity ops that carry attributes recording which
argument they represent, the name of the tflite function they should turn
into, as well as a guid that uniquely identifies a particular invocation.
Once you have built your whole tensorflow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities to stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
TODO(aselle): When TensorFlow functions functionality works for arbitrary
constructs, this mechanism can be retired and changed to use python defun's.
"""
# Attr constants that are used for representation in the GraphDef
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex # TODO(aselle): Unique enough?
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_inputs(self, *args):
"""Add a sequence of inputs to the function invocation.
Args:
    *args: List of inputs to be converted (should be tf.Tensor).
Returns:
Wrapped inputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
# pylint: enable=protected-access
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
# pylint: enable=protected-access
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
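# Editor's hedged sketch (mirrors the module docstring example; names are
# illustrative, not original code):
#
#   def tflite_cool_activation(input_tensor):
#     hint = OpHint("cool_activation")
#     input_tensor, = hint.add_inputs(input_tensor)
#     output = tf.sigmoid(input_tensor) * input_tensor
#     output, = hint.add_outputs(output)
#     return output
#
# Each wrapped tensor is an identity op annotated with the function name, a
# per-invocation uuid, and its input/output index, which is exactly what
# _find_all_hints_in_graph_def below scans for.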
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
  This is used to accumulate hints found in the graphdef into a single
  conceptual unit.
Properties:
self.inputs: inputs to the op (hash from index # to argument)
self.outputs: outputs to the op (hash from index # to argument)
self.function_name: the tflite custom op name to use
self.uuid: a unique call id for this particular call (i.e.
multiple function calls would have the same function_name but different
      uuids).
self.params: A param name to key value for op constant data. I.e. for
axis on a reduction, strides on a convolution, etc.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
"""Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
a list of `LifeFuncCall` objects in the form
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
attr = operand.op.node_def.attr
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
call_def = func_calls[uuid]
call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
# TODO(aselle): Remember the attribute tensors so we can put them
# in collapse.
call_def.params[a.replace("_tflite_attr_,", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment.
"""
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
"""
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
# TODO(aselle): Support quantized flag properly
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
| 37.993528
| 80
| 0.725639
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import itertools as _itertools
import uuid as _uuid
from tensorflow.contrib import framework as _framework
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util.all_util import remove_undocumented
class OpHint(object):
FUNCTION_NAME_ATTR = "_tflite_function_name"
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
def __init__(self, function_name, **kwargs):
self._function_name = function_name
self._unique_function_id = _uuid.uuid1().hex
self._curr_input_index = 0
self._curr_output_index = 0
self._attrs_to_store_later = kwargs
self._stored_attrs = False
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
def add_inputs(self, *args):
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_INPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_input_index))
self._curr_input_index += 1
return identity_op
return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
def augmented_identity(arg):
identity_op = _array_ops.identity(arg)
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(s=self._function_name))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(s=self._unique_function_id))
identity_op.op._set_attr(
OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=self._curr_output_index))
self._curr_output_index += 1
return identity_op
wrapped_outputs = [augmented_identity(arg) for arg in args]
if not self._stored_attrs:
      for key, value in self._attrs_to_store_later.items():
self._setattr(wrapped_outputs[0], "_tflite_attr_" + key, value)
self._stored_attrs = True
return wrapped_outputs
class _LiteFuncCall(object):
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
def __str__(self):
return "tflite function %s call %s\n\tinputs: %r\n\toutputs: %r" % (
self.function_name, self.uuid, self.inputs, self.outputs)
def _find_all_hints_in_graph_def(session):
func_calls = _collections.defaultdict(_LiteFuncCall)
seen_ops = set()
for op in session.graph.get_operations():
for operand in _itertools.chain(op.inputs, op.outputs):
if operand in seen_ops:
continue
seen_ops.add(operand)
attr = operand.op.node_def.attr
      if OpHint.FUNCTION_UUID_ATTR not in attr:
        continue
      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
call_def = func_calls[uuid]
call_def.uuid = uuid
      if OpHint.FUNCTION_NAME_ATTR in attr:
        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
for a in attr:
if a.startswith("_tflite_attr_"):
          call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
def _tensor_name_base(full_tensor_name):
return full_tensor_name.name.split(":")[0]
def convert_op_hints_to_stubs(session):
hints = _find_all_hints_in_graph_def(session)
current_graph_def = session.graph_def
for call in hints.values():
input_names = [None] * len(call.inputs)
output_names = [None] * len(call.outputs)
output_dtypes = [None] * len(call.outputs)
output_quantized = False
for input_index, tensor in call.inputs.items():
input_names[input_index] = _tensor_name_base(tensor)
for output_index, tensor in call.outputs.items():
output_names[output_index] = _tensor_name_base(tensor)
output_dtypes[output_index] = tensor.dtype.as_datatype_enum
current_graph_def = _framework.fuse_op(
current_graph_def, input_names, output_names, output_dtypes,
output_quantized, call.uuid, call.function_name)
for node in current_graph_def.node:
if node.name == call.uuid:
for param, tensor in call.params.items():
node.attr[param].tensor.CopyFrom(tensor)
return current_graph_def
_allowed_symbols = ["OpHint", "convert_op_hints_to_stubs"]
remove_undocumented(__name__, _allowed_symbols)
| true
| true
|
7908697ff9cc4b3365466309dd5c999fcebecc22
| 659
|
py
|
Python
|
utils/locators.py
|
gunesmes/page-object-python-selenium
|
c4b181dc57ece7b920e67811bdd6bbf7bebbd7ee
|
[
"MIT"
] | 176
|
2015-05-15T18:01:57.000Z
|
2022-03-29T05:06:32.000Z
|
utils/locators.py
|
shashwatahalder01/page-object-python-selenium
|
c4b181dc57ece7b920e67811bdd6bbf7bebbd7ee
|
[
"MIT"
] | 1
|
2018-01-12T20:36:29.000Z
|
2018-01-12T20:36:29.000Z
|
utils/locators.py
|
shashwatahalder01/page-object-python-selenium
|
c4b181dc57ece7b920e67811bdd6bbf7bebbd7ee
|
[
"MIT"
] | 103
|
2015-08-17T00:31:14.000Z
|
2022-03-28T08:20:14.000Z
|
from selenium.webdriver.common.by import By
# for maintainability we can separate web objects by page name
class MainPageLocators(object):
LOGO = (By.ID, 'nav-logo')
ACCOUNT = (By.ID, 'nav-link-accountList')
SIGNUP = (By.CSS_SELECTOR, '#nav-signin-tooltip > div > a')
LOGIN = (By.CSS_SELECTOR, '#nav-signin-tooltip > a')
SEARCH = (By.ID, 'twotabsearchtextbox')
SEARCH_LIST = (By.CSS_SELECTOR, 'div[data-component-type="s-search-result"]')
class LoginPageLocators(object):
EMAIL = (By.ID, 'ap_email')
PASSWORD = (By.ID, 'ap_password')
SUBMIT = (By.ID, 'signInSubmit-input')
ERROR_MESSAGE = (By.ID, 'message_error')
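# Editor's hedged usage sketch (not part of the original file): the locator
# tuples above unpack directly into selenium's find_element, e.g.:
#
#   from selenium import webdriver
#   driver = webdriver.Chrome()
#   driver.find_element(*MainPageLocators.SEARCH).send_keys("page objects")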
| 32.95
| 81
| 0.6783
|
from selenium.webdriver.common.by import By
class MainPageLocators(object):
LOGO = (By.ID, 'nav-logo')
ACCOUNT = (By.ID, 'nav-link-accountList')
SIGNUP = (By.CSS_SELECTOR, '#nav-signin-tooltip > div > a')
LOGIN = (By.CSS_SELECTOR, '#nav-signin-tooltip > a')
SEARCH = (By.ID, 'twotabsearchtextbox')
SEARCH_LIST = (By.CSS_SELECTOR, 'div[data-component-type="s-search-result"]')
class LoginPageLocators(object):
EMAIL = (By.ID, 'ap_email')
PASSWORD = (By.ID, 'ap_password')
SUBMIT = (By.ID, 'signInSubmit-input')
ERROR_MESSAGE = (By.ID, 'message_error')
| true
| true
|
79086ad19879f9b7884c14974c6adef58fe06ae2
| 1,635
|
py
|
Python
|
CovertMark/constants.py
|
chongyangshi/CovertMark
|
a3156b45acceadf5fc1b9a56fa56550b4893c285
|
[
"MIT"
] | 4
|
2021-01-04T09:00:33.000Z
|
2021-10-02T13:37:03.000Z
|
CovertMark/constants.py
|
chongyangshi/CovertMark
|
a3156b45acceadf5fc1b9a56fa56550b4893c285
|
[
"MIT"
] | null | null | null |
CovertMark/constants.py
|
chongyangshi/CovertMark
|
a3156b45acceadf5fc1b9a56fa56550b4893c285
|
[
"MIT"
] | null | null | null |
"""
This module stores constants used during the operations of the UI.
"""
# Application info.
CM_NAME = "CovertMark"
CM_VER = "0.1"
CM_RELEASE = "alpha"
CM_AUTHOR = "C Shi"
CM_LINK = "https://github.com/chongyangshi"
CM_LICENSE = "Please see LICENSE.md for terms of usage of this program."
CM_TITLE = """\
 _____                     _  ___  ___           _
/  __ \                   | | |  \/  |          | |
| /  \/ _____   _____ _ __| |_| .  . | __ _ _ __| | __
| |    / _ \ \ / / _ \ '__| __| |\/| |/ _` | '__| |/ /
| \__/\ (_) \ V /  __/ |  | |_| |  | | (_| | |  |   <
 \____/\___/ \_/ \___|_|   \__\_|  |_/\__,_|_|  |_|\_\\
"""
DIVIDER = "-" * 40
PROCEDURE_RUN_FIELDS = ["strategy", "run_order", "user_params", "pt_pcap",
"pt_filters", "pt_collection", "neg_pcap", "neg_filters", "neg_collection",
"user_defined_name"]
# UI colours.
class colours:
GREEN = '\033[92m'
YELLOW = '\033[93m'
PURPLE = '\033[95m'
RED = '\033[91m'
GRAY = '\033[90m'
BGC = "\033[;7m"
BOLD = '\033[1m'
ENDC = '\033[0m'
RATINGS = {
(0, 75.0): (colours.GREEN, "This strategy is not very effective in identifying this obfuscation protocol."),
(75.0, 90.0): (colours.PURPLE, "This strategy is reasonably effective in identifying this obfuscation protocol, and can be deployed by a state censor with some difficulties."),
(90.0, 100.0): (colours.RED, "This strategy is very effective in identifying this obfuscation protocol, and can be easily deployed by a state censor.")
}
RATING_BANDS = {
(0, 75.0): "Good Covertness",
(75.0, 90.0): "Reasonable Covertness",
(90.0, 100.0): "Bad Covertness"
}
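# Editor's note (hedged): both mappings above are keyed by half-open
# percentage bands [low, high). An illustrative lookup, not original code:
#
#   def rating_for(score):
#       for (low, high), (colour, message) in RATINGS.items():
#           if low <= score < high:
#               return colour + message + colours.ENDC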
| 33.367347
| 180
| 0.582875
|
CM_NAME = "CovertMark"
CM_VER = "0.1"
CM_RELEASE = "alpha"
CM_AUTHOR = "C Shi"
CM_LINK = "https://github.com/chongyangshi"
CM_LICENSE = "Please see LICENSE.md for terms of usage of this program."
CM_TITLE = """\
 _____                     _  ___  ___           _
/  __ \                   | | |  \/  |          | |
| /  \/ _____   _____ _ __| |_| .  . | __ _ _ __| | __
| |    / _ \ \ / / _ \ '__| __| |\/| |/ _` | '__| |/ /
| \__/\ (_) \ V /  __/ |  | |_| |  | | (_| | |  |   <
 \____/\___/ \_/ \___|_|   \__\_|  |_/\__,_|_|  |_|\_\\
"""
DIVIDER = "-" * 40
PROCEDURE_RUN_FIELDS = ["strategy", "run_order", "user_params", "pt_pcap",
"pt_filters", "pt_collection", "neg_pcap", "neg_filters", "neg_collection",
"user_defined_name"]
class colours:
GREEN = '\033[92m'
YELLOW = '\033[93m'
PURPLE = '\033[95m'
RED = '\033[91m'
GRAY = '\033[90m'
BGC = "\033[;7m"
BOLD = '\033[1m'
ENDC = '\033[0m'
RATINGS = {
(0, 75.0): (colours.GREEN, "This strategy is not very effective in identifying this obfuscation protocol."),
(75.0, 90.0): (colours.PURPLE, "This strategy is reasonably effective in identifying this obfuscation protocol, and can be deployed by a state censor with some difficulties."),
(90.0, 100.0): (colours.RED, "This strategy is very effective in identifying this obfuscation protocol, and can be easily deployed by a state censor.")
}
RATING_BANDS = {
(0, 75.0): "Good Covertness",
(75.0, 90.0): "Reasonable Covertness",
(90.0, 100.0): "Bad Covertness"
}
| true
| true
|
79086b55e56304e633779fa6b9b599a688b49686
| 8,157
|
py
|
Python
|
src/ohsome/tests/test_ohsome_client.py
|
redfrexx/osm_association_rules
|
33975ce25047f9ab3b21e890bc5ed9bab59a0a2f
|
[
"BSD-3-Clause"
] | null | null | null |
src/ohsome/tests/test_ohsome_client.py
|
redfrexx/osm_association_rules
|
33975ce25047f9ab3b21e890bc5ed9bab59a0a2f
|
[
"BSD-3-Clause"
] | null | null | null |
src/ohsome/tests/test_ohsome_client.py
|
redfrexx/osm_association_rules
|
33975ce25047f9ab3b21e890bc5ed9bab59a0a2f
|
[
"BSD-3-Clause"
] | 2
|
2021-05-10T10:19:13.000Z
|
2021-09-15T10:32:10.000Z
|
#!/usr/bin/env python
"""
Tests for ohsome client
"""
import os
import pandas as pd
from nose.tools import raises
import geojson
import geopandas as gpd
import ohsome
@raises(ohsome.OhsomeException)
def test_handle_multiple_responses_throw_timeouterror():
"""
    Tests that an OhsomeException is raised when the query exceeds the timeout
:return:
"""
# GIVEN
bboxes = [8.67066,49.41423,8.68177,49.4204]
time = "2010-01-01/2011-01-01/P1Y"
keys = ["building"]
values = [""]
# WHEN
client = ohsome.OhsomeClientParallel()
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values, timeout=2)
del client
def test_elements_count():
"""
Tests counting elements within a bounding box for two timestamps
:return:
"""
# GIVEN
bboxes = [8.67066,49.41423,8.68177,49.4204]
time = "2010-01-01/2011-01-01/P1Y"
keys = ["building"]
values = [""]
timestamps = ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"]
counts = [53.0, 256.0]
expected = pd.DataFrame({"timestamp": timestamps, "value": counts})
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)
result = response.as_dataframe()
del client
# THEN
assert expected.equals(result)
def test_elements_count_group_by_key():
"""
Tests counting elements within a bounding box and grouping them by keys
:return:
"""
#GIVEN
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01/2011-01-01/P1Y"
groupByKeys = ["building"]
timestamps = ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z", "2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"]
counts = [482.0, 628.0, 53.0, 256.0]
keys = ["remainder", "remainder", "building", "building"]
expected = pd.DataFrame({"key": keys, "timestamp": timestamps, "value": counts})
expected.set_index(["key", "timestamp"], inplace=True)
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.count.groupBy.key.post(bboxes=bboxes, groupByKeys=groupByKeys, time=time)
results = response.as_dataframe()
# THEN
assert expected.equals(results)
def test_elements_count_ratio():
"""
Tests count ratio
:return:
"""
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01"
keys = ["building"]
keys2 = ["addr:city"]
values = [""]
values2 = [""]
expected = 365.0
client = ohsome.OhsomeClient()
response = client.elements.count.ratio.post(bboxes=bboxes, time=time, keys=keys, keys2=keys2,
values=values, values2=values2)
#results = response.as_dataframe()
# Cache is disabled
"""
def test_use_cache_dir():
# GIVEN
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01/2018-01-01/P1Y"
keys = ["building"]
values = [""]
cache_dir = "./tmp"
timestamps = ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"]
counts = [53.0, 256.0]
expected = pd.DataFrame({"timestamp": timestamps, "value": counts})
# WHEN
client = ohsome.OhsomeClient(cache_dir=cache_dir)
assert os.path.exists(cache_dir)
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)
result = response.as_dataframe()
#del client
"""
@raises(AssertionError)
def test_elements_count_exception():
"""
    Tests whether an AssertionError is raised if the result cannot be converted to a geodataframe object
:return:
"""
# GIVEN
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01/2011-01-01/P1Y"
keys = ["building"]
values = [""]
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)
response.as_geodataframe()
def test_elements_geometry():
"""
Tests whether the result of an elements/geometry query can be converted to a geodataframe
:return:
"""
# GIVEN
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01"
keys = ["landuse"]
values = ["grass"]
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.geometry.post(bboxes=bboxes, time=time, keys=keys, values=values)
result = response.as_geodataframe()
del client
# THEN
assert len(result.geometry) == 9
def test_to_file_assert_filetype():
"""
Asserts whether an error is thrown if the output file is not json or geojson
:return:
"""
output_file = "./out.shp"
def test_format_coordinates():
"""
    Asserts that coordinates of a MultiPolygon are converted correctly
:return:
"""
# GIVEN
bpolys = geojson.FeatureCollection([{"type": "Feature",
"geometry": {"coordinates": [[[[13,51], [13,51.1], [13.1,51.1], [13.1,51], [13,51]],
[[13,51], [14,51.1], [14.1,51.1], [14.1,51], [14,51]]]],
"type": "MultiPolygon"}}])
time = "2018-01-01"
keys = ["landuse"]
values = ["grass"]
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.geometry.post(bpolys=ohsome.format_coordinates(bpolys), time=time, keys=keys, values=values)
result = response.as_geodataframe()
del client
# THEN
assert len(result.geometry) == 74
def test_format_geodataframe():
# GIVEN
bpolys = geojson.FeatureCollection([{"type": "Feature",
"properties": {"id": 0},
"geometry": {"coordinates": [
[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
[[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
"type": "MultiPolygon"}}])
bpolys_df = gpd.GeoDataFrame().from_features(bpolys)
time = "2018-01-01"
keys = ["amenity"]
values = [""]
format = "geojson"
properties = ["tags", "metadata"]
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.count.groupBy.boundary.post(bpolys=bpolys_df, time=time, keys=keys, values=values,
format=format, properties=properties)
result = response.as_geodataframe()
del client
# THEN
assert result["value"][0] == 538
def test_parallel_user():
# GIVEN
bpolys = geojson.FeatureCollection([{"type": "Feature",
"properties": {"id": 0},
"geometry": {"coordinates": [
[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
[[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
"type": "MultiPolygon"}},
{"type": "Feature",
"properties": {"id": 1},
"geometry": {"coordinates": [
[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
[[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
"type": "MultiPolygon"}}
])
bpolys_df = gpd.GeoDataFrame().from_features(bpolys)
timeperiod = "2017-01-01,2018-01-01"
keys = ["amenity"]
values = [""]
format = "json"
properties = ["metadata"]
# WHEN
client = ohsome.OhsomeClientParallel(chunksize=1)
response = client.users.count.groupBy.boundary.post(bpolys=bpolys_df, time=timeperiod, keys=keys, values=values,
format=format, properties=properties)
result = response.as_dataframe()
del client
# THEN
assert result["value"][0] == 33.
| 32.628
| 125
| 0.55388
|
import os
import pandas as pd
from nose.tools import raises
import geojson
import geopandas as gpd
import ohsome
@raises(ohsome.OhsomeException)
def test_handle_multiple_responses_throw_timeouterror():
bboxes = [8.67066,49.41423,8.68177,49.4204]
time = "2010-01-01/2011-01-01/P1Y"
keys = ["building"]
values = [""]
client = ohsome.OhsomeClientParallel()
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values, timeout=2)
del client
def test_elements_count():
bboxes = [8.67066,49.41423,8.68177,49.4204]
time = "2010-01-01/2011-01-01/P1Y"
keys = ["building"]
values = [""]
timestamps = ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"]
counts = [53.0, 256.0]
expected = pd.DataFrame({"timestamp": timestamps, "value": counts})
client = ohsome.OhsomeClient()
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)
result = response.as_dataframe()
del client
assert expected.equals(result)
def test_elements_count_group_by_key():
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01/2011-01-01/P1Y"
groupByKeys = ["building"]
timestamps = ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z", "2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"]
counts = [482.0, 628.0, 53.0, 256.0]
keys = ["remainder", "remainder", "building", "building"]
expected = pd.DataFrame({"key": keys, "timestamp": timestamps, "value": counts})
expected.set_index(["key", "timestamp"], inplace=True)
client = ohsome.OhsomeClient()
response = client.elements.count.groupBy.key.post(bboxes=bboxes, groupByKeys=groupByKeys, time=time)
results = response.as_dataframe()
assert expected.equals(results)
def test_elements_count_ratio():
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01"
keys = ["building"]
keys2 = ["addr:city"]
values = [""]
values2 = [""]
expected = 365.0
client = ohsome.OhsomeClient()
response = client.elements.count.ratio.post(bboxes=bboxes, time=time, keys=keys, keys2=keys2,
values=values, values2=values2)
@raises(AssertionError)
def test_elements_count_exception():
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01/2011-01-01/P1Y"
keys = ["building"]
values = [""]
client = ohsome.OhsomeClient()
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)
response.as_geodataframe()
def test_elements_geometry():
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01"
keys = ["landuse"]
values = ["grass"]
client = ohsome.OhsomeClient()
response = client.elements.geometry.post(bboxes=bboxes, time=time, keys=keys, values=values)
result = response.as_geodataframe()
del client
assert len(result.geometry) == 9
def test_to_file_assert_filetype():
output_file = "./out.shp"
def test_format_coordinates():
bpolys = geojson.FeatureCollection([{"type": "Feature",
"geometry": {"coordinates": [[[[13,51], [13,51.1], [13.1,51.1], [13.1,51], [13,51]],
[[13,51], [14,51.1], [14.1,51.1], [14.1,51], [14,51]]]],
"type": "MultiPolygon"}}])
time = "2018-01-01"
keys = ["landuse"]
values = ["grass"]
client = ohsome.OhsomeClient()
response = client.elements.geometry.post(bpolys=ohsome.format_coordinates(bpolys), time=time, keys=keys, values=values)
result = response.as_geodataframe()
del client
assert len(result.geometry) == 74
def test_format_geodataframe():
bpolys = geojson.FeatureCollection([{"type": "Feature",
"properties": {"id": 0},
"geometry": {"coordinates": [
[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
[[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
"type": "MultiPolygon"}}])
bpolys_df = gpd.GeoDataFrame().from_features(bpolys)
time = "2018-01-01"
keys = ["amenity"]
values = [""]
format = "geojson"
properties = ["tags", "metadata"]
client = ohsome.OhsomeClient()
response = client.elements.count.groupBy.boundary.post(bpolys=bpolys_df, time=time, keys=keys, values=values,
format=format, properties=properties)
result = response.as_geodataframe()
del client
assert result["value"][0] == 538
def test_parallel_user():
bpolys = geojson.FeatureCollection([{"type": "Feature",
"properties": {"id": 0},
"geometry": {"coordinates": [
[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
[[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
"type": "MultiPolygon"}},
{"type": "Feature",
"properties": {"id": 1},
"geometry": {"coordinates": [
[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
[[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
"type": "MultiPolygon"}}
])
bpolys_df = gpd.GeoDataFrame().from_features(bpolys)
timeperiod = "2017-01-01,2018-01-01"
keys = ["amenity"]
values = [""]
format = "json"
properties = ["metadata"]
client = ohsome.OhsomeClientParallel(chunksize=1)
response = client.users.count.groupBy.boundary.post(bpolys=bpolys_df, time=timeperiod, keys=keys, values=values,
format=format, properties=properties)
result = response.as_dataframe()
del client
assert result["value"][0] == 33.
| true
| true
|
79086b9baa6bccfa683766ed58e78a2863a0073a
| 5,013
|
py
|
Python
|
demo/hrnet_w32_coco_256x192.py
|
vineethbabu/mmaction2
|
f2e4289807c95bad7dd83757a49c5d9ebd2f881e
|
[
"Apache-2.0"
] | 1
|
2021-12-06T08:42:23.000Z
|
2021-12-06T08:42:23.000Z
|
demo/hrnet_w32_coco_256x192.py
|
wuyy258/mmaction2
|
3f3ad9cae291c991b822cbc2ecfb88c1188e87c5
|
[
"Apache-2.0"
] | null | null | null |
demo/hrnet_w32_coco_256x192.py
|
wuyy258/mmaction2
|
3f3ad9cae291c991b822cbc2ecfb88c1188e87c5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=32,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
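# Illustrative only: configs in this format are normally consumed through
# mmcv's Config loader; the path below is the one this file ships under.
# Left commented out so the config itself stays side-effect free.
# from mmcv import Config
# cfg = Config.fromfile('demo/hrnet_w32_coco_256x192.py')
# cfg.model.backbone.type  # -> 'HRNet'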
| 28.810345
| 79
| 0.591063
|
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=32,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| true
| true
|
79086cd5adae52deb112b0cc2e985ad98c92d82d
| 23,578
|
py
|
Python
|
dassl/engine/trainer.py
|
zhaoxin94/Dassl.pytorch
|
c0690f3669c561f2ed7410c22fc65eaef30dfd22
|
[
"MIT"
] | 1
|
2021-07-25T10:19:02.000Z
|
2021-07-25T10:19:02.000Z
|
dassl/engine/trainer.py
|
zhaoxin94/Dassl.pytorch
|
c0690f3669c561f2ed7410c22fc65eaef30dfd22
|
[
"MIT"
] | null | null | null |
dassl/engine/trainer.py
|
zhaoxin94/Dassl.pytorch
|
c0690f3669c561f2ed7410c22fc65eaef30dfd22
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import os.path as osp
import datetime
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import nni
from dassl.data import DataManager
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import (
MetricMeter, AverageMeter, tolist_if_not, count_num_param, load_checkpoint,
save_checkpoint, resume_from_checkpoint, load_pretrained_weights
)
from dassl.modeling import build_head, build_backbone
from dassl.evaluation import build_evaluator
class SimpleNet(nn.Module):
"""A simple neural network composed of a CNN backbone
    and optionally a head, such as an MLP, for classification.
"""
def __init__(self, cfg, model_cfg, num_classes, **kwargs):
super().__init__()
self.backbone = build_backbone(
model_cfg.BACKBONE.NAME,
verbose=cfg.VERBOSE,
pretrained=model_cfg.BACKBONE.PRETRAINED,
**kwargs
)
fdim = self.backbone.out_features
print("------------------------fdim:", fdim)
self.head = None
if model_cfg.HEAD.NAME and model_cfg.HEAD.HIDDEN_LAYERS:
self.head = build_head(
model_cfg.HEAD.NAME,
verbose=cfg.VERBOSE,
in_features=fdim,
hidden_layers=model_cfg.HEAD.HIDDEN_LAYERS,
activation=model_cfg.HEAD.ACTIVATION,
bn=model_cfg.HEAD.BN,
dropout=model_cfg.HEAD.DROPOUT,
**kwargs
)
fdim = self.head.out_features
self.classifier = None
if num_classes > 0:
self.classifier = nn.Linear(fdim, num_classes)
self._fdim = fdim
@property
def fdim(self):
return self._fdim
def forward(self, x, return_feature=False):
f = self.backbone(x)
if self.head is not None:
f = self.head(f)
if self.classifier is None:
return f
y = self.classifier(f)
if return_feature:
return y, f
return y
class TrainerBase:
"""Base class for iterative trainer."""
def __init__(self):
self._models = OrderedDict()
self._optims = OrderedDict()
self._scheds = OrderedDict()
self._writer = None
def register_model(self, name='model', model=None, optim=None, sched=None):
if self.__dict__.get('_models') is None:
raise AttributeError(
'Cannot assign model before super().__init__() call'
)
if self.__dict__.get('_optims') is None:
raise AttributeError(
'Cannot assign optim before super().__init__() call'
)
if self.__dict__.get('_scheds') is None:
raise AttributeError(
'Cannot assign sched before super().__init__() call'
)
assert name not in self._models, 'Found duplicate model names'
self._models[name] = model
self._optims[name] = optim
self._scheds[name] = sched
def get_model_names(self, names=None):
names_real = list(self._models.keys())
if names is not None:
names = tolist_if_not(names)
for name in names:
assert name in names_real
return names
else:
return names_real
def save_model(self, epoch, directory, is_best=False, model_name=''):
names = self.get_model_names()
for name in names:
model_dict = self._models[name].state_dict()
optim_dict = None
if self._optims[name] is not None:
optim_dict = self._optims[name].state_dict()
sched_dict = None
if self._scheds[name] is not None:
sched_dict = self._scheds[name].state_dict()
save_checkpoint(
{
'state_dict': model_dict,
'epoch': epoch + 1,
'optimizer': optim_dict,
'scheduler': sched_dict
},
osp.join(directory, name),
is_best=is_best,
model_name=model_name
)
def resume_model_if_exist(self, directory):
names = self.get_model_names()
file_missing = False
for name in names:
path = osp.join(directory, name)
if not osp.exists(path):
file_missing = True
break
if file_missing:
print('No checkpoint found, train from scratch')
return 0
print(
'Found checkpoint in "{}". Will resume training'.format(directory)
)
for name in names:
path = osp.join(directory, name)
start_epoch = resume_from_checkpoint(
path, self._models[name], self._optims[name],
self._scheds[name]
)
return start_epoch
def load_model(self, directory, epoch=None):
if not directory:
print(
'Note that load_model() is skipped as no pretrained model is given'
)
return
names = self.get_model_names()
# By default, the best model is loaded
model_file = 'model-best.pth.tar'
if epoch is not None:
model_file = 'model.pth.tar-' + str(epoch)
for name in names:
model_path = osp.join(directory, name, model_file)
if not osp.exists(model_path):
raise FileNotFoundError(
'Model not found at "{}"'.format(model_path)
)
checkpoint = load_checkpoint(model_path)
state_dict = checkpoint['state_dict']
epoch = checkpoint['epoch']
print(
'Loading weights to {} '
'from "{}" (epoch = {})'.format(name, model_path, epoch)
)
self._models[name].load_state_dict(state_dict)
def set_model_mode(self, mode='train', names=None):
names = self.get_model_names(names)
for name in names:
if mode == 'train':
self._models[name].train()
else:
self._models[name].eval()
def update_lr(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._scheds[name] is not None:
self._scheds[name].step()
def detect_anomaly(self, loss):
if not torch.isfinite(loss).all():
raise FloatingPointError('Loss is infinite or NaN!')
def init_writer(self, log_dir):
if self.__dict__.get('_writer') is None or self._writer is None:
print(
'Initializing summary writer for tensorboard '
'with log_dir={}'.format(log_dir)
)
self._writer = SummaryWriter(log_dir=log_dir)
def close_writer(self):
if self._writer is not None:
self._writer.close()
def write_scalar(self, tag, scalar_value, global_step=None):
if self._writer is None:
# Do nothing if writer is not initialized
# Note that writer is only used when training is needed
pass
else:
self._writer.add_scalar(tag, scalar_value, global_step)
def train(self, start_epoch, max_epoch):
"""Generic training loops."""
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.before_train()
for self.epoch in range(self.start_epoch, self.max_epoch):
self.before_epoch()
self.run_epoch()
self.after_epoch()
self.after_train()
def before_train(self):
pass
def after_train(self):
pass
def before_epoch(self):
pass
def after_epoch(self):
pass
def run_epoch(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def parse_batch_train(self, batch):
raise NotImplementedError
def parse_batch_test(self, batch):
raise NotImplementedError
def forward_backward(self, batch):
raise NotImplementedError
def model_inference(self, input):
raise NotImplementedError
def model_zero_grad(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
self._optims[name].zero_grad()
def model_backward(self, loss):
self.detect_anomaly(loss)
if not self.use_amp:
loss.backward()
else:
self.scaler.scale(loss).backward()
def model_update(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
if not self.use_amp:
self._optims[name].step()
else:
self.scaler.step(self._optims[name])
def model_backward_and_update(self, loss, names=None):
self.model_zero_grad(names)
self.model_backward(loss)
self.model_update(names)
if self.use_amp:
self.scaler.update()
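# Illustrative only (not part of Dassl): the smallest concrete trainer that
# exercises the helpers above. The cross-entropy loss and the (input, label)
# batch layout are assumptions; TrainerBase prescribes neither.
import torch.nn.functional as F

class _ToyTrainer(TrainerBase):

    def __init__(self, model, optim):
        super().__init__()
        self.use_amp = False  # model_backward() consults this flag
        self.register_model('model', model, optim)

    def forward_backward(self, batch):
        input, label = batch
        output = self._models['model'](input)
        loss = F.cross_entropy(output, label)
        self.model_backward_and_update(loss)  # zero_grad -> backward -> step
        return {'loss': loss.item()}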
class SimpleTrainer(TrainerBase):
"""A simple trainer class implementing generic functions."""
def __init__(self, cfg):
super().__init__()
self.check_cfg(cfg)
if torch.cuda.is_available() and cfg.USE_CUDA:
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# use amp to accelerate training
self.use_amp = cfg.TRAIN.USE_AMP
if self.use_amp:
self.scaler = torch.cuda.amp.GradScaler()
# Save as attributes some frequently used variables
self.start_epoch = self.epoch = 0
self.max_epoch = cfg.OPTIM.MAX_EPOCH
self.output_dir = cfg.OUTPUT_DIR
self.cfg = cfg
self.build_data_loader()
self.build_model()
self.evaluator = build_evaluator(cfg, lab2cname=self.dm.lab2cname)
# zhaoxin modify
self.best_val_acc = -np.inf
self.best_test_acc = -np.inf
self.best_val_test_acc = 0
self.best_val_epoch = 0
self.best_test_epoch = 0
def check_cfg(self, cfg):
"""Check whether some variables are set correctly for
the trainer (optional).
        For example, a trainer might require a particular sampler
        for training, such as 'RandomDomainSampler', so it is good
        to check:
assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler'
"""
pass
def build_data_loader(self):
"""Create essential data-related attributes.
What must be done in the re-implementation
of this method:
1) initialize data manager
2) assign as attributes the data loaders
3) assign as attribute the number of classes
"""
self.dm = DataManager(self.cfg)
self.train_loader_x = self.dm.train_loader_x
self.train_loader_u = self.dm.train_loader_u
self.val_loader = self.dm.val_loader
self.test_loader = self.dm.test_loader
self.num_classes = self.dm.num_classes
def build_model(self):
"""Build and register model.
The default builds a classification model along with its
optimizer and scheduler.
Custom trainers can re-implement this method if necessary.
"""
cfg = self.cfg
print('Building model')
self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes)
# for name, module in self.model.named_children():
# print(name)
if cfg.MODEL.INIT_WEIGHTS:
load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)
self.model.to(self.device)
print('# params: {:,}'.format(count_num_param(self.model)))
self.optim = build_optimizer(self.model, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
self.register_model('model', self.model, self.optim, self.sched)
def train(self):
super().train(self.start_epoch, self.max_epoch)
def before_train(self):
# directory = self.cfg.OUTPUT_DIR
if self.cfg.RESUME:
directory = self.cfg.RESUME
self.start_epoch = self.resume_model_if_exist(directory)
# Initialize summary writer
self.init_writer(self.output_dir)
# Remember the starting time (for computing the elapsed time)
self.time_start = time.time()
def after_train(self):
print('Finished training')
do_test = not self.cfg.TEST.NO_TEST
if do_test and not self.cfg.NNI:
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print('Deploy the model with the best val performance')
self.load_model(self.output_dir)
# zhaoxin modify
if self.cfg.TEST.PER_CLASS_RESULT:
self.best_val_test_acc, per_class_accs = self.test(return_per_class_results=True)
perclass_path = osp.join(self.output_dir, 'perclass_result.txt')
with open(perclass_path, 'w') as f:
for acc in per_class_accs:
f.write("{:6f}\n".format(acc))
else:
self.best_val_test_acc = self.test()
# zhaoxin add
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print(
'best_val_acc: {}\nbest_val_epoch: {}\nbest_val_test_acc: {}'.
format(
self.best_val_acc, self.best_val_epoch,
self.best_val_test_acc
)
)
if self.cfg.TEST.TEST_EVERY_EPOCH:
print(
'best_test_acc: {}\nbest_test_epoch: {}'.format(
self.best_test_acc, self.best_test_epoch
)
)
result_path = osp.join(self.output_dir, 'result.txt')
with open(result_path, 'w') as f:
f.write("{:6f}\n".format(self.best_val_test_acc))
if self.cfg.NNI:
nni.report_final_result(self.best_val_acc)
# Show elapsed time
elapsed = round(time.time() - self.time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed: {}'.format(elapsed))
# Close writer
self.close_writer()
def after_epoch(self):
last_epoch = (self.epoch + 1) == self.max_epoch
do_test = not self.cfg.TEST.NO_TEST
meet_checkpoint_freq = (
self.epoch + 1
) % self.cfg.TRAIN.CHECKPOINT_FREQ == 0 if self.cfg.TRAIN.CHECKPOINT_FREQ > 0 else False
# zhaoxin modify
if do_test and self.cfg.TEST.FINAL_MODEL == 'best_val':
curr_val_acc = self.test(split='val')
# nni: report intermediate result
if self.cfg.NNI:
nni.report_intermediate_result(curr_val_acc)
is_best = curr_val_acc > self.best_val_acc
if is_best:
self.best_val_acc = curr_val_acc
self.best_val_epoch = self.epoch + 1
self.save_model(
self.epoch,
self.output_dir,
model_name='model-best.pth.tar'
)
if do_test and self.cfg.TEST.TEST_EVERY_EPOCH:
curr_test_acc = self.test(split='test')
if curr_test_acc > self.best_test_acc:
self.best_test_acc = curr_test_acc
self.best_test_epoch = self.epoch + 1
# if self.cfg.TEST.FINAL_MODEL == 'best_val':
# if is_best:
# self.best_val_test_acc = curr_test_acc
if meet_checkpoint_freq or last_epoch:
self.save_model(self.epoch, self.output_dir)
@torch.no_grad()
def test(self, split=None, return_per_class_results=False):
"""A generic testing pipeline."""
self.set_model_mode('eval')
self.evaluator.reset()
if split is None:
split = self.cfg.TEST.SPLIT
if split == 'val' and self.val_loader is not None:
data_loader = self.val_loader
print('Do evaluation on {} set'.format(split))
else:
data_loader = self.test_loader
print('Do evaluation on test set')
for batch_idx, batch in enumerate(data_loader):
input, label = self.parse_batch_test(batch)
output = self.model_inference(input)
self.evaluator.process(output, label)
results = self.evaluator.evaluate()
for k, v in results.items():
if k == 'perclass_accuracies':
continue
tag = '{}/{}'.format(split, k)
self.write_scalar(tag, v, self.epoch)
if not return_per_class_results:
return list(results.values())[0]
else:
return results['accuracy'], results['perclass_accuracies']
def model_inference(self, input):
return self.model(input)
def parse_batch_test(self, batch):
input = batch['img']
label = batch['label']
input = input.to(self.device)
label = label.to(self.device)
return input, label
def get_current_lr(self, names=None):
names = self.get_model_names(names)
name = names[0]
return self._optims[name].param_groups[0]['lr']
class TrainerXU(SimpleTrainer):
"""A base trainer using both labeled and unlabeled data.
In the context of domain adaptation, labeled and unlabeled data
come from source and target domains respectively.
When it comes to semi-supervised learning, all data comes from the
same domain.
"""
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
# Decide to iterate over labeled or unlabeled dataset
len_train_loader_x = len(self.train_loader_x)
len_train_loader_u = len(self.train_loader_u)
if self.cfg.TRAIN.COUNT_ITER == 'train_x':
self.num_batches = len_train_loader_x
elif self.cfg.TRAIN.COUNT_ITER == 'train_u':
self.num_batches = len_train_loader_u
elif self.cfg.TRAIN.COUNT_ITER == 'smaller_one':
self.num_batches = min(len_train_loader_x, len_train_loader_u)
else:
raise ValueError
train_loader_x_iter = iter(self.train_loader_x)
train_loader_u_iter = iter(self.train_loader_u)
end = time.time()
for self.batch_idx in range(self.num_batches):
try:
batch_x = next(train_loader_x_iter)
except StopIteration:
train_loader_x_iter = iter(self.train_loader_x)
batch_x = next(train_loader_x_iter)
try:
batch_u = next(train_loader_u_iter)
except StopIteration:
train_loader_u_iter = iter(self.train_loader_u)
batch_u = next(train_loader_u_iter)
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch_x, batch_u)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch_x, batch_u):
input_x = batch_x['img']
label_x = batch_x['label']
input_u = batch_u['img']
input_x = input_x.to(self.device)
label_x = label_x.to(self.device)
input_u = input_u.to(self.device)
return input_x, label_x, input_u
class TrainerX(SimpleTrainer):
"""A base trainer using labeled data only."""
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
self.num_batches = len(self.train_loader_x)
end = time.time()
for self.batch_idx, batch in enumerate(self.train_loader_x):
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch):
input = batch['img']
label = batch['label']
domain = batch['domain']
input = input.to(self.device)
label = label.to(self.device)
domain = domain.to(self.device)
return input, label, domain
| 33.30226
| 97
| 0.57015
|
import time
import numpy as np
import os.path as osp
import datetime
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import nni
from dassl.data import DataManager
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import (
MetricMeter, AverageMeter, tolist_if_not, count_num_param, load_checkpoint,
save_checkpoint, resume_from_checkpoint, load_pretrained_weights
)
from dassl.modeling import build_head, build_backbone
from dassl.evaluation import build_evaluator
class SimpleNet(nn.Module):
def __init__(self, cfg, model_cfg, num_classes, **kwargs):
super().__init__()
self.backbone = build_backbone(
model_cfg.BACKBONE.NAME,
verbose=cfg.VERBOSE,
pretrained=model_cfg.BACKBONE.PRETRAINED,
**kwargs
)
fdim = self.backbone.out_features
print("------------------------fdim:", fdim)
self.head = None
if model_cfg.HEAD.NAME and model_cfg.HEAD.HIDDEN_LAYERS:
self.head = build_head(
model_cfg.HEAD.NAME,
verbose=cfg.VERBOSE,
in_features=fdim,
hidden_layers=model_cfg.HEAD.HIDDEN_LAYERS,
activation=model_cfg.HEAD.ACTIVATION,
bn=model_cfg.HEAD.BN,
dropout=model_cfg.HEAD.DROPOUT,
**kwargs
)
fdim = self.head.out_features
self.classifier = None
if num_classes > 0:
self.classifier = nn.Linear(fdim, num_classes)
self._fdim = fdim
@property
def fdim(self):
return self._fdim
def forward(self, x, return_feature=False):
f = self.backbone(x)
if self.head is not None:
f = self.head(f)
if self.classifier is None:
return f
y = self.classifier(f)
if return_feature:
return y, f
return y
class TrainerBase:
def __init__(self):
self._models = OrderedDict()
self._optims = OrderedDict()
self._scheds = OrderedDict()
self._writer = None
def register_model(self, name='model', model=None, optim=None, sched=None):
if self.__dict__.get('_models') is None:
raise AttributeError(
'Cannot assign model before super().__init__() call'
)
if self.__dict__.get('_optims') is None:
raise AttributeError(
'Cannot assign optim before super().__init__() call'
)
if self.__dict__.get('_scheds') is None:
raise AttributeError(
'Cannot assign sched before super().__init__() call'
)
assert name not in self._models, 'Found duplicate model names'
self._models[name] = model
self._optims[name] = optim
self._scheds[name] = sched
def get_model_names(self, names=None):
names_real = list(self._models.keys())
if names is not None:
names = tolist_if_not(names)
for name in names:
assert name in names_real
return names
else:
return names_real
def save_model(self, epoch, directory, is_best=False, model_name=''):
names = self.get_model_names()
for name in names:
model_dict = self._models[name].state_dict()
optim_dict = None
if self._optims[name] is not None:
optim_dict = self._optims[name].state_dict()
sched_dict = None
if self._scheds[name] is not None:
sched_dict = self._scheds[name].state_dict()
save_checkpoint(
{
'state_dict': model_dict,
'epoch': epoch + 1,
'optimizer': optim_dict,
'scheduler': sched_dict
},
osp.join(directory, name),
is_best=is_best,
model_name=model_name
)
def resume_model_if_exist(self, directory):
names = self.get_model_names()
file_missing = False
for name in names:
path = osp.join(directory, name)
if not osp.exists(path):
file_missing = True
break
if file_missing:
print('No checkpoint found, train from scratch')
return 0
print(
'Found checkpoint in "{}". Will resume training'.format(directory)
)
for name in names:
path = osp.join(directory, name)
start_epoch = resume_from_checkpoint(
path, self._models[name], self._optims[name],
self._scheds[name]
)
return start_epoch
def load_model(self, directory, epoch=None):
if not directory:
print(
'Note that load_model() is skipped as no pretrained model is given'
)
return
names = self.get_model_names()
model_file = 'model-best.pth.tar'
if epoch is not None:
model_file = 'model.pth.tar-' + str(epoch)
for name in names:
model_path = osp.join(directory, name, model_file)
if not osp.exists(model_path):
raise FileNotFoundError(
'Model not found at "{}"'.format(model_path)
)
checkpoint = load_checkpoint(model_path)
state_dict = checkpoint['state_dict']
epoch = checkpoint['epoch']
print(
'Loading weights to {} '
'from "{}" (epoch = {})'.format(name, model_path, epoch)
)
self._models[name].load_state_dict(state_dict)
def set_model_mode(self, mode='train', names=None):
names = self.get_model_names(names)
for name in names:
if mode == 'train':
self._models[name].train()
else:
self._models[name].eval()
def update_lr(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._scheds[name] is not None:
self._scheds[name].step()
def detect_anomaly(self, loss):
if not torch.isfinite(loss).all():
raise FloatingPointError('Loss is infinite or NaN!')
def init_writer(self, log_dir):
if self.__dict__.get('_writer') is None or self._writer is None:
print(
'Initializing summary writer for tensorboard '
'with log_dir={}'.format(log_dir)
)
self._writer = SummaryWriter(log_dir=log_dir)
def close_writer(self):
if self._writer is not None:
self._writer.close()
def write_scalar(self, tag, scalar_value, global_step=None):
if self._writer is None:
pass
else:
self._writer.add_scalar(tag, scalar_value, global_step)
def train(self, start_epoch, max_epoch):
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.before_train()
for self.epoch in range(self.start_epoch, self.max_epoch):
self.before_epoch()
self.run_epoch()
self.after_epoch()
self.after_train()
def before_train(self):
pass
def after_train(self):
pass
def before_epoch(self):
pass
def after_epoch(self):
pass
def run_epoch(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def parse_batch_train(self, batch):
raise NotImplementedError
def parse_batch_test(self, batch):
raise NotImplementedError
def forward_backward(self, batch):
raise NotImplementedError
def model_inference(self, input):
raise NotImplementedError
def model_zero_grad(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
self._optims[name].zero_grad()
def model_backward(self, loss):
self.detect_anomaly(loss)
if not self.use_amp:
loss.backward()
else:
self.scaler.scale(loss).backward()
def model_update(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
if not self.use_amp:
self._optims[name].step()
else:
self.scaler.step(self._optims[name])
def model_backward_and_update(self, loss, names=None):
self.model_zero_grad(names)
self.model_backward(loss)
self.model_update(names)
if self.use_amp:
self.scaler.update()
class SimpleTrainer(TrainerBase):
def __init__(self, cfg):
super().__init__()
self.check_cfg(cfg)
if torch.cuda.is_available() and cfg.USE_CUDA:
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
self.use_amp = cfg.TRAIN.USE_AMP
if self.use_amp:
self.scaler = torch.cuda.amp.GradScaler()
self.start_epoch = self.epoch = 0
self.max_epoch = cfg.OPTIM.MAX_EPOCH
self.output_dir = cfg.OUTPUT_DIR
self.cfg = cfg
self.build_data_loader()
self.build_model()
self.evaluator = build_evaluator(cfg, lab2cname=self.dm.lab2cname)
self.best_val_acc = -np.inf
self.best_test_acc = -np.inf
self.best_val_test_acc = 0
self.best_val_epoch = 0
self.best_test_epoch = 0
def check_cfg(self, cfg):
pass
def build_data_loader(self):
self.dm = DataManager(self.cfg)
self.train_loader_x = self.dm.train_loader_x
self.train_loader_u = self.dm.train_loader_u
self.val_loader = self.dm.val_loader
self.test_loader = self.dm.test_loader
self.num_classes = self.dm.num_classes
def build_model(self):
cfg = self.cfg
print('Building model')
self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes)
if cfg.MODEL.INIT_WEIGHTS:
load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)
self.model.to(self.device)
print('# params: {:,}'.format(count_num_param(self.model)))
self.optim = build_optimizer(self.model, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
self.register_model('model', self.model, self.optim, self.sched)
def train(self):
super().train(self.start_epoch, self.max_epoch)
def before_train(self):
if self.cfg.RESUME:
directory = self.cfg.RESUME
self.start_epoch = self.resume_model_if_exist(directory)
self.init_writer(self.output_dir)
self.time_start = time.time()
def after_train(self):
print('Finished training')
do_test = not self.cfg.TEST.NO_TEST
if do_test and not self.cfg.NNI:
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print('Deploy the model with the best val performance')
self.load_model(self.output_dir)
if self.cfg.TEST.PER_CLASS_RESULT:
self.best_val_test_acc, per_class_accs = self.test(return_per_class_results=True)
perclass_path = osp.join(self.output_dir, 'perclass_result.txt')
with open(perclass_path, 'w') as f:
for acc in per_class_accs:
f.write("{:6f}\n".format(acc))
else:
self.best_val_test_acc = self.test()
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print(
'best_val_acc: {}\nbest_val_epoch: {}\nbest_val_test_acc: {}'.
format(
self.best_val_acc, self.best_val_epoch,
self.best_val_test_acc
)
)
if self.cfg.TEST.TEST_EVERY_EPOCH:
print(
'best_test_acc: {}\nbest_test_epoch: {}'.format(
self.best_test_acc, self.best_test_epoch
)
)
result_path = osp.join(self.output_dir, 'result.txt')
with open(result_path, 'w') as f:
f.write("{:6f}\n".format(self.best_val_test_acc))
if self.cfg.NNI:
nni.report_final_result(self.best_val_acc)
elapsed = round(time.time() - self.time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed: {}'.format(elapsed))
self.close_writer()
def after_epoch(self):
last_epoch = (self.epoch + 1) == self.max_epoch
do_test = not self.cfg.TEST.NO_TEST
meet_checkpoint_freq = (
self.epoch + 1
) % self.cfg.TRAIN.CHECKPOINT_FREQ == 0 if self.cfg.TRAIN.CHECKPOINT_FREQ > 0 else False
if do_test and self.cfg.TEST.FINAL_MODEL == 'best_val':
curr_val_acc = self.test(split='val')
if self.cfg.NNI:
nni.report_intermediate_result(curr_val_acc)
is_best = curr_val_acc > self.best_val_acc
if is_best:
self.best_val_acc = curr_val_acc
self.best_val_epoch = self.epoch + 1
self.save_model(
self.epoch,
self.output_dir,
model_name='model-best.pth.tar'
)
if do_test and self.cfg.TEST.TEST_EVERY_EPOCH:
curr_test_acc = self.test(split='test')
if curr_test_acc > self.best_test_acc:
self.best_test_acc = curr_test_acc
self.best_test_epoch = self.epoch + 1
if meet_checkpoint_freq or last_epoch:
self.save_model(self.epoch, self.output_dir)
@torch.no_grad()
def test(self, split=None, return_per_class_results=False):
self.set_model_mode('eval')
self.evaluator.reset()
if split is None:
split = self.cfg.TEST.SPLIT
if split == 'val' and self.val_loader is not None:
data_loader = self.val_loader
print('Do evaluation on {} set'.format(split))
else:
data_loader = self.test_loader
print('Do evaluation on test set')
for batch_idx, batch in enumerate(data_loader):
input, label = self.parse_batch_test(batch)
output = self.model_inference(input)
self.evaluator.process(output, label)
results = self.evaluator.evaluate()
for k, v in results.items():
if k == 'perclass_accuracies':
continue
tag = '{}/{}'.format(split, k)
self.write_scalar(tag, v, self.epoch)
if not return_per_class_results:
return list(results.values())[0]
else:
return results['accuracy'], results['perclass_accuracies']
def model_inference(self, input):
return self.model(input)
def parse_batch_test(self, batch):
input = batch['img']
label = batch['label']
input = input.to(self.device)
label = label.to(self.device)
return input, label
def get_current_lr(self, names=None):
names = self.get_model_names(names)
name = names[0]
return self._optims[name].param_groups[0]['lr']
class TrainerXU(SimpleTrainer):
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
len_train_loader_x = len(self.train_loader_x)
len_train_loader_u = len(self.train_loader_u)
if self.cfg.TRAIN.COUNT_ITER == 'train_x':
self.num_batches = len_train_loader_x
elif self.cfg.TRAIN.COUNT_ITER == 'train_u':
self.num_batches = len_train_loader_u
elif self.cfg.TRAIN.COUNT_ITER == 'smaller_one':
self.num_batches = min(len_train_loader_x, len_train_loader_u)
else:
raise ValueError
train_loader_x_iter = iter(self.train_loader_x)
train_loader_u_iter = iter(self.train_loader_u)
end = time.time()
for self.batch_idx in range(self.num_batches):
try:
batch_x = next(train_loader_x_iter)
except StopIteration:
train_loader_x_iter = iter(self.train_loader_x)
batch_x = next(train_loader_x_iter)
try:
batch_u = next(train_loader_u_iter)
except StopIteration:
train_loader_u_iter = iter(self.train_loader_u)
batch_u = next(train_loader_u_iter)
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch_x, batch_u)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch_x, batch_u):
input_x = batch_x['img']
label_x = batch_x['label']
input_u = batch_u['img']
input_x = input_x.to(self.device)
label_x = label_x.to(self.device)
input_u = input_u.to(self.device)
return input_x, label_x, input_u
class TrainerX(SimpleTrainer):
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
self.num_batches = len(self.train_loader_x)
end = time.time()
for self.batch_idx, batch in enumerate(self.train_loader_x):
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch):
input = batch['img']
label = batch['label']
domain = batch['domain']
input = input.to(self.device)
label = label.to(self.device)
domain = domain.to(self.device)
return input, label, domain
| true
| true
|
79086cf415b3df58a648cc1b688198fca1325d34
| 9,165
|
py
|
Python
|
python/3.2/Pubnub.py
|
goodybag/pubnub-api
|
7d37385f5dc58551c4e73674d35a26b0f239cc06
|
[
"MIT"
] | null | null | null |
python/3.2/Pubnub.py
|
goodybag/pubnub-api
|
7d37385f5dc58551c4e73674d35a26b0f239cc06
|
[
"MIT"
] | null | null | null |
python/3.2/Pubnub.py
|
goodybag/pubnub-api
|
7d37385f5dc58551c4e73674d35a26b0f239cc06
|
[
"MIT"
] | null | null | null |
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib.request
import uuid
class Pubnub():
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
#* @param boolean ssl required for 2048 bit encrypted messages.
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
        ## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
        if not isinstance(self.uuid, str):
raise AttributeError("pres_uuid must be a string")
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
            ]).encode('utf-8')).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
])
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
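## A quick sketch of what _encode does to each path segment: characters in
## the reserved set are percent-encoded (uppercase hex), the rest pass through.
##
##   pubnub._encode(['hello world'])   # -> ['hello%20world']
##   pubnub._encode(['a/b?c'])         # -> ['a%2Fb%3Fc']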
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
except:
return None
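## End-to-end usage sketch. Assumes the client class is named Pubnub (as in
## the official SDK) and uses the public 'demo' keypair, which may be
## throttled or disabled:
##
##   pubnub = Pubnub('demo', 'demo', None)
##   pubnub.publish({
##       'channel' : 'hello_world',
##       'message' : { 'some_text' : 'Hello my World' }
##   })
##   print(pubnub.history({ 'channel' : 'hello_world', 'limit' : 1 }))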
| 26.488439
| 114
| 0.481069
|
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
def publish( self, args ) :
if not (args.get('channel') and args.get('message')) :
return [ 0, 'Missing Channel or Message' ]
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
def subscribe( self, args ) :
if 'channel' not in args :
raise Exception('Missing Channel.')
if 'callback' not in args :
raise Exception('Missing Callback.')
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
if not len(messages) :
continue
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
def presence( self, args ) :
if 'channel' not in args :
raise Exception('Missing Channel.')
if 'callback' not in args :
raise Exception('Missing Callback.')
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
channel = str(args['channel'])
if not channel :
raise Exception('Missing Channel')
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
])
def history( self, args ) :
limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
if not channel :
raise Exception('Missing Channel')
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
])
def time(self) :
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
except:
return None
| true
| true
|
79086d64a14b56e12fb10948fc468d18dfb75294
| 8,125
|
py
|
Python
|
misc/python/materialize/cli/mzbench.py
|
sungchun12/materialize
|
7282293960368019d583163b2f06545b1379c0a1
|
[
"MIT"
] | null | null | null |
misc/python/materialize/cli/mzbench.py
|
sungchun12/materialize
|
7282293960368019d583163b2f06545b1379c0a1
|
[
"MIT"
] | 49
|
2021-07-20T18:33:05.000Z
|
2022-03-28T22:15:20.000Z
|
misc/python/materialize/cli/mzbench.py
|
sungchun12/materialize
|
7282293960368019d583163b2f06545b1379c0a1
|
[
"MIT"
] | null | null | null |
# Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
#
# mzbench.py -- script to run materialized benchmarks
import argparse
import csv
import itertools
import multiprocessing
import os
import pathlib
import subprocess
import sys
import typing
import uuid
import webbrowser
def mzbuild_tag(git_ref: str) -> str:
if not git_ref:
return git_ref
try:
return (
subprocess.check_output(
["git", "describe", "--exact-match", git_ref], stderr=subprocess.STDOUT
)
.strip()
.decode()
)
except subprocess.CalledProcessError:
unstable_ref = (
subprocess.check_output(["git", "rev-parse", "--verify", git_ref])
.strip()
.decode()
)
return f"unstable-{unstable_ref}"
def mzcompose_location(mz_root: str) -> pathlib.Path:
"""Return the absolute path to mzcompose.
MZ_ROOT is expected to be set via pyactivate.
"""
return pathlib.Path(mz_root, "bin", "mzcompose")
def main(args: argparse.Namespace) -> None:
# Ensure that we are working out of the git directory so that commands such as git work correctly
mz_root = os.environ["MZ_ROOT"]
os.chdir(mz_root)
worker_counts = enumerate_cpu_counts()
if args.no_benchmark_this_checkout:
git_references = args.git_references
else:
git_references = [None, *args.git_references]
if args.verbose:
build_tags = [None, *[mzbuild_tag(ref) for ref in args.git_references]]
print(f"DEBUG: num_iterations={args.num_measurements}")
print(f"DEBUG: worker_counts={worker_counts}")
print(f"DEBUG: mzbuild_tags={build_tags}")
if args.size == "benchmark-ci":
# Explicitly override the worker counts for the CI benchmark
worker_counts = [1]
setup_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"setup-benchmark-{args.size}",
]
run_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"run-benchmark-{args.size}",
]
field_names = [
"git_revision",
"num_workers",
"iteration",
"seconds_taken",
"rows_per_second",
"grafana_url",
]
results_writer = csv.DictWriter(sys.stdout, field_names)
results_writer.writeheader()
# We use check_output because check_call does not capture output
try:
subprocess.check_output(setup_benchmark, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
if args.web:
try:
web_command = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"web",
f"perf-dash-web",
]
output = subprocess.check_output(web_command, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(f"Failed to open browser to perf-dash:\n{e.output.decode()}")
raise
iterations = range(0, args.num_measurements)
for (iteration, worker_count, git_ref) in itertools.product(
iterations, worker_counts, git_references
):
# Sadly, environment variables are the only way to pass this information into containers
# started by mzcompose
child_env = os.environ.copy()
child_env["MZ_ROOT"] = mz_root
child_env["MZ_WORKERS"] = str(worker_count)
child_env["MZBENCH_ID"] = args.benchmark_id
child_env["MZBUILD_WAIT_FOR_IMAGE"] = "true"
if git_ref:
child_env["MZBENCH_GIT_REF"] = git_ref
child_env["MZBUILD_MATERIALIZED_TAG"] = mzbuild_tag(git_ref)
try:
output = subprocess.check_output(
run_benchmark, env=child_env, stderr=subprocess.STDOUT
)
except (subprocess.CalledProcessError,) as e:
# TODO: Don't exit with error on simple benchmark failure
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
# TODO: Replace parsing output from mzcompose with reading from a well known file or topic
seconds_taken = rows_per_second = grafana_url = None
for line in output.decode().splitlines():
if line.startswith("SUCCESS!"):
for token in line.split(" "):
if token.startswith("seconds_taken="):
seconds_taken = token[len("seconds_taken=") :]
elif token.startswith("rows_per_sec="):
rows_per_second = token[len("rows_per_sec=") :]
elif line.startswith("Grafana URL: "):
grafana_url = line[len("Grafana URL: ") :]
results_writer.writerow(
{
"git_revision": git_ref if git_ref else "None",
"num_workers": worker_count,
"iteration": iteration,
"seconds_taken": seconds_taken,
"rows_per_second": rows_per_second,
"grafana_url": grafana_url,
}
)
def enumerate_cpu_counts() -> typing.List[int]:
"""This program prints the number of CPU counts to benchmark on this machine.
We remove some percentage of CPU cores off the top for system / background processing. With
the CPUs that remain, we generate a list of evenly spaced worker counts. The list is limited
by the number of trials desired. This is meant to help us explore the number of CPUs that
should be dedicated to MZ_WORKERS, not as a prescription for the correct values to choose.
On a Macbook with 8 cores, this will return [6, 4, 3, 2].
On a 56 core machine, this returns [24, 18, 12, 6].
On a 96 core machine, this returns [41, 30, 20, 10].
"""
# Reserve ~15% for system overhead and count physical cores only (with 2 logical CPUs per core, 0.85 / 2 = 0.425)
max_cpus = round(multiprocessing.cpu_count() * 0.425)
num_trials = 4
# Yield the fractional points (4/4, 3/4, ...) between max and 0, not including 0
worker_counts = [round(i * max_cpus / num_trials) for i in range(num_trials, 0, -1)]
return list(reversed(sorted(set(worker_counts))))
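# Worked example for the arithmetic above on a hypothetical 56-CPU machine:
# max_cpus = round(56 * 0.425) = 24, and the evenly spaced counts are
# round(24 * 4/4) = 24, round(24 * 3/4) = 18, round(24 * 2/4) = 12 and
# round(24 * 1/4) = 6, so the function returns [24, 18, 12, 6].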
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-b",
"--benchmark-id",
type=str,
default=str(uuid.uuid4()),
help="Pseudo-unique identifier to use for this benchmark",
)
parser.add_argument(
"-n",
"--num-measurements",
type=int,
default=6,
help="Number of times to repeat each benchmark iteration",
)
parser.add_argument(
"-s",
"--size",
type=str,
default="medium",
choices=["medium", "ci", "large"],
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"--no-benchmark-this-checkout",
action="store_true",
help="Don't benchmark the version of materialized in this checkout",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging output"
)
parser.add_argument(
"-w",
"--web",
action="store_true",
help="Open a web browser showing results visualizations",
)
parser.add_argument(
"composition",
type=str,
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"git_references",
type=str,
nargs="*",
help="Materialized builds to test as well, identified by git reference",
)
args = parser.parse_args()
main(args)
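# The CSV written to stdout by results_writer has one row per
# (git_revision, num_workers, iteration) combination; illustrative output:
#
#   git_revision,num_workers,iteration,seconds_taken,rows_per_second,grafana_url
#   None,6,0,12.3,81300,http://localhost:3000/d/...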
| 31.130268
| 98
| 0.610092
|
import argparse
import csv
import itertools
import multiprocessing
import os
import pathlib
import subprocess
import sys
import typing
import uuid
import webbrowser
def mzbuild_tag(git_ref: str) -> str:
if not git_ref:
return git_ref
try:
return (
subprocess.check_output(
["git", "describe", "--exact-match", git_ref], stderr=subprocess.STDOUT
)
.strip()
.decode()
)
except subprocess.CalledProcessError:
unstable_ref = (
subprocess.check_output(["git", "rev-parse", "--verify", git_ref])
.strip()
.decode()
)
return f"unstable-{unstable_ref}"
def mzcompose_location(mz_root: str) -> pathlib.Path:
return pathlib.Path(mz_root, "bin", "mzcompose")
def main(args: argparse.Namespace) -> None:
mz_root = os.environ["MZ_ROOT"]
os.chdir(mz_root)
worker_counts = enumerate_cpu_counts()
if args.no_benchmark_this_checkout:
git_references = args.git_references
else:
git_references = [None, *args.git_references]
if args.verbose:
build_tags = [None, *[mzbuild_tag(ref) for ref in args.git_references]]
print(f"DEBUG: num_iterations={args.num_measurements}")
print(f"DEBUG: worker_counts={worker_counts}")
print(f"DEBUG: mzbuild_tags={build_tags}")
if args.size == "benchmark-ci":
worker_counts = [1]
setup_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"setup-benchmark-{args.size}",
]
run_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"run-benchmark-{args.size}",
]
field_names = [
"git_revision",
"num_workers",
"iteration",
"seconds_taken",
"rows_per_second",
"grafana_url",
]
results_writer = csv.DictWriter(sys.stdout, field_names)
results_writer.writeheader()
try:
subprocess.check_output(setup_benchmark, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
if args.web:
try:
web_command = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"web",
f"perf-dash-web",
]
output = subprocess.check_output(web_command, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(f"Failed to open browser to perf-dash:\n{e.output.decode()}")
raise
iterations = range(0, args.num_measurements)
for (iteration, worker_count, git_ref) in itertools.product(
iterations, worker_counts, git_references
):
child_env = os.environ.copy()
child_env["MZ_ROOT"] = mz_root
child_env["MZ_WORKERS"] = str(worker_count)
child_env["MZBENCH_ID"] = args.benchmark_id
child_env["MZBUILD_WAIT_FOR_IMAGE"] = "true"
if git_ref:
child_env["MZBENCH_GIT_REF"] = git_ref
child_env["MZBUILD_MATERIALIZED_TAG"] = mzbuild_tag(git_ref)
try:
output = subprocess.check_output(
run_benchmark, env=child_env, stderr=subprocess.STDOUT
)
except (subprocess.CalledProcessError,) as e:
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
# TODO: Replace parsing output from mzcompose with reading from a well known file or topic
seconds_taken = rows_per_second = grafana_url = None
for line in output.decode().splitlines():
if line.startswith("SUCCESS!"):
for token in line.split(" "):
if token.startswith("seconds_taken="):
seconds_taken = token[len("seconds_taken=") :]
elif token.startswith("rows_per_sec="):
rows_per_second = token[len("rows_per_sec=") :]
elif line.startswith("Grafana URL: "):
grafana_url = line[len("Grafana URL: ") :]
results_writer.writerow(
{
"git_revision": git_ref if git_ref else "None",
"num_workers": worker_count,
"iteration": iteration,
"seconds_taken": seconds_taken,
"rows_per_second": rows_per_second,
"grafana_url": grafana_url,
}
)
def enumerate_cpu_counts() -> typing.List[int]:
# Reserve ~15% for system overhead and count physical cores only (with 2 logical CPUs per core, 0.85 / 2 = 0.425)
max_cpus = round(multiprocessing.cpu_count() * 0.425)
num_trials = 4
# Yield the fractional points (4/4, 3/4, ...) between max and 0, not including 0
worker_counts = [round(i * max_cpus / num_trials) for i in range(num_trials, 0, -1)]
return list(reversed(sorted(set(worker_counts))))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-b",
"--benchmark-id",
type=str,
default=str(uuid.uuid4()),
help="Pseudo-unique identifier to use for this benchmark",
)
parser.add_argument(
"-n",
"--num-measurements",
type=int,
default=6,
help="Number of times to repeat each benchmark iteration",
)
parser.add_argument(
"-s",
"--size",
type=str,
default="medium",
choices=["medium", "ci", "large"],
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"--no-benchmark-this-checkout",
action="store_true",
help="Don't benchmark the version of materialized in this checkout",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging output"
)
parser.add_argument(
"-w",
"--web",
action="store_true",
help="Open a web browser showing results visualizations",
)
parser.add_argument(
"composition",
type=str,
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"git_references",
type=str,
nargs="*",
help="Materialized builds to test as well, identified by git reference",
)
args = parser.parse_args()
main(args)
| true
| true
|
79086dbd3c6207c0aff9b746906ada863490ee0e
| 39,901
|
py
|
Python
|
timesketch/lib/datastores/elastic.py
|
toshiro92/timesketch
|
3c8f39e2599dfab412412ff6787bc2c40e199f5f
|
[
"Apache-2.0"
] | null | null | null |
timesketch/lib/datastores/elastic.py
|
toshiro92/timesketch
|
3c8f39e2599dfab412412ff6787bc2c40e199f5f
|
[
"Apache-2.0"
] | null | null | null |
timesketch/lib/datastores/elastic.py
|
toshiro92/timesketch
|
3c8f39e2599dfab412412ff6787bc2c40e199f5f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
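# For example, two event dicts (IDs are illustrative) produce:
#
#   _build_events_query([{'event_id': 'a1'}, {'event_id': 'b2'}])
#   # -> {'query': {'ids': {'values': ['a1', 'b2']}}}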
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# Example return value: ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be 1 or 2 first items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
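# Worked example (illustrative timestamp): a -5m/+5m interval around noon
# expands to a ten minute window:
#
#   _convert_to_time_range('2018-12-05T12:00:00 -5m +5m')
#   # -> ('2018-12-05T11:55:00', '2018-12-05T12:05:00')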
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
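# A sketch of the common case (no chips, no DSL, empty filter): a bare query
# string becomes a bool query with the default ascending sort on datetime:
#
#   build_query(sketch_id=1, query_string='foo', query_filter={})
#   # -> {'query': {'bool': {'must': [{'query_string': {'query': 'foo'}}],
#   #                        'must_not': [], 'filter': []}},
#   #     'sort': {'datetime': 'asc'}}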
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get the result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents matches the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get the result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
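# Typical consumption of the generator above (sketch; the datastore
# instance, sketch id and index name are illustrative):
#
#   for event in datastore.search_stream(
#           sketch_id=1, query_string='*',
#           query_filter={}, indices=['my-index']):
#       print(event['_source'])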
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=None, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
# Generate the UUID per call rather than in the signature, where a
# default argument would be evaluated only once per process.
if index_name is None:
index_name = uuid4().hex
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) // 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
'Unable to add events, reached retry count max.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
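# A minimal usage sketch (assumes a running Flask app context, since the
# constructor reads current_app.config; host, port and field values are
# illustrative):
#
#   datastore = ElasticsearchDataStore(host='127.0.0.1', port=9200)
#   index_name, _ = datastore.create_index()
#   datastore.import_event(
#       index_name, 'generic_event',
#       event={'message': 'hello', 'datetime': '2021-01-01T00:00:00'})
#   datastore.flush_queued_events()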
| 36.673713
| 170
| 0.524774
|
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000
DEFAULT_FLUSH_RETRY_LIMIT = 3
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m'
def __init__(self, host='127.0.0.1', port=9200):
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
label_query = {
'bool': {
'must': []
}
}
for label in labels:
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
if not chip.get('active', True):
continue
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
if aggregations:
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m'
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
search_type = 'query_then_fetch'
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
max_labels = 10000
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
def get_event(self, searchindex_id, event_id):
METRICS['search_get_event'].inc()
try:
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
                    'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
    def create_index(
            self, index_name=None, doc_type='generic_event',
            mappings=None):
        # Default args are evaluated once at import time, so generate the
        # random index name per call rather than in the signature.
        if index_name is None:
            index_name = uuid4().hex
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
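    # Usage sketch (an assumption, not part of the original module):
    #
    #   index_name, doc_type = datastore.create_index('my-test-index')
    #   ... index some events ...
    #   datastore.delete_index('my-test-index')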
def delete_index(self, index_name):
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
try:
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
                    'Unable to add events, reached the retry count maximum.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
                    'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
except Exception:
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
version_info = self.client.info().get('version')
return version_info.get('number')
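
# Hypothetical end-to-end sketch (an assumption, not part of the original
# module): bulk import two events into a fresh index, with `datastore` as a
# configured instance of the datastore class above.
#
#   index_name, _ = datastore.create_index()
#   for message in ('first event', 'second event'):
#       datastore.import_event(
#           index_name, 'generic_event',
#           event={'message': message,
#                  'datetime': '2021-01-01T00:00:00'},
#           flush_interval=1)
#   results = datastore.flush_queued_events()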
| true
| true
|
79086dcff1ed12f2fb081ac280624f3e8c4a1686
| 24,031
|
py
|
Python
|
nova/tests/unit/api/openstack/fakes.py
|
mmnelemane/nova_stable_juno
|
e049553c7312480080091ad2481dcaf49113d571
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/api/openstack/fakes.py
|
mmnelemane/nova_stable_juno
|
e049553c7312480080091ad2481dcaf49113d571
|
[
"Apache-2.0"
] | 1
|
2021-03-21T11:39:52.000Z
|
2021-03-21T11:39:52.000Z
|
nova/tests/unit/api/openstack/fakes.py
|
mmnelemane/nova_stable_juno
|
e049553c7312480080091ad2481dcaf49113d571
|
[
"Apache-2.0"
] | 1
|
2021-03-21T11:37:33.000Z
|
2021-03-21T11:37:33.000Z
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import timeutils
import routes
import six
from six.moves import range
import webob
import webob.dec
import webob.request
from nova.api import auth as api_auth
from nova.api import openstack as openstack_api
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import auth
from nova.api.openstack import compute
from nova.api.openstack.compute.legacy_v2 import limits
from nova.api.openstack.compute import versions
from nova.api.openstack import urlmap
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova.db.sqlalchemy import models
from nova import exception as exc
import nova.netconf
from nova.network import api as network_api
from nova import objects
from nova.objects import base
from nova import quota
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit.objects import test_keypair
from nova import utils
from nova import wsgi
QUOTAS = quota.QUOTAS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}
class Context(object):
pass
class FakeRouter(wsgi.Router):
def __init__(self, ext_mgr=None):
pass
@webob.dec.wsgify
def __call__(self, req):
res = webob.Response()
res.status = '200'
res.headers['X-Test-Success'] = 'True'
return res
@webob.dec.wsgify
def fake_wsgi(self, req):
return self.application
def wsgi_app(inner_app_v2=None, fake_auth_context=None,
use_no_auth=False, ext_mgr=None, init_only=None):
if not inner_app_v2:
inner_app_v2 = compute.APIRouter(ext_mgr, init_only)
if use_no_auth:
api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v2)))
else:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
limits.RateLimitingMiddleware(inner_app_v2)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v2
mapper['/v1.1'] = api_v2
mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
return mapper
def wsgi_app_v21(inner_app_v21=None, fake_auth_context=None,
use_no_auth=False, ext_mgr=None, init_only=None, v2_compatible=False):
if not inner_app_v21:
inner_app_v21 = compute.APIRouterV21(init_only)
if v2_compatible:
inner_app_v21 = openstack_api.LegacyV2CompatibleWrapper(inner_app_v21)
if use_no_auth:
api_v21 = openstack_api.FaultWrapper(auth.NoAuthMiddlewareV3(
limits.RateLimitingMiddleware(inner_app_v21)))
else:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v21 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
limits.RateLimitingMiddleware(inner_app_v21)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v21
mapper['/v2.1'] = api_v21
mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
return mapper
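# Illustrative usage sketch (an assumption, not part of the original
# fixtures): build an unauthenticated v2.1 test app and push a fake request
# through it.
#
#   app = wsgi_app_v21(use_no_auth=True)
#   req = HTTPRequestV21.blank('/servers')
#   res = req.get_response(app)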
def stub_out_key_pair_funcs(stubs, have_key_pair=True, **kwargs):
def key_pair(context, user_id):
return [dict(test_keypair.fake_keypair,
name='key', public_key='public_key', **kwargs)]
def one_key_pair(context, user_id, name):
if name == 'key':
return dict(test_keypair.fake_keypair,
name='key', public_key='public_key', **kwargs)
else:
raise exc.KeypairNotFound(user_id=user_id, name=name)
def no_key_pair(context, user_id):
return []
if have_key_pair:
stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
stubs.Set(nova.db, 'key_pair_get', one_key_pair)
else:
stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
def stub_out_rate_limiting(stubs):
def fake_rate_init(self, app):
super(limits.RateLimitingMiddleware, self).__init__(app)
self.application = app
v2_limits = nova.api.openstack.compute.legacy_v2.limits
stubs.Set(v2_limits.RateLimitingMiddleware, '__init__', fake_rate_init)
stubs.Set(v2_limits.RateLimitingMiddleware, '__call__', fake_wsgi)
def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
def fake_reserve(context, **deltas):
requested = deltas.pop(resource, 0)
if requested > allowed:
quotas = dict(instances=1, cores=1, ram=1)
quotas[resource] = quota
usages = dict(instances=dict(in_use=0, reserved=0),
cores=dict(in_use=0, reserved=0),
ram=dict(in_use=0, reserved=0))
            usages[resource]['in_use'] = (
                quotas[resource] * 0.9 - allowed)
usages[resource]['reserved'] = quotas[resource] * 0.1
raise exc.OverQuota(overs=[resource], quotas=quotas,
usages=usages)
stubs.Set(QUOTAS, 'reserve', fake_reserve)
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
stubs.Set(netutils, 'get_my_ipv4', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance, name, extra_properties=None):
# emulate glance rejecting image names which are too long
if len(name) > 256:
raise exc.Invalid
return dict(id='123', status='ACTIVE', name=name,
properties=extra_properties)
stubs.Set(compute_api.API, 'snapshot', snapshot)
class stub_out_compute_api_backup(object):
def __init__(self, stubs):
self.stubs = stubs
self.extra_props_last_call = None
stubs.Set(compute_api.API, 'backup', self.backup)
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
self.extra_props_last_call = extra_properties
props = dict(backup_type=backup_type,
rotation=rotation)
props.update(extra_properties or {})
return dict(id='123', status='ACTIVE', name=name, properties=props)
def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
if not private:
private = '192.168.0.3'
if not publics:
publics = ['1.2.3.4']
class Fake(object):
def __init__(self, skip_policy_check=False):
pass
def get_instance_nw_info(*args, **kwargs):
pass
def get_floating_ips_by_fixed_address(*args, **kwargs):
return publics
def validate_networks(self, context, networks, max_count):
return max_count
def create_pci_requests_for_sriov_ports(self, context,
system_metadata,
requested_networks):
pass
if cls is None:
cls = Fake
stubs.Set(network_api, 'API', cls)
fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
class FakeToken(object):
id_count = 0
def __getitem__(self, key):
return getattr(self, key)
def __init__(self, **kwargs):
FakeToken.id_count += 1
self.id = FakeToken.id_count
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
class FakeRequestContext(context.RequestContext):
def __init__(self, *args, **kwargs):
kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
super(FakeRequestContext, self).__init__(*args, **kwargs)
class HTTPRequest(os_wsgi.Request):
@staticmethod
def blank(*args, **kwargs):
kwargs['base_url'] = 'http://localhost/v2'
use_admin_context = kwargs.pop('use_admin_context', False)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
out = os_wsgi.Request.blank(*args, **kwargs)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
out.api_version_request = api_version.APIVersionRequest(version)
return out
class HTTPRequestV21(os_wsgi.Request):
@staticmethod
def blank(*args, **kwargs):
kwargs['base_url'] = 'http://localhost/v3'
use_admin_context = kwargs.pop('use_admin_context', False)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
out = os_wsgi.Request.blank(*args, **kwargs)
out.api_version_request = api_version.APIVersionRequest(version)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
return out
class TestRouter(wsgi.Router):
def __init__(self, controller, mapper=None):
if not mapper:
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.Resource(controller))
super(TestRouter, self).__init__(mapper)
class TestRouterV21(wsgi.Router):
def __init__(self, controller, mapper=None):
if not mapper:
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.ResourceV21(controller))
super(TestRouterV21, self).__init__(mapper)
class FakeAuthDatabase(object):
data = {}
@staticmethod
def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
def auth_token_create(context, token):
fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
def auth_token_destroy(context, token_id):
token = FakeAuthDatabase.data.get('id_%i' % token_id)
if token and token.token_hash in FakeAuthDatabase.data:
del FakeAuthDatabase.data[token.token_hash]
del FakeAuthDatabase.data['id_%i' % token_id]
class FakeRateLimiter(object):
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, req):
return self.application
def create_info_cache(nw_cache):
if nw_cache is None:
pub0 = ('192.168.1.100',)
pub1 = ('2001:db8:0:1::1',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.1.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub1]}]}}]
if not isinstance(nw_cache, six.string_types):
nw_cache = jsonutils.dumps(nw_cache)
return {
"info_cache": {
"network_info": nw_cache,
"deleted": False,
"created_at": None,
"deleted_at": None,
"updated_at": None,
}
}
def get_fake_uuid(token=0):
if token not in FAKE_UUIDS:
FAKE_UUIDS[token] = str(uuid.uuid4())
return FAKE_UUIDS[token]
def fake_instance_get(**kwargs):
def _return_server(context, uuid, columns_to_join=None, use_slave=False):
return stub_instance(1, **kwargs)
return _return_server
def fake_compute_get(**kwargs):
def _return_server_obj(context, uuid, want_objects=False,
expected_attrs=None):
return stub_instance_obj(context, **kwargs)
return _return_server_obj
def fake_actions_to_locked_server(self, context, instance, *args, **kwargs):
raise exc.InstanceIsLocked(instance_uuid=instance['uuid'])
def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
def _return_servers(context, *args, **kwargs):
servers_list = []
marker = None
limit = None
found_marker = False
if "marker" in kwargs:
marker = kwargs["marker"]
if "limit" in kwargs:
limit = kwargs["limit"]
if 'columns_to_join' in kwargs:
kwargs.pop('columns_to_join')
if 'use_slave' in kwargs:
kwargs.pop('use_slave')
if 'sort_keys' in kwargs:
kwargs.pop('sort_keys')
if 'sort_dirs' in kwargs:
kwargs.pop('sort_dirs')
for i in range(num_servers):
uuid = get_fake_uuid(i)
server = stub_instance(id=i + 1, uuid=uuid,
**kwargs)
servers_list.append(server)
if marker is not None and uuid == marker:
found_marker = True
servers_list = []
if marker is not None and not found_marker:
raise exc.MarkerNotFound(marker=marker)
if limit is not None:
servers_list = servers_list[:limit]
return servers_list
return _return_servers
def fake_compute_get_all(num_servers=5, **kwargs):
def _return_servers_objs(context, search_opts=None, limit=None,
marker=None, want_objects=False,
expected_attrs=None, sort_keys=None,
sort_dirs=None):
db_insts = fake_instance_get_all_by_filters()(None,
limit=limit,
marker=marker)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups']
return base.obj_make_list(context, objects.InstanceList(),
objects.Instance, db_insts,
expected_attrs=expected)
return _return_servers_objs
def stub_instance(id=1, user_id=None, project_id=None, host=None,
node=None, vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0,
auto_disk_config=False, display_name=None,
include_fake_metadata=True, config_drive=None,
power_state=None, nw_cache=None, metadata=None,
security_groups=None, root_device_name=None,
limit=None, marker=None,
launched_at=timeutils.utcnow(),
terminated_at=timeutils.utcnow(),
availability_zone='', locked_by=None, cleaned=False,
memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
instance_type=None, launch_index=0, kernel_id="",
ramdisk_id="", user_data=None):
if user_id is None:
user_id = 'fake_user'
if project_id is None:
project_id = 'fake_project'
if metadata:
metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
elif include_fake_metadata:
metadata = [models.InstanceMetadata(key='seq', value=str(id))]
else:
metadata = []
inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
sys_meta = flavors.save_flavor_info({}, inst_type)
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
if security_groups is None:
security_groups = [{"id": 1, "name": "test", "description": "Foo:",
"project_id": "project", "user_id": "user",
"created_at": None, "updated_at": None,
"deleted_at": None, "deleted": False}]
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
info_cache = create_info_cache(nw_cache)
if instance_type is None:
instance_type = flavors.get_default_flavor()
flavorinfo = jsonutils.dumps({
'cur': instance_type.obj_to_primitive(),
'old': None,
'new': None,
})
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
"deleted": None,
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": kernel_id,
"ramdisk_id": ramdisk_id,
"launch_index": launch_index,
"key_name": key_name,
"key_data": key_data,
"config_drive": config_drive,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"power_state": power_state,
"memory_mb": memory_mb,
"vcpus": vcpus,
"root_gb": root_gb,
"ephemeral_gb": ephemeral_gb,
"ephemeral_key_uuid": None,
"hostname": display_name or server_name,
"host": host,
"node": node,
"instance_type_id": 1,
"instance_type": inst_type,
"user_data": user_data,
"reservation_id": reservation_id,
"mac_address": "",
"launched_at": launched_at,
"terminated_at": terminated_at,
"availability_zone": availability_zone,
"display_name": display_name or server_name,
"display_description": "",
"locked": locked_by is not None,
"locked_by": locked_by,
"metadata": metadata,
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress,
"auto_disk_config": auto_disk_config,
"name": "instance-%s" % id,
"shutdown_terminate": True,
"disable_terminate": False,
"security_groups": security_groups,
"root_device_name": root_device_name,
"system_metadata": utils.dict_to_metadata(sys_meta),
"pci_devices": [],
"vm_mode": "",
"default_swap_device": "",
"default_ephemeral_device": "",
"launched_on": "",
"cell_name": "",
"architecture": "",
"os_type": "",
"extra": {"numa_topology": None,
"pci_requests": None,
"flavor": flavorinfo,
},
"cleaned": cleaned}
instance.update(info_cache)
instance['info_cache']['instance_uuid'] = instance['uuid']
return instance
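# Usage sketch (an assumption, not part of the original fixtures): build a
# fake ACTIVE server dict for a test.
#
#   server = stub_instance(
#       id=42, uuid=get_fake_uuid(42), vm_state=vm_states.ACTIVE,
#       display_name='test-server', memory_mb=512, vcpus=1)
#   assert server['display_name'] == 'test-server'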
def stub_instance_obj(ctxt, *args, **kwargs):
db_inst = stub_instance(*args, **kwargs)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups']
inst = objects.Instance._from_db_object(ctxt, objects.Instance(),
db_inst,
expected_attrs=expected)
inst.fault = None
return inst
def stub_volume(id, **kwargs):
volume = {
'id': id,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
'name': 'vol name',
'display_name': 'displayname',
'display_description': 'displaydesc',
'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1),
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'}}
volume.update(kwargs)
return volume
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def stub_volume_update(self, context, *args, **param):
pass
def stub_volume_delete(self, context, *args, **param):
pass
def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)
def stub_volume_notfound(self, context, volume_id):
raise exc.VolumeNotFound(volume_id=volume_id)
def stub_volume_get_all(context, search_opts=None):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_check_attach(self, context, *args, **param):
pass
def stub_snapshot(id, **kwargs):
snapshot = {
'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': timeutils.utcnow(),
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'
}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_create(self, context, volume_id, name, description):
return stub_snapshot(100, volume_id=volume_id, display_name=name,
display_description=description)
def stub_compute_volume_snapshot_create(self, context, volume_id, create_info):
return {'snapshot': {'id': 100, 'volumeId': volume_id}}
def stub_snapshot_delete(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
def stub_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
pass
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
return stub_snapshot(snapshot_id)
def stub_snapshot_get_all(self, context):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'volume_id1', 'instance_uuid': instance_uuid}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'volume_id2', 'instance_uuid': instance_uuid})]
def fake_get_available_languages():
existing_translations = ['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US']
return existing_translations
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
| 33.100551
| 79
| 0.624402
|
import datetime
import uuid
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import timeutils
import routes
import six
from six.moves import range
import webob
import webob.dec
import webob.request
from nova.api import auth as api_auth
from nova.api import openstack as openstack_api
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import auth
from nova.api.openstack import compute
from nova.api.openstack.compute.legacy_v2 import limits
from nova.api.openstack.compute import versions
from nova.api.openstack import urlmap
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova.db.sqlalchemy import models
from nova import exception as exc
import nova.netconf
from nova.network import api as network_api
from nova import objects
from nova.objects import base
from nova import quota
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit.objects import test_keypair
from nova import utils
from nova import wsgi
QUOTAS = quota.QUOTAS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}
class Context(object):
pass
class FakeRouter(wsgi.Router):
def __init__(self, ext_mgr=None):
pass
@webob.dec.wsgify
def __call__(self, req):
res = webob.Response()
res.status = '200'
res.headers['X-Test-Success'] = 'True'
return res
@webob.dec.wsgify
def fake_wsgi(self, req):
return self.application
def wsgi_app(inner_app_v2=None, fake_auth_context=None,
use_no_auth=False, ext_mgr=None, init_only=None):
if not inner_app_v2:
inner_app_v2 = compute.APIRouter(ext_mgr, init_only)
if use_no_auth:
api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v2)))
else:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
limits.RateLimitingMiddleware(inner_app_v2)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v2
mapper['/v1.1'] = api_v2
mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
return mapper
def wsgi_app_v21(inner_app_v21=None, fake_auth_context=None,
use_no_auth=False, ext_mgr=None, init_only=None, v2_compatible=False):
if not inner_app_v21:
inner_app_v21 = compute.APIRouterV21(init_only)
if v2_compatible:
inner_app_v21 = openstack_api.LegacyV2CompatibleWrapper(inner_app_v21)
if use_no_auth:
api_v21 = openstack_api.FaultWrapper(auth.NoAuthMiddlewareV3(
limits.RateLimitingMiddleware(inner_app_v21)))
else:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v21 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
limits.RateLimitingMiddleware(inner_app_v21)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v21
mapper['/v2.1'] = api_v21
mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
return mapper
def stub_out_key_pair_funcs(stubs, have_key_pair=True, **kwargs):
def key_pair(context, user_id):
return [dict(test_keypair.fake_keypair,
name='key', public_key='public_key', **kwargs)]
def one_key_pair(context, user_id, name):
if name == 'key':
return dict(test_keypair.fake_keypair,
name='key', public_key='public_key', **kwargs)
else:
raise exc.KeypairNotFound(user_id=user_id, name=name)
def no_key_pair(context, user_id):
return []
if have_key_pair:
stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
stubs.Set(nova.db, 'key_pair_get', one_key_pair)
else:
stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
def stub_out_rate_limiting(stubs):
def fake_rate_init(self, app):
super(limits.RateLimitingMiddleware, self).__init__(app)
self.application = app
v2_limits = nova.api.openstack.compute.legacy_v2.limits
stubs.Set(v2_limits.RateLimitingMiddleware, '__init__', fake_rate_init)
stubs.Set(v2_limits.RateLimitingMiddleware, '__call__', fake_wsgi)
def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
def fake_reserve(context, **deltas):
requested = deltas.pop(resource, 0)
if requested > allowed:
quotas = dict(instances=1, cores=1, ram=1)
quotas[resource] = quota
usages = dict(instances=dict(in_use=0, reserved=0),
cores=dict(in_use=0, reserved=0),
ram=dict(in_use=0, reserved=0))
            usages[resource]['in_use'] = (
                quotas[resource] * 0.9 - allowed)
usages[resource]['reserved'] = quotas[resource] * 0.1
raise exc.OverQuota(overs=[resource], quotas=quotas,
usages=usages)
stubs.Set(QUOTAS, 'reserve', fake_reserve)
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
stubs.Set(netutils, 'get_my_ipv4', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance, name, extra_properties=None):
if len(name) > 256:
raise exc.Invalid
return dict(id='123', status='ACTIVE', name=name,
properties=extra_properties)
stubs.Set(compute_api.API, 'snapshot', snapshot)
class stub_out_compute_api_backup(object):
def __init__(self, stubs):
self.stubs = stubs
self.extra_props_last_call = None
stubs.Set(compute_api.API, 'backup', self.backup)
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
self.extra_props_last_call = extra_properties
props = dict(backup_type=backup_type,
rotation=rotation)
props.update(extra_properties or {})
return dict(id='123', status='ACTIVE', name=name, properties=props)
def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
if not private:
private = '192.168.0.3'
if not publics:
publics = ['1.2.3.4']
class Fake(object):
def __init__(self, skip_policy_check=False):
pass
def get_instance_nw_info(*args, **kwargs):
pass
def get_floating_ips_by_fixed_address(*args, **kwargs):
return publics
def validate_networks(self, context, networks, max_count):
return max_count
def create_pci_requests_for_sriov_ports(self, context,
system_metadata,
requested_networks):
pass
if cls is None:
cls = Fake
stubs.Set(network_api, 'API', cls)
fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
class FakeToken(object):
id_count = 0
def __getitem__(self, key):
return getattr(self, key)
def __init__(self, **kwargs):
FakeToken.id_count += 1
self.id = FakeToken.id_count
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
class FakeRequestContext(context.RequestContext):
def __init__(self, *args, **kwargs):
kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
super(FakeRequestContext, self).__init__(*args, **kwargs)
class HTTPRequest(os_wsgi.Request):
@staticmethod
def blank(*args, **kwargs):
kwargs['base_url'] = 'http://localhost/v2'
use_admin_context = kwargs.pop('use_admin_context', False)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
out = os_wsgi.Request.blank(*args, **kwargs)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
out.api_version_request = api_version.APIVersionRequest(version)
return out
class HTTPRequestV21(os_wsgi.Request):
@staticmethod
def blank(*args, **kwargs):
kwargs['base_url'] = 'http://localhost/v3'
use_admin_context = kwargs.pop('use_admin_context', False)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
out = os_wsgi.Request.blank(*args, **kwargs)
out.api_version_request = api_version.APIVersionRequest(version)
out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
is_admin=use_admin_context)
return out
class TestRouter(wsgi.Router):
def __init__(self, controller, mapper=None):
if not mapper:
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.Resource(controller))
super(TestRouter, self).__init__(mapper)
class TestRouterV21(wsgi.Router):
def __init__(self, controller, mapper=None):
if not mapper:
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.ResourceV21(controller))
super(TestRouterV21, self).__init__(mapper)
class FakeAuthDatabase(object):
data = {}
@staticmethod
def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
def auth_token_create(context, token):
fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
def auth_token_destroy(context, token_id):
token = FakeAuthDatabase.data.get('id_%i' % token_id)
if token and token.token_hash in FakeAuthDatabase.data:
del FakeAuthDatabase.data[token.token_hash]
del FakeAuthDatabase.data['id_%i' % token_id]
class FakeRateLimiter(object):
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, req):
return self.application
def create_info_cache(nw_cache):
if nw_cache is None:
pub0 = ('192.168.1.100',)
pub1 = ('2001:db8:0:1::1',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.1.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub1]}]}}]
if not isinstance(nw_cache, six.string_types):
nw_cache = jsonutils.dumps(nw_cache)
return {
"info_cache": {
"network_info": nw_cache,
"deleted": False,
"created_at": None,
"deleted_at": None,
"updated_at": None,
}
}
def get_fake_uuid(token=0):
if token not in FAKE_UUIDS:
FAKE_UUIDS[token] = str(uuid.uuid4())
return FAKE_UUIDS[token]
def fake_instance_get(**kwargs):
def _return_server(context, uuid, columns_to_join=None, use_slave=False):
return stub_instance(1, **kwargs)
return _return_server
def fake_compute_get(**kwargs):
def _return_server_obj(context, uuid, want_objects=False,
expected_attrs=None):
return stub_instance_obj(context, **kwargs)
return _return_server_obj
def fake_actions_to_locked_server(self, context, instance, *args, **kwargs):
raise exc.InstanceIsLocked(instance_uuid=instance['uuid'])
def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
def _return_servers(context, *args, **kwargs):
servers_list = []
marker = None
limit = None
found_marker = False
if "marker" in kwargs:
marker = kwargs["marker"]
if "limit" in kwargs:
limit = kwargs["limit"]
if 'columns_to_join' in kwargs:
kwargs.pop('columns_to_join')
if 'use_slave' in kwargs:
kwargs.pop('use_slave')
if 'sort_keys' in kwargs:
kwargs.pop('sort_keys')
if 'sort_dirs' in kwargs:
kwargs.pop('sort_dirs')
for i in range(num_servers):
uuid = get_fake_uuid(i)
server = stub_instance(id=i + 1, uuid=uuid,
**kwargs)
servers_list.append(server)
if marker is not None and uuid == marker:
found_marker = True
servers_list = []
if marker is not None and not found_marker:
raise exc.MarkerNotFound(marker=marker)
if limit is not None:
servers_list = servers_list[:limit]
return servers_list
return _return_servers
def fake_compute_get_all(num_servers=5, **kwargs):
def _return_servers_objs(context, search_opts=None, limit=None,
marker=None, want_objects=False,
expected_attrs=None, sort_keys=None,
sort_dirs=None):
db_insts = fake_instance_get_all_by_filters()(None,
limit=limit,
marker=marker)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups']
return base.obj_make_list(context, objects.InstanceList(),
objects.Instance, db_insts,
expected_attrs=expected)
return _return_servers_objs
def stub_instance(id=1, user_id=None, project_id=None, host=None,
node=None, vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0,
auto_disk_config=False, display_name=None,
include_fake_metadata=True, config_drive=None,
power_state=None, nw_cache=None, metadata=None,
security_groups=None, root_device_name=None,
limit=None, marker=None,
launched_at=timeutils.utcnow(),
terminated_at=timeutils.utcnow(),
availability_zone='', locked_by=None, cleaned=False,
memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
instance_type=None, launch_index=0, kernel_id="",
ramdisk_id="", user_data=None):
if user_id is None:
user_id = 'fake_user'
if project_id is None:
project_id = 'fake_project'
if metadata:
metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
elif include_fake_metadata:
metadata = [models.InstanceMetadata(key='seq', value=str(id))]
else:
metadata = []
inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
sys_meta = flavors.save_flavor_info({}, inst_type)
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
if security_groups is None:
security_groups = [{"id": 1, "name": "test", "description": "Foo:",
"project_id": "project", "user_id": "user",
"created_at": None, "updated_at": None,
"deleted_at": None, "deleted": False}]
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
info_cache = create_info_cache(nw_cache)
if instance_type is None:
instance_type = flavors.get_default_flavor()
flavorinfo = jsonutils.dumps({
'cur': instance_type.obj_to_primitive(),
'old': None,
'new': None,
})
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
"deleted": None,
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": kernel_id,
"ramdisk_id": ramdisk_id,
"launch_index": launch_index,
"key_name": key_name,
"key_data": key_data,
"config_drive": config_drive,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"power_state": power_state,
"memory_mb": memory_mb,
"vcpus": vcpus,
"root_gb": root_gb,
"ephemeral_gb": ephemeral_gb,
"ephemeral_key_uuid": None,
"hostname": display_name or server_name,
"host": host,
"node": node,
"instance_type_id": 1,
"instance_type": inst_type,
"user_data": user_data,
"reservation_id": reservation_id,
"mac_address": "",
"launched_at": launched_at,
"terminated_at": terminated_at,
"availability_zone": availability_zone,
"display_name": display_name or server_name,
"display_description": "",
"locked": locked_by is not None,
"locked_by": locked_by,
"metadata": metadata,
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress,
"auto_disk_config": auto_disk_config,
"name": "instance-%s" % id,
"shutdown_terminate": True,
"disable_terminate": False,
"security_groups": security_groups,
"root_device_name": root_device_name,
"system_metadata": utils.dict_to_metadata(sys_meta),
"pci_devices": [],
"vm_mode": "",
"default_swap_device": "",
"default_ephemeral_device": "",
"launched_on": "",
"cell_name": "",
"architecture": "",
"os_type": "",
"extra": {"numa_topology": None,
"pci_requests": None,
"flavor": flavorinfo,
},
"cleaned": cleaned}
instance.update(info_cache)
instance['info_cache']['instance_uuid'] = instance['uuid']
return instance
def stub_instance_obj(ctxt, *args, **kwargs):
db_inst = stub_instance(*args, **kwargs)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups']
inst = objects.Instance._from_db_object(ctxt, objects.Instance(),
db_inst,
expected_attrs=expected)
inst.fault = None
return inst
def stub_volume(id, **kwargs):
volume = {
'id': id,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
'name': 'vol name',
'display_name': 'displayname',
'display_description': 'displaydesc',
'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1),
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'}}
volume.update(kwargs)
return volume
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def stub_volume_update(self, context, *args, **param):
pass
def stub_volume_delete(self, context, *args, **param):
pass
def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)
def stub_volume_notfound(self, context, volume_id):
raise exc.VolumeNotFound(volume_id=volume_id)
def stub_volume_get_all(context, search_opts=None):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_check_attach(self, context, *args, **param):
pass
def stub_snapshot(id, **kwargs):
snapshot = {
'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': timeutils.utcnow(),
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'
}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_create(self, context, volume_id, name, description):
return stub_snapshot(100, volume_id=volume_id, display_name=name,
display_description=description)
def stub_compute_volume_snapshot_create(self, context, volume_id, create_info):
return {'snapshot': {'id': 100, 'volumeId': volume_id}}
def stub_snapshot_delete(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
def stub_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
pass
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
return stub_snapshot(snapshot_id)
def stub_snapshot_get_all(self, context):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'volume_id1', 'instance_uuid': instance_uuid}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'volume_id2', 'instance_uuid': instance_uuid})]
def fake_get_available_languages():
existing_translations = ['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US']
return existing_translations
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
| true
| true
|
79086f3324c1f07e8fa8af6bd70c4d211628eab7
| 1,160
|
py
|
Python
|
hooks/post_gen_project.py
|
glyg/cookiecutter-flask
|
3aef56e4169308b52dc405e2d475ad3357b93f3c
|
[
"MIT"
] | 1
|
2016-12-06T03:10:09.000Z
|
2016-12-06T03:10:09.000Z
|
hooks/post_gen_project.py
|
glyg/cookiecutter-flask
|
3aef56e4169308b52dc405e2d475ad3357b93f3c
|
[
"MIT"
] | 146
|
2021-03-18T07:07:52.000Z
|
2022-03-25T22:07:02.000Z
|
hooks/post_gen_project.py
|
moxuanchen/cookiecutter-flask
|
649818848bf401e1a1e5e4397c4eb19e0f801812
|
[
"MIT"
] | null | null | null |
"""Post gen hook to ensure that the generated project
has only one package management system, either pipenv or pip."""
import logging
import os
import shutil
import sys
_logger = logging.getLogger()
def clean_extra_package_management_files():
"""Removes either requirements files and folder or the Pipfile."""
use_pipenv = "{{cookiecutter.use_pipenv}}"
use_heroku = "{{cookiecutter.use_heroku}}"
to_delete = []
if use_pipenv == "yes":
to_delete = to_delete + ["requirements.txt", "requirements"]
else:
to_delete.append("Pipfile")
if use_heroku == "no":
to_delete = to_delete + ["Procfile", "app.json"]
try:
for file_or_dir in to_delete:
if os.path.isfile(file_or_dir):
os.remove(file_or_dir)
else:
shutil.rmtree(file_or_dir)
shutil.copy(".env.example", ".env")
open("dev.db", 'a').close()
except OSError as e:
_logger.warning("While attempting to remove file(s) an error occurred")
_logger.warning(f"Error: {e}")
sys.exit(1)
if __name__ == "__main__":
clean_extra_package_management_files()
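
# Rendered-behavior note (an illustration, not part of the hook): with
# use_pipenv == "yes" and use_heroku == "no", the generated project keeps
# the Pipfile while requirements.txt, the requirements/ directory, Procfile
# and app.json are removed.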
| 28.292683
| 79
| 0.637931
|
import logging
import os
import shutil
import sys
_logger = logging.getLogger()
def clean_extra_package_management_files():
use_pipenv = "{{cookiecutter.use_pipenv}}"
use_heroku = "{{cookiecutter.use_heroku}}"
to_delete = []
if use_pipenv == "yes":
to_delete = to_delete + ["requirements.txt", "requirements"]
else:
to_delete.append("Pipfile")
if use_heroku == "no":
to_delete = to_delete + ["Procfile", "app.json"]
try:
for file_or_dir in to_delete:
if os.path.isfile(file_or_dir):
os.remove(file_or_dir)
else:
shutil.rmtree(file_or_dir)
shutil.copy(".env.example", ".env")
open("dev.db", 'a').close()
except OSError as e:
_logger.warning("While attempting to remove file(s) an error occurred")
_logger.warning(f"Error: {e}")
sys.exit(1)
if __name__ == "__main__":
clean_extra_package_management_files()
| true
| true
|
79087125bbd37efa9befe18eb7b00bad23b84183
| 1,577
|
py
|
Python
|
DuoratChar/data/convert.py
|
WDZRMPCBIT/chase
|
c23906b109282f7f5ec529279af35964b7c77744
|
[
"MIT"
] | null | null | null |
DuoratChar/data/convert.py
|
WDZRMPCBIT/chase
|
c23906b109282f7f5ec529279af35964b7c77744
|
[
"MIT"
] | null | null | null |
DuoratChar/data/convert.py
|
WDZRMPCBIT/chase
|
c23906b109282f7f5ec529279af35964b7c77744
|
[
"MIT"
] | null | null | null |
import json
import os
import argparse
def main(split):
with open(args.data_path + '/' + split + '.json') as f:
data = json.load(f)
sparc = []
for i in range(len(data)):
d = data[i]
for j in range(len(d['interaction'])):
turn = d['interaction'][j]
sparc.append({})
sparc[-1]['interaction_id'] = i + 1
sparc[-1]['turn_id'] = j + 1
sparc[-1]['db_id'] = d['database_id']
sparc[-1]['query'] = turn['query']
sparc[-1]['question'] = turn['utterance'].replace('“', '\"').replace(
'”', '\"').replace('‘', '\"').replace('’', '\"') + '>>>'
sparc[-1]['query_toks_no_value'] = turn['query_toks_no_value']
sparc[-1]['question_toks'] = turn['utterance_toks']
if j:
sparc[-1]['question'] = sparc[-1]['question'] + \
sparc[-2]['question']
sparc[-1]['sql'] = turn['sql']
sparc[-1]['question'] = sparc[-1]['question'].replace('*', '')
sparc[-1]['question_toks'] = [tok.replace('*', '')
for tok in sparc[-1]['question_toks'] if tok != '*']
with open(os.path.join(args.data_path, split) + '.json', 'w') as f:
json.dump(sparc, f, ensure_ascii=False, indent=2)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", '-dp', type=str)
args = parser.parse_args()
for split in ['train', 'dev']:
main(split)
print('convert done')
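
# Data-shape sketch (keys taken from the code above; the values are
# illustrative assumptions). An input interaction such as
#
#   {"database_id": "concert_singer",
#    "interaction": [
#        {"utterance": "How many singers?", "query": "SELECT ...",
#         "query_toks_no_value": [...], "utterance_toks": [...],
#         "sql": {...}}]}
#
# is flattened into one record per turn; each turn's question gets a '>>>'
# suffix and is concatenated with the questions of all earlier turns in the
# same interaction.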
| 35.840909
| 94
| 0.492708
|
import json
import os
import argparse
def main(split):
with open(args.data_path + '/' + split + '.json') as f:
data = json.load(f)
sparc = []
for i in range(len(data)):
d = data[i]
for j in range(len(d['interaction'])):
turn = d['interaction'][j]
sparc.append({})
sparc[-1]['interaction_id'] = i + 1
sparc[-1]['turn_id'] = j + 1
sparc[-1]['db_id'] = d['database_id']
sparc[-1]['query'] = turn['query']
sparc[-1]['question'] = turn['utterance'].replace('“', '\"').replace(
'”', '\"').replace('‘', '\"').replace('’', '\"') + '>>>'
sparc[-1]['query_toks_no_value'] = turn['query_toks_no_value']
sparc[-1]['question_toks'] = turn['utterance_toks']
if j:
sparc[-1]['question'] = sparc[-1]['question'] + \
sparc[-2]['question']
sparc[-1]['sql'] = turn['sql']
sparc[-1]['question'] = sparc[-1]['question'].replace('*', '')
sparc[-1]['question_toks'] = [tok.replace('*', '')
for tok in sparc[-1]['question_toks'] if tok != '*']
with open(os.path.join(args.data_path, split) + '.json', 'w') as f:
json.dump(sparc, f, ensure_ascii=False, indent=2)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", '-dp', type=str)
args = parser.parse_args()
for split in ['train', 'dev']:
main(split)
print('convert done')
| true
| true
|
790872f131452d532e5c7b2a86e596058a502cb9
| 1,572
|
py
|
Python
|
credit-card-fraud/src/features/build_features.py
|
samie-hash/data-science-repo
|
574ebad704e3f2ebce18f573af87cd95571b4cc9
|
[
"MIT"
] | null | null | null |
credit-card-fraud/src/features/build_features.py
|
samie-hash/data-science-repo
|
574ebad704e3f2ebce18f573af87cd95571b4cc9
|
[
"MIT"
] | null | null | null |
credit-card-fraud/src/features/build_features.py
|
samie-hash/data-science-repo
|
574ebad704e3f2ebce18f573af87cd95571b4cc9
|
[
"MIT"
] | null | null | null |
# build_features.py
# This module holds utility classes and functions that create and manipulate input features
# This module also holds the various input transformers
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
def correlation_columns(dataset: pd.DataFrame, target_column: str, k: float=0.5):
"""
    Columns that are correlated with the target column.
Parameters
----------
dataset: pd.DataFrame
The pandas dataframe
target_column: str
The target column to calculate correlation against
k: float
        The absolute-correlation cutoff point; defaults to 0.5. Columns
        whose absolute correlation with the target is at least k are kept.
Returns
-------
columns: list
A list of columns that are correlated to the target column based on the cutoff point
"""
corr = np.abs(dataset.corr()[target_column])
corr_sorted = corr.sort_values(ascending=False)
columns = [col for col, value in zip(corr_sorted.index, corr_sorted.values) if value >= k and col != target_column]
return columns
class ColumnExtractor(BaseEstimator, TransformerMixin):
"""Columns Extractor based on correlation to the output label"""
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.columns]
if __name__ == '__main__':
correlation_columns(pd.read_csv('././data/raw/creditcard.csv'), 'Class', k=0.3)
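
# Usage sketch on a toy frame (an illustration, not part of the module):
#
#   df = pd.DataFrame({'v1': [1, 2, 3, 4], 'v2': [4, 3, 2, 1],
#                      'Class': [0, 0, 1, 1]})
#   cols = correlation_columns(df, 'Class', k=0.5)
#   X_subset = ColumnExtractor(cols).fit_transform(df)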
| 29.660377
| 119
| 0.692112
|
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
def correlation_columns(dataset: pd.DataFrame, target_column: str, k: float=0.5):
corr = np.abs(dataset.corr()[target_column])
corr_sorted = corr.sort_values(ascending=False)
columns = [col for col, value in zip(corr_sorted.index, corr_sorted.values) if value >= k and col != target_column]
return columns
class ColumnExtractor(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.columns]
if __name__ == '__main__':
correlation_columns(pd.read_csv('././data/raw/creditcard.csv'), 'Class', k=0.3)
| true
| true
|
790873705a04a5482d6eb62a668a5405a1019ab5
| 11,218
|
py
|
Python
|
docs/conf.py
|
adafruit/Adafruit_MicroPython_PCF8523
|
78f5162c5db5832d6654d99042936db265d01f26
|
[
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null |
docs/conf.py
|
adafruit/Adafruit_MicroPython_PCF8523
|
78f5162c5db5832d6654d99042936db265d01f26
|
[
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null |
docs/conf.py
|
adafruit/Adafruit_MicroPython_PCF8523
|
78f5162c5db5832d6654d99042936db265d01f26
|
[
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
#
# Adafruit PCF8523 RTC Library documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 11 21:37:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.todo", "sphinx.ext.intersphinx"]
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["adafruit_bus_device", "adafruit_register"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit PCF8523 RTC Library"
copyright = "2016, Philip Moyer"
author = "Philip Moyer"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Adafruit PCF8523 RTC Library v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# htmlhelp_basename = 'AdafruitsPCF8523RTCLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitPCF8523RTCLibrary.tex",
"Adafruit PCF8523 RTC Library Documentation",
"Philip Moyer",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitPCF8523RTCLibrary23rtclibrary",
"Adafruit PCF8523 RTC Library Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitPCF8523RTCLibrary",
"Adafruit PCF8523 RTC Library Documentation",
author,
"AdafruitPCF8523RTCLibrary",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"CircuitPython": ("https://docs.circuitpython.org/en/latest/", None),
}
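# Editor's note (assumed workflow, not part of this conf.py): with this file
# in place, the HTML docs are typically built from the docs/ directory with
#   sphinx-build -E -b html . _build/html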
| 29.598945
| 85
| 0.700214
|
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
extensions = ["sphinx.ext.autodoc", "sphinx.ext.todo", "sphinx.ext.intersphinx"]
templates_path = ["_templates"]
source_suffix = [".rst", ".md"]
master_doc = "index"
project = "Adafruit PCF8523 RTC Library"
copyright = "2016, Philip Moyer"
author = "Philip Moyer"
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
html_favicon = "_static/favicon.ico"
html_static_path = ["_static"]
latex_elements = {
}
latex_documents = [
(
master_doc,
"AdafruitPCF8523RTCLibrary.tex",
"Adafruit PCF8523 RTC Library Documentation",
"Philip Moyer",
"manual",
),
]
man_pages = [
(
master_doc,
"AdafruitPCF8523RTCLibrary23rtclibrary",
"Adafruit PCF8523 RTC Library Documentation",
[author],
1,
)
]
texinfo_documents = [
(
master_doc,
"AdafruitPCF8523RTCLibrary",
"Adafruit PCF8523 RTC Library Documentation",
author,
"AdafruitPCF8523RTCLibrary",
"One line description of project.",
"Miscellaneous",
),
]
#
# texinfo_no_detailmenu = False
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"CircuitPython": ("https://docs.circuitpython.org/en/latest/", None),
}
| true
| true
|
7908755cd17f8c5d8f139d16b941849fc55fe625
| 519
|
py
|
Python
|
examples/simple_sweep/rosen_1.py
|
zoidy/puq
|
ed70f113f7c135ee61adeebfc9bd18c541970caf
|
[
"MIT"
] | null | null | null |
examples/simple_sweep/rosen_1.py
|
zoidy/puq
|
ed70f113f7c135ee61adeebfc9bd18c541970caf
|
[
"MIT"
] | null | null | null |
examples/simple_sweep/rosen_1.py
|
zoidy/puq
|
ed70f113f7c135ee61adeebfc9bd18c541970caf
|
[
"MIT"
] | null | null | null |
from puq import *
import numpy as np
# test case with just a single point
def run():
# Declare our parameters here. Both are uniform on [-2, 2]
x = UniformParameter('x', 'x', min=-2, max=2)
y = UniformParameter('y', 'y', min=-2, max=2)
# Create a host
host = InteractiveHost()
# any of these should work
# valarray = np.array([[1],[0]])
valarray = [[-1,0,1], [0,0,0]]
uq = SimpleSweep([x,y], valarray)
prog = TestProgram('./rosen_prog.py')
return Sweep(uq, host, prog)
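# Worked example (editor's annotation, not part of the original script):
# valarray holds one coordinate list per parameter, so [[-1, 0, 1], [0, 0, 0]]
# sweeps exactly three (x, y) points, while the commented-out
# np.array([[1], [0]]) variant would evaluate the single point (1, 0).
_example_points = list(zip([-1, 0, 1], [0, 0, 0]))  # [(-1, 0), (0, 0), (1, 0)]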
| 23.590909
| 62
| 0.597303
|
from puq import *
import numpy as np
def run():
x = UniformParameter('x', 'x', min=-2, max=2)
y = UniformParameter('y', 'y', min=-2, max=2)
host = InteractiveHost()
valarray = [[-1,0,1], [0,0,0]]
uq = SimpleSweep([x,y], valarray)
prog = TestProgram('./rosen_prog.py')
return Sweep(uq, host, prog)
| true
| true
|
7908758620b02ca7311d63dcbdc7574cbd5dd6ab
| 5,012
|
py
|
Python
|
src/bionev/evaluation.py
|
bhevencious/BioNEV
|
3ec46c503fb147a8fb1b017d90b0f4ba2317f8f7
|
[
"MIT"
] | 1
|
2021-05-30T05:32:10.000Z
|
2021-05-30T05:32:10.000Z
|
src/bionev/evaluation.py
|
bhevencious/BioNEV
|
3ec46c503fb147a8fb1b017d90b0f4ba2317f8f7
|
[
"MIT"
] | null | null | null |
src/bionev/evaluation.py
|
bhevencious/BioNEV
|
3ec46c503fb147a8fb1b017d90b0f4ba2317f8f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from bionev.utils import *
def LinkPrediction(embedding_look_up, original_graph, train_graph, test_pos_edges, seed):
random.seed(seed)
train_neg_edges = generate_neg_edges(original_graph, len(train_graph.edges()), seed)
    # create an auxiliary graph to ensure that testing negative edges are not used in training
G_aux = copy.deepcopy(original_graph)
G_aux.add_edges_from(train_neg_edges)
test_neg_edges = generate_neg_edges(G_aux, len(test_pos_edges), seed)
# construct X_train, y_train, X_test, y_test
X_train = []
y_train = []
for edge in train_graph.edges():
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_train.append(feature_vector)
y_train.append(1)
for edge in train_neg_edges:
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_train.append(feature_vector)
y_train.append(0)
X_test = []
y_test = []
for edge in test_pos_edges:
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_test.append(feature_vector)
y_test.append(1)
for edge in test_neg_edges:
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_test.append(feature_vector)
y_test.append(0)
# shuffle for training and testing
c = list(zip(X_train, y_train))
random.shuffle(c)
X_train, y_train = zip(*c)
c = list(zip(X_test, y_test))
random.shuffle(c)
X_test, y_test = zip(*c)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
clf1 = LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs')
clf1.fit(X_train, y_train)
y_pred_proba = clf1.predict_proba(X_test)[:, 1]
y_pred = clf1.predict(X_test)
auc_roc = roc_auc_score(y_test, y_pred_proba)
avg_pr = average_precision_score(y_test, y_pred_proba)
precision = precision_score(y_test, y_pred, average='binary')
recall = recall_score(y_test, y_pred, average='binary')
accuracy = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
mcc = matthews_corrcoef(y_test, y_pred)
top_1, top_3 = predHits(y_test, y_pred, clf1.predict(X_test), clf1.predict(X_test))
print('#' * 35 + ' Link Prediction Performance ' + '#' * 35)
print(f'AUC-ROC: {auc_roc:.3f}, AVG-PR: {avg_pr:.3f}, Precision: {precision:.3f}, Recall: {recall:.3f}, Accuracy: {accuracy:.3f}, F1: {f1:.3f}, MCC: {mcc:.3f}, Top_1: {top_1:.3f}, Top_3: {top_3:.3f}')
print('#' * 100)
return auc_roc, avg_pr, precision, recall, accuracy, f1, mcc, top_1, top_3
def NodeClassification(embedding_look_up, node_list, labels, testing_ratio, seed):
X_train, y_train, X_test, y_test = split_train_test_classify(embedding_look_up, node_list, labels,
testing_ratio=testing_ratio,seed=seed)
binarizer = MultiLabelBinarizer(sparse_output=True)
y_all = np.append(y_train, y_test)
binarizer.fit(y_all)
y_train = binarizer.transform(y_train).todense()
y_test = binarizer.transform(y_test).todense()
model = OneVsRestClassifier(LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs'))
model.fit(X_train, y_train)
y_pred_prob = model.predict_proba(X_test)
    ## small trick: we assume that we know how many labels to predict
y_pred = get_y_pred(y_test, y_pred_prob)
accuracy = accuracy_score(y_test, y_pred)
micro_f1 = f1_score(y_test, y_pred, average="micro")
macro_f1 = f1_score(y_test, y_pred, average="macro")
print('#' * 9 + ' Node Classification Performance ' + '#' * 9)
print(f'Accuracy: {accuracy:.3f}, Micro-F1: {micro_f1:.3f}, Macro-F1: {macro_f1:.3f}')
print('#' * 50)
return accuracy, micro_f1, macro_f1
def predHits(truth, pred1, pred2, pred3):
hits_1 = 0
hits_3 = 0
pred1 = np.rint(pred1).astype(np.int32)
pred2 = np.rint(pred2).astype(np.int32)
pred3 = np.rint(pred3).astype(np.int32)
for i in range(len(truth)):
if truth[i] == pred1[i]:
hits_1 = hits_1 + 1
if (truth[i] == pred1[i]) or (truth[i] == pred2[i]) or (truth[i] == pred3[i]):
hits_3 = hits_3 + 1
top_1 = hits_1/len(truth)
top_3 = hits_3/len(truth)
return top_1, top_3
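# Worked example (editor's annotation, not in the original module): predHits
# rounds each prediction to the nearest integer and counts exact matches, so
# with truth = [1, 0, 1] and pred1 = [0.9, 0.2, 0.4] -> np.rint -> [1, 0, 0]
# we get hits_1 = 2 and top_1 = 2/3. LinkPrediction above passes the same
# clf1 predictions as pred1, pred2 and pred3, so top_3 equals top_1 there.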
| 39.15625
| 204
| 0.67937
|
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from bionev.utils import *
def LinkPrediction(embedding_look_up, original_graph, train_graph, test_pos_edges, seed):
random.seed(seed)
train_neg_edges = generate_neg_edges(original_graph, len(train_graph.edges()), seed)
G_aux = copy.deepcopy(original_graph)
G_aux.add_edges_from(train_neg_edges)
test_neg_edges = generate_neg_edges(G_aux, len(test_pos_edges), seed)
X_train = []
y_train = []
for edge in train_graph.edges():
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_train.append(feature_vector)
y_train.append(1)
for edge in train_neg_edges:
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_train.append(feature_vector)
y_train.append(0)
X_test = []
y_test = []
for edge in test_pos_edges:
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_test.append(feature_vector)
y_test.append(1)
for edge in test_neg_edges:
node_u_emb = embedding_look_up[edge[0]]
node_v_emb = embedding_look_up[edge[1]]
feature_vector = np.append(node_u_emb, node_v_emb)
X_test.append(feature_vector)
y_test.append(0)
c = list(zip(X_train, y_train))
random.shuffle(c)
X_train, y_train = zip(*c)
c = list(zip(X_test, y_test))
random.shuffle(c)
X_test, y_test = zip(*c)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
clf1 = LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs')
clf1.fit(X_train, y_train)
y_pred_proba = clf1.predict_proba(X_test)[:, 1]
y_pred = clf1.predict(X_test)
auc_roc = roc_auc_score(y_test, y_pred_proba)
avg_pr = average_precision_score(y_test, y_pred_proba)
precision = precision_score(y_test, y_pred, average='binary')
recall = recall_score(y_test, y_pred, average='binary')
accuracy = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
mcc = matthews_corrcoef(y_test, y_pred)
top_1, top_3 = predHits(y_test, y_pred, clf1.predict(X_test), clf1.predict(X_test))
print('#' * 35 + ' Link Prediction Performance ' + '#' * 35)
print(f'AUC-ROC: {auc_roc:.3f}, AVG-PR: {avg_pr:.3f}, Precision: {precision:.3f}, Recall: {recall:.3f}, Accuracy: {accuracy:.3f}, F1: {f1:.3f}, MCC: {mcc:.3f}, Top_1: {top_1:.3f}, Top_3: {top_3:.3f}')
print('#' * 100)
return auc_roc, avg_pr, precision, recall, accuracy, f1, mcc, top_1, top_3
def NodeClassification(embedding_look_up, node_list, labels, testing_ratio, seed):
X_train, y_train, X_test, y_test = split_train_test_classify(embedding_look_up, node_list, labels,
testing_ratio=testing_ratio,seed=seed)
binarizer = MultiLabelBinarizer(sparse_output=True)
y_all = np.append(y_train, y_test)
binarizer.fit(y_all)
y_train = binarizer.transform(y_train).todense()
y_test = binarizer.transform(y_test).todense()
model = OneVsRestClassifier(LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs'))
model.fit(X_train, y_train)
y_pred_prob = model.predict_proba(X_test)
    y_pred = get_y_pred(y_test, y_pred_prob)
    accuracy = accuracy_score(y_test, y_pred)
micro_f1 = f1_score(y_test, y_pred, average="micro")
macro_f1 = f1_score(y_test, y_pred, average="macro")
print('#' * 9 + ' Node Classification Performance ' + '#' * 9)
print(f'Accuracy: {accuracy:.3f}, Micro-F1: {micro_f1:.3f}, Macro-F1: {macro_f1:.3f}')
print('#' * 50)
return accuracy, micro_f1, macro_f1
def predHits(truth, pred1, pred2, pred3):
hits_1 = 0
hits_3 = 0
pred1 = np.rint(pred1).astype(np.int32)
pred2 = np.rint(pred2).astype(np.int32)
pred3 = np.rint(pred3).astype(np.int32)
for i in range(len(truth)):
if truth[i] == pred1[i]:
hits_1 = hits_1 + 1
if (truth[i] == pred1[i]) or (truth[i] == pred2[i]) or (truth[i] == pred3[i]):
hits_3 = hits_3 + 1
top_1 = hits_1/len(truth)
top_3 = hits_3/len(truth)
return top_1, top_3
| true
| true
|
79087737c1dbee45d9da6d2e19b2f69e5847f1c9
| 1,466
|
py
|
Python
|
exotica_python/src/pyexotica/publish_trajectory.py
|
LongfeiProjects/exotica
|
206b296edf9bf3b653ca3984b1449151ca17d374
|
[
"BSD-3-Clause"
] | 1
|
2019-04-12T20:26:59.000Z
|
2019-04-12T20:26:59.000Z
|
exotica_python/src/pyexotica/publish_trajectory.py
|
LongfeiProjects/exotica
|
206b296edf9bf3b653ca3984b1449151ca17d374
|
[
"BSD-3-Clause"
] | null | null | null |
exotica_python/src/pyexotica/publish_trajectory.py
|
LongfeiProjects/exotica
|
206b296edf9bf3b653ca3984b1449151ca17d374
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
from time import sleep
import matplotlib.pyplot as plt
import signal
def sigIntHandler(signal, frame):
raise KeyboardInterrupt
def publishPose(q, problem, t=0.0):
problem.getScene().Update(q, t)
problem.getScene().getSolver().publishFrames()
def publishTrajectory(traj, T, problem):
if len(traj) == 0:
print("Trajectory has zero elements")
        raise ValueError("Trajectory has zero elements")
signal.signal(signal.SIGINT, sigIntHandler)
print('Playing back trajectory '+str(T)+'s')
dt = float(T)/float(len(traj))
t = 0
while True:
try:
publishPose(traj[t], problem, float(t)*dt)
sleep(dt)
t = (t+1) % len(traj)
except KeyboardInterrupt:
return False
return True
def publishTimeIndexedTrajectory(traj, Ts, problem, once=False):
if len(traj) == 0:
print("Trajectory has zero elements")
        raise ValueError("Trajectory has zero elements")
signal.signal(signal.SIGINT, sigIntHandler)
print('Playing back trajectory '+str(len(Ts)) +
' states in '+str(Ts[len(Ts)-1]))
idx = 0
while True:
try:
for i in range(1, len(Ts)-1):
publishPose(traj[i], problem, Ts[i])
sleep(Ts[i]-Ts[i-1])
if once:
break
except KeyboardInterrupt:
return False
return True
def plot(solution):
print('Plotting the solution')
plt.plot(solution, '.-')
plt.show()
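# Worked example (editor's annotation, not from the original module): with a
# 100-waypoint trajectory played back over T = 5.0 s, publishTrajectory
# sleeps dt = 5.0 / 100 = 0.05 s between frames and cycles t through 0..99
# indefinitely until Ctrl+C (sigIntHandler turns SIGINT into
# KeyboardInterrupt, which makes the function return False).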
| 24.847458
| 64
| 0.595498
|
from __future__ import print_function
from time import sleep
import matplotlib.pyplot as plt
import signal
def sigIntHandler(signal, frame):
raise KeyboardInterrupt
def publishPose(q, problem, t=0.0):
problem.getScene().Update(q, t)
problem.getScene().getSolver().publishFrames()
def publishTrajectory(traj, T, problem):
if len(traj) == 0:
print("Trajectory has zero elements")
        raise ValueError("Trajectory has zero elements")
signal.signal(signal.SIGINT, sigIntHandler)
print('Playing back trajectory '+str(T)+'s')
dt = float(T)/float(len(traj))
t = 0
while True:
try:
publishPose(traj[t], problem, float(t)*dt)
sleep(dt)
t = (t+1) % len(traj)
except KeyboardInterrupt:
return False
return True
def publishTimeIndexedTrajectory(traj, Ts, problem, once=False):
if len(traj) == 0:
print("Trajectory has zero elements")
        raise ValueError("Trajectory has zero elements")
signal.signal(signal.SIGINT, sigIntHandler)
print('Playing back trajectory '+str(len(Ts)) +
' states in '+str(Ts[len(Ts)-1]))
idx = 0
while True:
try:
for i in range(1, len(Ts)-1):
publishPose(traj[i], problem, Ts[i])
sleep(Ts[i]-Ts[i-1])
if once:
break
except KeyboardInterrupt:
return False
return True
def plot(solution):
print('Plotting the solution')
plt.plot(solution, '.-')
plt.show()
| true
| true
|
7908779da07bb5ebf13361570653d8dfae22a5d9
| 7,969
|
py
|
Python
|
pointer_network.py
|
00wuweimin/jubilant-dollop
|
cc91caf8ee7aba5824abe25cbb3870299b369f91
|
[
"Apache-2.0"
] | null | null | null |
pointer_network.py
|
00wuweimin/jubilant-dollop
|
cc91caf8ee7aba5824abe25cbb3870299b369f91
|
[
"Apache-2.0"
] | null | null | null |
pointer_network.py
|
00wuweimin/jubilant-dollop
|
cc91caf8ee7aba5824abe25cbb3870299b369f91
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
train_data = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\train_data_10num.npy")
train_aim = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\train_label_10num.npy")
train_data = train_data.reshape(train_data.shape[0],10,1)
train_data = train_data.swapaxes(0, 1)
train_data = torch.from_numpy(train_data).type(torch.FloatTensor)
train_aim = torch.from_numpy(train_aim).type(torch.FloatTensor)
test_data = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\test_data_10num.npy")
test_aim = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\test_label_10num.npy")
test_data = test_data.reshape(test_data.shape[0],10,1)
test_data = test_data.swapaxes(0, 1)
test_data = torch.from_numpy(test_data).type(torch.FloatTensor)
test_aim = torch.from_numpy(test_aim).type(torch.FloatTensor)
class Encoder(nn.Module):
def __init__(self, input_size, hidden_size, batch_size, bidirectional=True):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.batch_size = batch_size
self.bidirectional = bidirectional
self.lstm = nn.LSTM(input_size, hidden_size, batch_first=False, bidirectional=bidirectional)
def forward(self, inputs, hidden):
output, hidden = self.lstm(inputs, hidden)
return output, hidden
def init_hidden(self):
return (torch.zeros(1 + int(self.bidirectional), self.batch_size, self.hidden_size),
torch.zeros(1 + int(self.bidirectional), self.batch_size, self.hidden_size)) #(num_layers * num_directions, batch, hidden_size)
class AttentionDecoder(nn.Module):
def __init__(self, hidden_size, output_size, batch_size, vocab_size,seq_len):
super(AttentionDecoder, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.batch_size = batch_size
self.seq_len = seq_len
self.vocab_size = vocab_size
self.attn = nn.Linear(hidden_size + output_size + vocab_size, 1)
self.lstm = nn.LSTM(hidden_size + vocab_size, output_size)
self.final = nn.Linear(output_size, vocab_size)
def init_hidden(self):
return (torch.zeros(1, self.batch_size, self.output_size),
torch.zeros(1, self.batch_size, self.output_size))
def forward(self, decoder_hidden, encoder_outputs, input):
seq = 0
weights= []
i = 0
output = torch.zeros(self.batch_size, self.vocab_size)
for i in range(len(encoder_outputs)):
weights.append(self.attn(torch.cat((decoder_hidden[0][:].squeeze(0),encoder_outputs[i],output), dim=1)))
normalized_weight = F.softmax(torch.cat(weights, 1), 1)
normalized_weights = normalized_weight
attn_applied = torch.bmm(normalized_weight.unsqueeze(1),
encoder_outputs.transpose(0,1))
input_lstm = torch.cat((attn_applied.transpose(0,1)[0], output),
dim=1) # if we are using embedding, use embedding of input here instead
output_, hidden = self.lstm(input_lstm.unsqueeze(0), decoder_hidden)
        output = self.final(output_[0]) # output: (batch_size, vocab_size)
#output = self.final2(output)
# hidden0 = hidden[0].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
# hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
# decoder_hidden = (hidden0, hidden1)
# decoder_hiddens = decoder_hidden
out = F.softmax(output,1)
return out
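# Shape walk-through (editor's annotation, using the defaults defined below:
# seq_len = 10, hidden_size = 2, bidirectional encoder, vocab_size = 10):
#   encoder_outputs: (10, batch, 4)            # hidden_size * 2 directions
#   each attn(...) score: (batch, 1)  ->  normalized_weight: (batch, 10)
#   attn_applied = bmm((batch, 1, 10), (batch, 10, 4)) -> (batch, 1, 4)
#   input_lstm: (batch, 4 + 10);  lstm output_: (1, batch, output_size)
#   out = softmax(final(...)): (batch, 10), a distribution over positions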
seq_len = 10
input_size = 1
hidden_size = 2
batch_size = train_data.shape[1]
bidirectional = True
output_size = hidden_size * (1 + bidirectional)
vocal_size = 10
input = []
for i in range(10):
m = np.ones((10000,10))*i
input.append(m)
input = np.array(input)
input = torch.from_numpy(input).type(torch.FloatTensor)
class pointer_atten(nn.Module):
def __init__(self):
super(pointer_atten, self).__init__()
self.layer1 = Encoder(input_size = input_size,
hidden_size = hidden_size,
batch_size = batch_size,
bidirectional=True)
self.layer2 = AttentionDecoder(
hidden_size = hidden_size * (1 + bidirectional),
output_size = output_size,
batch_size = batch_size,
vocab_size = vocal_size,
seq_len = 1
)
def forward(self,x):
output, hidden = self.layer1.forward(x, self.layer1.init_hidden())
hidden0 = hidden[0].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
decoder_hidden = (hidden0, hidden1)
encoder_outputs = output
last_output = self.layer2.forward(decoder_hidden, output, input)
return last_output
Net = pointer_atten()
learning_rate = 0.05
Loss = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(Net.parameters(), lr=learning_rate)
###########################################
# train
###########################################
loss_list = []
True_list = []
num_epochs = 10000
epoch = 10000
batch = train_aim.detach().numpy().size
Net.load_state_dict(torch.load('E:\\quant_research\\train the rank of ten points\\RNN_point\\net_10num\\net720.pkl'))
for epoch in range(1000):
train_data = Variable(train_data,requires_grad=True)
train_aim = Variable(train_aim,requires_grad=True)
# Forward pass
outputs = Net(train_data)
loss = Loss(outputs, train_aim)
loss_list.append(loss)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch) % 10 == 0:
print ('Epoch [{}/{}], Loss: {:.4f}'
.format(epoch+1,num_epochs,loss.item()))
is_not = outputs.detach().numpy() - train_aim.detach().numpy()
is_not = np.where(is_not < -0.1, 10, is_not)
is_not = np.where(is_not < 0.1, 1, 0)
T_pre = np.nansum(is_not)
True_rate = T_pre / batch
True_list.append(True_rate)
print('accuracy of prediction in training data:', True_rate)
if epoch % 10 ==0:
torch.save(Net.state_dict(), 'E:\\quant_research\\train the rank of ten points\\\RNN_point\\net_10num\\net{}.pkl'.format(epoch))
loss_array = np.array(loss_list)
true_array = np.array(True_list)
np.save('E:\\quant_research\\train the rank of ten points\\\RNN_point\\loss',loss_array)
np.save('E:\\quant_research\\train the rank of ten points\\\RNN_point\\true',true_array)
loss_array = np.load('E:\\quant_research\\train the rank of ten points\\\RNN_point\\loss.npy',allow_pickle=True)
true_array = np.load('E:\\quant_research\\train the rank of ten points\\\RNN_point\\true.npy')
outputs = Net(train_data)
loss = Loss(outputs, train_aim)
label = np.argmax(outputs.detach().numpy(),axis = 1)
label_aim = np.argmax(train_aim.detach().numpy(),axis = 1)
True_rate = np.sum(label == label_aim) / 10000
print('loss in training data:%.5f,accuracy of prediction in training data:%.5f'%(loss,True_rate))
outputs = Net(test_data)
loss = Loss(outputs, test_aim)
label = np.argmax(outputs.detach().numpy(),axis = 1)
label_aim = np.argmax(test_aim.detach().numpy(),axis = 1)
True_rate = np.sum(label == label_aim) / 10000
print('loss in testing data:%.5f,accuracy of prediction in testing data:%.5f'%(loss,True_rate))
| 37.589623
| 146
| 0.648638
|
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
train_data = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\train_data_10num.npy")
train_aim = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\train_label_10num.npy")
train_data = train_data.reshape(train_data.shape[0],10,1)
train_data = train_data.swapaxes(0, 1)
train_data = torch.from_numpy(train_data).type(torch.FloatTensor)
train_aim = torch.from_numpy(train_aim).type(torch.FloatTensor)
test_data = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\test_data_10num.npy")
test_aim = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\test_label_10num.npy")
test_data = test_data.reshape(test_data.shape[0],10,1)
test_data = test_data.swapaxes(0, 1)
test_data = torch.from_numpy(test_data).type(torch.FloatTensor)
test_aim = torch.from_numpy(test_aim).type(torch.FloatTensor)
class Encoder(nn.Module):
def __init__(self, input_size, hidden_size, batch_size, bidirectional=True):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.batch_size = batch_size
self.bidirectional = bidirectional
self.lstm = nn.LSTM(input_size, hidden_size, batch_first=False, bidirectional=bidirectional)
def forward(self, inputs, hidden):
output, hidden = self.lstm(inputs, hidden)
return output, hidden
def init_hidden(self):
return (torch.zeros(1 + int(self.bidirectional), self.batch_size, self.hidden_size),
torch.zeros(1 + int(self.bidirectional), self.batch_size, self.hidden_size))
class AttentionDecoder(nn.Module):
def __init__(self, hidden_size, output_size, batch_size, vocab_size,seq_len):
super(AttentionDecoder, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.batch_size = batch_size
self.seq_len = seq_len
self.vocab_size = vocab_size
self.attn = nn.Linear(hidden_size + output_size + vocab_size, 1)
self.lstm = nn.LSTM(hidden_size + vocab_size, output_size)
self.final = nn.Linear(output_size, vocab_size)
def init_hidden(self):
return (torch.zeros(1, self.batch_size, self.output_size),
torch.zeros(1, self.batch_size, self.output_size))
def forward(self, decoder_hidden, encoder_outputs, input):
seq = 0
weights= []
i = 0
output = torch.zeros(self.batch_size, self.vocab_size)
for i in range(len(encoder_outputs)):
weights.append(self.attn(torch.cat((decoder_hidden[0][:].squeeze(0),encoder_outputs[i],output), dim=1)))
normalized_weight = F.softmax(torch.cat(weights, 1), 1)
normalized_weights = normalized_weight
attn_applied = torch.bmm(normalized_weight.unsqueeze(1),
encoder_outputs.transpose(0,1))
input_lstm = torch.cat((attn_applied.transpose(0,1)[0], output),
dim=1)
output_, hidden = self.lstm(input_lstm.unsqueeze(0), decoder_hidden)
output = self.final(output_[0])
out = F.softmax(output,1)
return out
seq_len = 10
input_size = 1
hidden_size = 2
batch_size = train_data.shape[1]
bidirectional = True
output_size = hidden_size * (1 + bidirectional)
vocal_size = 10
input = []
for i in range(10):
m = np.ones((10000,10))*i
input.append(m)
input = np.array(input)
input = torch.from_numpy(input).type(torch.FloatTensor)
class pointer_atten(nn.Module):
def __init__(self):
super(pointer_atten, self).__init__()
self.layer1 = Encoder(input_size = input_size,
hidden_size = hidden_size,
batch_size = batch_size,
bidirectional=True)
self.layer2 = AttentionDecoder(
hidden_size = hidden_size * (1 + bidirectional),
output_size = output_size,
batch_size = batch_size,
vocab_size = vocal_size,
seq_len = 1
)
def forward(self,x):
output, hidden = self.layer1.forward(x, self.layer1.init_hidden())
hidden0 = hidden[0].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
decoder_hidden = (hidden0, hidden1)
encoder_outputs = output
last_output = self.layer2.forward(decoder_hidden, output, input)
return last_output
Net = pointer_atten()
learning_rate = 0.05
Loss = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(Net.parameters(), lr=learning_rate)
loss_list = []
True_list = []
num_epochs = 10000
epoch = 10000
batch = train_aim.detach().numpy().size
Net.load_state_dict(torch.load('E:\\quant_research\\train the rank of ten points\\RNN_point\\net_10num\\net720.pkl'))
for epoch in range(1000):
    train_data = Variable(train_data,requires_grad=True)
    train_aim = Variable(train_aim,requires_grad=True)
    outputs = Net(train_data)
    loss = Loss(outputs, train_aim)
    loss_list.append(loss)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch) % 10 == 0:
        print ('Epoch [{}/{}], Loss: {:.4f}'
               .format(epoch+1,num_epochs,loss.item()))
        is_not = outputs.detach().numpy() - train_aim.detach().numpy()
        is_not = np.where(is_not < -0.1, 10, is_not)
        is_not = np.where(is_not < 0.1, 1, 0)
        T_pre = np.nansum(is_not)
        True_rate = T_pre / batch
        True_list.append(True_rate)
        print('accuracy of prediction in training data:', True_rate)
    if epoch % 10 ==0:
        torch.save(Net.state_dict(), 'E:\\quant_research\\train the rank of ten points\\\RNN_point\\net_10num\\net{}.pkl'.format(epoch))
loss_array = np.array(loss_list)
true_array = np.array(True_list)
np.save('E:\\quant_research\\train the rank of ten points\\\RNN_point\\loss',loss_array)
np.save('E:\\quant_research\\train the rank of ten points\\\RNN_point\\true',true_array)
loss_array = np.load('E:\\quant_research\\train the rank of ten points\\\RNN_point\\loss.npy',allow_pickle=True)
true_array = np.load('E:\\quant_research\\train the rank of ten points\\\RNN_point\\true.npy')
outputs = Net(train_data)
loss = Loss(outputs, train_aim)
label = np.argmax(outputs.detach().numpy(),axis = 1)
label_aim = np.argmax(train_aim.detach().numpy(),axis = 1)
True_rate = np.sum(label == label_aim) / 10000
print('loss in training data:%.5f,accuracy of prediction in training data:%.5f'%(loss,True_rate))
outputs = Net(test_data)
loss = Loss(outputs, test_aim)
label = np.argmax(outputs.detach().numpy(),axis = 1)
label_aim = np.argmax(test_aim.detach().numpy(),axis = 1)
True_rate = np.sum(label == label_aim) / 10000
print('loss in testing data:%.5f,accuracy of prediction in testing data:%.5f'%(loss,True_rate))
| true
| true
|
790877aec35bcd0a4897d4505442464175c1d3d6
| 1,654
|
py
|
Python
|
services/syslogcollector/syslogserver.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
services/syslogcollector/syslogserver.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
services/syslogcollector/syslogserver.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# Syslog server
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import logging
import time
from typing import Tuple
# NOC modules
from noc.config import config
from noc.core.perf import metrics
from noc.core.ioloop.udpserver import UDPServer
from noc.core.comp import smart_text
logger = logging.getLogger(__name__)
class SyslogServer(UDPServer):
def __init__(self, service):
super().__init__()
self.service = service
def enable_reuseport(self):
return config.syslogcollector.enable_reuseport
def enable_freebind(self):
return config.syslogcollector.enable_freebind
def on_read(self, data: bytes, address: Tuple[str, int]):
metrics["syslog_msg_in"] += 1
cfg = self.service.lookup_config(address[0])
if not cfg:
return # Invalid event source
# Convert data to valid UTF8
data = smart_text(data, errors="ignore")
# Parse priority
priority = 0
if data.startswith("<"):
idx = data.find(">")
if idx == -1:
return
try:
priority = int(data[1:idx])
except ValueError:
pass
data = data[idx + 1 :].strip()
# Get timestamp
ts = int(time.time())
#
self.service.register_message(cfg, ts, data, facility=priority >> 3, severity=priority & 7)
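# Worked example (editor's annotation, not part of the NOC source): for a
# datagram starting with b"<165>", priority = 165, so facility = 165 >> 3
# = 20 (local4) and severity = 165 & 7 = 5 (notice).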
| 30.072727
| 99
| 0.538089
|
import logging
import time
from typing import Tuple
from noc.config import config
from noc.core.perf import metrics
from noc.core.ioloop.udpserver import UDPServer
from noc.core.comp import smart_text
logger = logging.getLogger(__name__)
class SyslogServer(UDPServer):
def __init__(self, service):
super().__init__()
self.service = service
def enable_reuseport(self):
return config.syslogcollector.enable_reuseport
def enable_freebind(self):
return config.syslogcollector.enable_freebind
def on_read(self, data: bytes, address: Tuple[str, int]):
metrics["syslog_msg_in"] += 1
cfg = self.service.lookup_config(address[0])
if not cfg:
return
data = smart_text(data, errors="ignore")
priority = 0
if data.startswith("<"):
idx = data.find(">")
if idx == -1:
return
try:
priority = int(data[1:idx])
except ValueError:
pass
data = data[idx + 1 :].strip()
ts = int(time.time())
self.service.register_message(cfg, ts, data, facility=priority >> 3, severity=priority & 7)
| true
| true
|
79087905447cd6452d3c2ca48a0be3d6d1fb92ec
| 3,152
|
py
|
Python
|
up/utils/general/cfg_helper.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 196
|
2021-10-30T05:15:36.000Z
|
2022-03-30T18:43:40.000Z
|
up/utils/general/cfg_helper.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 12
|
2021-10-30T11:33:28.000Z
|
2022-03-31T14:22:58.000Z
|
up/utils/general/cfg_helper.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 23
|
2021-11-01T07:26:17.000Z
|
2022-03-27T05:55:37.000Z
|
# Standard Library
import copy
import json
import re
from .log_helper import default_logger as logger
def format_cfg(cfg):
"""Format experiment config for friendly display"""
# json_str = json.dumps(cfg, indent=2, ensure_ascii=False)
# return json_str
def list2str(cfg):
for key, value in cfg.items():
if isinstance(value, dict):
cfg[key] = list2str(value)
elif isinstance(value, list):
if len(value) == 0 or isinstance(value[0], (int, float)):
cfg[key] = str(value)
else:
for i, item in enumerate(value):
if isinstance(item, dict):
value[i] = list2str(item)
cfg[key] = value
return cfg
cfg = list2str(copy.deepcopy(cfg))
json_str = json.dumps(cfg, indent=2, ensure_ascii=False).split("\n")
# json_str = [re.sub(r"(\"|,$|\{|\}|\[$|\s$)", "", line) for line in json_str if line.strip() not in "{}[]"]
json_str = [re.sub(r"(\"|(!\],$)|\s$)", "", line) for line in json_str]
cfg_str = "\n".join([line.rstrip() for line in json_str if line.strip()])
return cfg_str
def is_number(num):
pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$')
res = pattern.match(num)
if res:
return True
return False
def try_decode(val):
"""bool, int, float, or str"""
if val.upper() == 'FALSE':
return False
elif val.upper() == 'TRUE':
return True
if val.isdigit():
return int(val)
if is_number(val):
return float(val)
return val
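# Worked examples (editor's annotation, not in the original module):
#   try_decode('TRUE')  -> True
#   try_decode('42')    -> 42          (str.isdigit path)
#   try_decode('-0.5')  -> -0.5        (matched by is_number)
#   try_decode('adam')  -> 'adam'      (falls through unchanged)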
def merge_opts_into_cfg(opts, cfg):
cfg = copy.deepcopy(cfg)
if opts is None or len(opts) == 0:
return cfg
assert len(opts) % 2 == 0
keys, values = opts[0::2], opts[1::2]
for key, val in zip(keys, values):
logger.info(f'replacing {key}')
val = try_decode(val)
cur_cfg = cfg
# for hooks
if '-' in key:
key_p, key_s = key.split('-')
k_module, k_type = key_p.split('.')
cur_cfg = cur_cfg[k_module]
flag_exist = False
for idx in range(len(cur_cfg)):
if cur_cfg[idx]['type'] != k_type:
continue
flag_exist = True
cur_cfg_temp = cur_cfg[idx]
key_s = key_s.split('.')
for k in key_s[:-1]:
cur_cfg_temp = cur_cfg_temp.setdefault(k, {})
cur_cfg_temp[key_s[-1]] = val
if not flag_exist:
_cur_cfg = {}
cur_cfg_temp = _cur_cfg
key_s = key_s.split('.')
for k in key_s[:-1]:
cur_cfg_temp = cur_cfg_temp.setdefault(k, {})
cur_cfg_temp[key_s[-1]] = val
cur_cfg.append(_cur_cfg)
else:
key = key.split('.')
for k in key[:-1]:
cur_cfg = cur_cfg.setdefault(k, {})
cur_cfg[key[-1]] = val
return cfg
def upgrade_cfg(cfg):
# cfg = upgrade_fp16(cfg)
return cfg
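# Usage sketch (editor's annotation; the cfg keys below are made up):
#   cfg = {'trainer': {'lr': 0.1}, 'hooks': [{'type': 'auto_save'}]}
#   merge_opts_into_cfg(['trainer.lr', '0.01'], cfg) returns a deep copy in
#   which cfg['trainer']['lr'] == 0.01, while the dashed form
#   merge_opts_into_cfg(['hooks.auto_save-frequency', '5'], cfg) locates the
#   dict with type == 'auto_save' inside the cfg['hooks'] list and sets its
#   'frequency' to 5 ('module.type-sub.key' addressing for list entries).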
| 30.901961
| 112
| 0.510152
|
import copy
import json
import re
from .log_helper import default_logger as logger
def format_cfg(cfg):
def list2str(cfg):
for key, value in cfg.items():
if isinstance(value, dict):
cfg[key] = list2str(value)
elif isinstance(value, list):
if len(value) == 0 or isinstance(value[0], (int, float)):
cfg[key] = str(value)
else:
for i, item in enumerate(value):
if isinstance(item, dict):
value[i] = list2str(item)
cfg[key] = value
return cfg
cfg = list2str(copy.deepcopy(cfg))
json_str = json.dumps(cfg, indent=2, ensure_ascii=False).split("\n")
json_str = [re.sub(r"(\"|(!\],$)|\s$)", "", line) for line in json_str]
cfg_str = "\n".join([line.rstrip() for line in json_str if line.strip()])
return cfg_str
def is_number(num):
pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$')
res = pattern.match(num)
if res:
return True
return False
def try_decode(val):
if val.upper() == 'FALSE':
return False
elif val.upper() == 'TRUE':
return True
if val.isdigit():
return int(val)
if is_number(val):
return float(val)
return val
def merge_opts_into_cfg(opts, cfg):
cfg = copy.deepcopy(cfg)
if opts is None or len(opts) == 0:
return cfg
assert len(opts) % 2 == 0
keys, values = opts[0::2], opts[1::2]
for key, val in zip(keys, values):
logger.info(f'replacing {key}')
val = try_decode(val)
cur_cfg = cfg
if '-' in key:
key_p, key_s = key.split('-')
k_module, k_type = key_p.split('.')
cur_cfg = cur_cfg[k_module]
flag_exist = False
for idx in range(len(cur_cfg)):
if cur_cfg[idx]['type'] != k_type:
continue
flag_exist = True
cur_cfg_temp = cur_cfg[idx]
key_s = key_s.split('.')
for k in key_s[:-1]:
cur_cfg_temp = cur_cfg_temp.setdefault(k, {})
cur_cfg_temp[key_s[-1]] = val
if not flag_exist:
_cur_cfg = {}
cur_cfg_temp = _cur_cfg
key_s = key_s.split('.')
for k in key_s[:-1]:
cur_cfg_temp = cur_cfg_temp.setdefault(k, {})
cur_cfg_temp[key_s[-1]] = val
cur_cfg.append(_cur_cfg)
else:
key = key.split('.')
for k in key[:-1]:
cur_cfg = cur_cfg.setdefault(k, {})
cur_cfg[key[-1]] = val
return cfg
def upgrade_cfg(cfg):
return cfg
| true
| true
|
79087986bb0bf55f7733843a920fc0e4c0796ec8
| 26
|
py
|
Python
|
asolut/__init__.py
|
Marios-Mamalis/asolut
|
36bc7f7893b7596d7e7a734dd3013046f9e0c700
|
[
"MIT"
] | null | null | null |
asolut/__init__.py
|
Marios-Mamalis/asolut
|
36bc7f7893b7596d7e7a734dd3013046f9e0c700
|
[
"MIT"
] | null | null | null |
asolut/__init__.py
|
Marios-Mamalis/asolut
|
36bc7f7893b7596d7e7a734dd3013046f9e0c700
|
[
"MIT"
] | null | null | null |
from asolut.main import *
| 13
| 25
| 0.769231
|
from asolut.main import *
| true
| true
|
79087a3b0c4d93b0290d269b6cb4e54c678a9b13
| 484
|
py
|
Python
|
plotly/validators/scattergl/marker/line/_cmax.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/scattergl/marker/line/_cmax.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/scattergl/marker/line/_cmax.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='cmax',
parent_name='scattergl.marker.line',
**kwargs
):
super(CmaxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
implied_edits={'cauto': False},
role='info',
**kwargs
)
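# Editor's sketch (assumed usage of the validated attribute, not part of this
# generated file): marker.line.cmax bounds the color mapping of marker line
# colors and implies cauto=False, e.g.
#   import plotly.graph_objs as go
#   go.Scattergl(y=[1, 2, 3],
#                marker=dict(line=dict(color=[0.1, 0.5, 0.9],
#                                      cmin=0, cmax=1)))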
| 24.2
| 66
| 0.584711
|
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='cmax',
parent_name='scattergl.marker.line',
**kwargs
):
super(CmaxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
implied_edits={'cauto': False},
role='info',
**kwargs
)
| true
| true
|
79087ac5f529145040e99aec0cded231ca8fe9d1
| 234
|
py
|
Python
|
universal_notifications/__init__.py
|
ArabellaTech/universal_notifications
|
dc84f412961c1bb5aeecbfb100855cc2db8045b4
|
[
"MIT"
] | 15
|
2017-05-03T06:23:38.000Z
|
2018-11-02T18:16:32.000Z
|
universal_notifications/__init__.py
|
ArabellaTech/universal_notifications
|
dc84f412961c1bb5aeecbfb100855cc2db8045b4
|
[
"MIT"
] | 36
|
2016-12-18T15:07:23.000Z
|
2018-11-13T11:54:51.000Z
|
universal_notifications/__init__.py
|
HealthByRo/universal_notifications
|
dc84f412961c1bb5aeecbfb100855cc2db8045b4
|
[
"MIT"
] | 3
|
2020-01-08T05:42:46.000Z
|
2021-01-27T11:23:47.000Z
|
# -*- coding: utf-8 -*-
__title__ = "Universal Notifications"
__version__ = "1.5.0"
__author__ = "Pawel Krzyzaniak"
__license__ = "MIT"
__copyright__ = "Copyright 2017-2018 Arabella; 2018+ Ro"
# Version synonym
VERSION = __version__
| 23.4
| 56
| 0.726496
|
__title__ = "Universal Notifications"
__version__ = "1.5.0"
__author__ = "Pawel Krzyzaniak"
__license__ = "MIT"
__copyright__ = "Copyright 2017-2018 Arabella; 2018+ Ro"
VERSION = __version__
| true
| true
|
79087b023c629fe3bd8d12233d6cbe8af8a4211c
| 279
|
py
|
Python
|
hdbo/febo/solvers/__init__.py
|
eric-vader/HD-BO-Additive-Models
|
0d7e1d46194af2e3d402631caec6e7be9a50376a
|
[
"MIT"
] | 5
|
2021-03-25T02:58:01.000Z
|
2022-02-19T12:58:52.000Z
|
hdbo/febo/solvers/__init__.py
|
eric-vader/HD-BO-Additive-Models
|
0d7e1d46194af2e3d402631caec6e7be9a50376a
|
[
"MIT"
] | null | null | null |
hdbo/febo/solvers/__init__.py
|
eric-vader/HD-BO-Additive-Models
|
0d7e1d46194af2e3d402631caec6e7be9a50376a
|
[
"MIT"
] | 1
|
2020-12-27T07:58:46.000Z
|
2020-12-27T07:58:46.000Z
|
"""
Optimizers
----------
.. autosummary::
:template: template.rst
:toctree:
Solver
ScipySolver
CandidateSolver
GridSolver
"""
from .solver import Solver
from .scipy import ScipySolver
from .candidate import CandidateSolver, GridSolver, FiniteDomainSolver
| 13.95
| 70
| 0.713262
|
from .solver import Solver
from .scipy import ScipySolver
from .candidate import CandidateSolver, GridSolver, FiniteDomainSolver
| true
| true
|
79087b8f74439ac7eadad97494f545910570500a
| 9,576
|
py
|
Python
|
controllers/dvi.py
|
andygimma/eden
|
716d5e11ec0030493b582fa67d6f1c35de0af50d
|
[
"MIT"
] | 1
|
2019-08-20T16:32:33.000Z
|
2019-08-20T16:32:33.000Z
|
controllers/dvi.py
|
andygimma/eden
|
716d5e11ec0030493b582fa67d6f1c35de0af50d
|
[
"MIT"
] | null | null | null |
controllers/dvi.py
|
andygimma/eden
|
716d5e11ec0030493b582fa67d6f1c35de0af50d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Disaster Victim Identification, Controllers
@author: nursix
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def s3_menu_postp():
# @todo: rewrite this for new framework
menu_selected = []
body_id = s3mgr.get_session("dvi", "body")
if body_id:
body = s3db.dvi_body
query = (body.id == body_id)
record = db(query).select(body.id, body.pe_label,
limitby=(0,1)).first()
if record:
label = record.pe_label
response.menu_options[-3][-1].append(
[T("Candidate Matches for Body %s" % label),
False, URL(f="person",
vars=dict(match=record.id))]
)
menu_selected.append(
["%s: %s" % (T("Body"), label),
False, URL(f="body", args=[record.id])]
)
person_id = s3mgr.get_session("pr", "person")
if person_id:
person = s3db.pr_person
query = (person.id == person_id)
record = db(query).select(person.id, limitby=(0, 1)).first()
if record:
name = s3db.pr_person_represent(record.id)
menu_selected.append(
["%s: %s" % (T("Person"), name),
False, URL(f="person", args=[record.id])]
)
if menu_selected:
menu_selected = [T("Open recent"), True, None, menu_selected]
response.menu_options.append(menu_selected)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
try:
module_name = settings.modules[module].name_nice
    except Exception:
module_name = T("Disaster Victim Identification")
table = s3db.dvi_body
total = db(table.deleted == False).count()
itable = s3db.dvi_identification
query = (table.deleted == False) & \
(itable.pe_id == table.pe_id) & \
(itable.deleted == False) & \
(itable.status == 3)
identified = db(query).count()
status = [[str(T("identified")), int(identified)],
[str(T("unidentified")), int(total-identified)]]
response.title = module_name
return dict(module_name=module_name,
total=total,
status=json.dumps(status))
# -----------------------------------------------------------------------------
def recreq():
""" Recovery Requests List """
table = s3db.dvi_recreq
table.person_id.default = s3_logged_in_person()
def prep(r):
if r.interactive and not r.record:
table.status.readable = False
table.status.writable = False
table.bodies_recovered.readable = False
table.bodies_recovered.writable = False
return True
s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def morgue():
""" Morgue Registry """
morgue_tabs = [(T("Morgue Details"), ""),
(T("Bodies"), "body")]
rheader = S3ResourceHeader([
[(T("Morgue"), "name")]
], tabs=morgue_tabs)
# Pre-processor
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
if r.interactive and r.id and not r.component:
field = r.table.obsolete
field.readable = field.writable = True
return True
s3.prep = prep
output = s3_rest_controller(rheader=rheader)
return output
# -----------------------------------------------------------------------------
def body():
""" Dead Bodies Registry """
gender_opts = s3db.pr_gender_opts
gender_opts[1] = T("unknown")
btable = s3db.dvi_body
itable = s3db.dvi_identification
status = request.get_vars.get("status", None)
if status == "unidentified":
query = (itable.deleted == False) & \
(itable.status == 3)
ids = db(query).select(itable.pe_id)
ids = [i.pe_id for i in ids]
if ids:
s3.filter = (~(btable.pe_id.belongs(ids)))
s3db.configure("dvi_body", main="pe_label", extra="gender")
ntable = s3db.pr_note
ntable.status.readable = False
ntable.status.writable = False
dvi_tabs = [(T("Recovery"), ""),
(T("Checklist"), "checklist"),
(T("Images"), "image"),
(T("Physical Description"), "physical_description"),
(T("Effects Inventory"), "effects"),
(T("Journal"), "note"),
(T("Identification"), "identification")]
rheader = S3ResourceHeader([
[(T("ID Tag Number"), "pe_label")],
["gender"],
["age_group"],
], tabs=dvi_tabs)
output = s3_rest_controller(rheader=rheader)
return output
# -----------------------------------------------------------------------------
def person():
""" Missing Persons Registry (Match Finder) """
table = s3db.pr_person
s3.crud_strings["pr_person"].update(
title_display = T("Missing Person Details"),
title_list = T("Missing Persons"),
label_list_button = T("List Missing Persons"),
msg_list_empty = T("No Persons found"),
msg_no_match = T("No Persons currently reported missing"))
s3db.configure("pr_group_membership",
list_fields=["id",
"group_id",
"group_head",
"description"
])
s3db.configure("pr_person",
listadd=False,
editable=False,
deletable=False,
list_fields=["id",
"first_name",
"middle_name",
"last_name",
"picture",
"gender",
"age_group"
])
def prep(r):
if not r.id and not r.method and not r.component:
body_id = r.get_vars.get("match", None)
body = db(db.dvi_body.id == body_id).select(
db.dvi_body.pe_label, limitby=(0, 1)).first()
label = body and body.pe_label or "#%s" % body_id
if body_id:
query = dvi_match_query(body_id)
r.resource.add_filter(query)
s3.crud_strings["pr_person"].update(
#subtitle_list = T("Candidate Matches for Body %s" % label),
msg_no_match = T("No matching records found"))
return True
s3.prep = prep
field = table.missing
field.readable = False
field.writable = False
field.default = True
table.age_group.readable = True
table.age_group.writable = True
# Show only missing persons in list views
if len(request.args) == 0:
s3.filter = (db.pr_person.missing == True)
mpr_tabs = [
(T("Missing Report"), "missing_report"),
(T("Person Details"), None),
(T("Physical Description"), "physical_description"),
(T("Images"), "image"),
(T("Identity"), "identity"),
(T("Address"), "address"),
(T("Contact Data"), "contact"),
(T("Journal"), "note"),
]
rheader = lambda r: s3db.pr_rheader(r, tabs=mpr_tabs)
output = s3_rest_controller("pr", "person",
main="first_name",
extra="last_name",
rheader=rheader)
return output
# -------------------------------------------------------------------------
def dvi_match_query(body_id):
"""
Get a query for candidate matches between the missing
persons registry and a dead body
@param body_id: the dvi_body record ID
"""
ptable = s3db.pr_person
ntable = s3db.pr_note
btable = s3db.dvi_body
query = ((ptable.deleted == False) &
(ptable.missing == True) &
(ntable.pe_id == ptable.pe_id) &
(ntable.status == 1))
body = btable[body_id]
if not body:
return query
# last seen should be before date of recovery
if body.date_of_recovery:
q = ((ntable.timestmp <= body.date_of_recovery) |
(ntable.timestmp == None))
query = query & q
# age group should match
if body.age_group and body.age_group != 1:
q = ((ptable.age_group == None) |
(ptable.age_group == 1) |
(ptable.age_group == body.age_group))
query = query & q
# gender should match
if body.gender and body.gender != 1:
q = ((ptable.gender == None) |
(ptable.gender == 1) |
             (ptable.gender == body.gender))
        query = query & q
return query
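# Worked example (editor's annotation): for a body recovered on 2011-03-15
# with age_group=3 and gender=2, the query keeps missing persons whose
# status-1 ("last seen") note is not after 2011-03-15 and whose age_group is
# one of (None, 1, 3) and gender one of (None, 1, 2) -- unknown values always
# remain candidates.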
# -----------------------------------------------------------------------------
def tooltip():
""" Ajax Tooltips """
formfield = request.vars.get("formfield", None)
if formfield:
response.view = "pr/ajaxtips/%s.html" % formfield
return dict()
# END =========================================================================
| 32.571429
| 80
| 0.487573
|
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
def s3_menu_postp():
menu_selected = []
body_id = s3mgr.get_session("dvi", "body")
if body_id:
body = s3db.dvi_body
query = (body.id == body_id)
record = db(query).select(body.id, body.pe_label,
limitby=(0,1)).first()
if record:
label = record.pe_label
response.menu_options[-3][-1].append(
[T("Candidate Matches for Body %s" % label),
False, URL(f="person",
vars=dict(match=record.id))]
)
menu_selected.append(
["%s: %s" % (T("Body"), label),
False, URL(f="body", args=[record.id])]
)
person_id = s3mgr.get_session("pr", "person")
if person_id:
person = s3db.pr_person
query = (person.id == person_id)
record = db(query).select(person.id, limitby=(0, 1)).first()
if record:
name = s3db.pr_person_represent(record.id)
menu_selected.append(
["%s: %s" % (T("Person"), name),
False, URL(f="person", args=[record.id])]
)
if menu_selected:
menu_selected = [T("Open recent"), True, None, menu_selected]
response.menu_options.append(menu_selected)
def index():
try:
module_name = settings.modules[module].name_nice
    except Exception:
module_name = T("Disaster Victim Identification")
table = s3db.dvi_body
total = db(table.deleted == False).count()
itable = s3db.dvi_identification
query = (table.deleted == False) & \
(itable.pe_id == table.pe_id) & \
(itable.deleted == False) & \
(itable.status == 3)
identified = db(query).count()
status = [[str(T("identified")), int(identified)],
[str(T("unidentified")), int(total-identified)]]
response.title = module_name
return dict(module_name=module_name,
total=total,
status=json.dumps(status))
def recreq():
table = s3db.dvi_recreq
table.person_id.default = s3_logged_in_person()
def prep(r):
if r.interactive and not r.record:
table.status.readable = False
table.status.writable = False
table.bodies_recovered.readable = False
table.bodies_recovered.writable = False
return True
s3.prep = prep
output = s3_rest_controller()
return output
def morgue():
morgue_tabs = [(T("Morgue Details"), ""),
(T("Bodies"), "body")]
rheader = S3ResourceHeader([
[(T("Morgue"), "name")]
], tabs=morgue_tabs)
def prep(r):
s3db.gis_location_filter(r)
if r.interactive and r.id and not r.component:
field = r.table.obsolete
field.readable = field.writable = True
return True
s3.prep = prep
output = s3_rest_controller(rheader=rheader)
return output
def body():
gender_opts = s3db.pr_gender_opts
gender_opts[1] = T("unknown")
btable = s3db.dvi_body
itable = s3db.dvi_identification
status = request.get_vars.get("status", None)
if status == "unidentified":
query = (itable.deleted == False) & \
(itable.status == 3)
ids = db(query).select(itable.pe_id)
ids = [i.pe_id for i in ids]
if ids:
s3.filter = (~(btable.pe_id.belongs(ids)))
s3db.configure("dvi_body", main="pe_label", extra="gender")
ntable = s3db.pr_note
ntable.status.readable = False
ntable.status.writable = False
dvi_tabs = [(T("Recovery"), ""),
(T("Checklist"), "checklist"),
(T("Images"), "image"),
(T("Physical Description"), "physical_description"),
(T("Effects Inventory"), "effects"),
(T("Journal"), "note"),
(T("Identification"), "identification")]
rheader = S3ResourceHeader([
[(T("ID Tag Number"), "pe_label")],
["gender"],
["age_group"],
], tabs=dvi_tabs)
output = s3_rest_controller(rheader=rheader)
return output
def person():
table = s3db.pr_person
s3.crud_strings["pr_person"].update(
title_display = T("Missing Person Details"),
title_list = T("Missing Persons"),
label_list_button = T("List Missing Persons"),
msg_list_empty = T("No Persons found"),
msg_no_match = T("No Persons currently reported missing"))
s3db.configure("pr_group_membership",
list_fields=["id",
"group_id",
"group_head",
"description"
])
s3db.configure("pr_person",
listadd=False,
editable=False,
deletable=False,
list_fields=["id",
"first_name",
"middle_name",
"last_name",
"picture",
"gender",
"age_group"
])
def prep(r):
if not r.id and not r.method and not r.component:
body_id = r.get_vars.get("match", None)
body = db(db.dvi_body.id == body_id).select(
db.dvi_body.pe_label, limitby=(0, 1)).first()
label = body and body.pe_label or "#%s" % body_id
if body_id:
query = dvi_match_query(body_id)
r.resource.add_filter(query)
s3.crud_strings["pr_person"].update(
msg_no_match = T("No matching records found"))
return True
s3.prep = prep
field = table.missing
field.readable = False
field.writable = False
field.default = True
table.age_group.readable = True
table.age_group.writable = True
if len(request.args) == 0:
s3.filter = (db.pr_person.missing == True)
mpr_tabs = [
(T("Missing Report"), "missing_report"),
(T("Person Details"), None),
(T("Physical Description"), "physical_description"),
(T("Images"), "image"),
(T("Identity"), "identity"),
(T("Address"), "address"),
(T("Contact Data"), "contact"),
(T("Journal"), "note"),
]
rheader = lambda r: s3db.pr_rheader(r, tabs=mpr_tabs)
output = s3_rest_controller("pr", "person",
main="first_name",
extra="last_name",
rheader=rheader)
return output
def dvi_match_query(body_id):
ptable = s3db.pr_person
ntable = s3db.pr_note
btable = s3db.dvi_body
query = ((ptable.deleted == False) &
(ptable.missing == True) &
(ntable.pe_id == ptable.pe_id) &
(ntable.status == 1))
body = btable[body_id]
if not body:
return query
if body.date_of_recovery:
q = ((ntable.timestmp <= body.date_of_recovery) |
(ntable.timestmp == None))
query = query & q
if body.age_group and body.age_group != 1:
q = ((ptable.age_group == None) |
(ptable.age_group == 1) |
(ptable.age_group == body.age_group))
query = query & q
if body.gender and body.gender != 1:
        q = ((ptable.gender == None) |
             (ptable.gender == 1) |
             (ptable.gender == body.gender))
        query = query & q
    return query
def tooltip():
formfield = request.vars.get("formfield", None)
if formfield:
response.view = "pr/ajaxtips/%s.html" % formfield
return dict()
| true
| true
|
79087bb1eb257ab337d645a8213c370d32478ede
| 8,037
|
py
|
Python
|
tests/support/test_collection.py
|
fenestron/lorator
|
1966f095a67ae65e816b1c8f7359b9f203cd5c4f
|
[
"MIT"
] | null | null | null |
tests/support/test_collection.py
|
fenestron/lorator
|
1966f095a67ae65e816b1c8f7359b9f203cd5c4f
|
[
"MIT"
] | null | null | null |
tests/support/test_collection.py
|
fenestron/lorator
|
1966f095a67ae65e816b1c8f7359b9f203cd5c4f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .. import OratorTestCase
from lorator.support.collection import Collection
class CollectionTestCase(OratorTestCase):
def test_first_returns_first_item_in_collection(self):
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.first())
def test_last_returns_last_item_in_collection(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c.last())
def test_pop_removes_and_returns_last_item_or_specified_index(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c.pop())
self.assertEqual("foo", c.last())
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.pop(0))
self.assertEqual("bar", c.first())
def test_shift_removes_and_returns_first_item(self):
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.shift())
self.assertEqual("bar", c.first())
def test_empty_collection_is_empty(self):
c = Collection()
c2 = Collection([])
self.assertTrue(c.is_empty())
self.assertTrue(c2.is_empty())
def test_collection_is_constructed(self):
c = Collection("foo")
self.assertEqual(["foo"], c.all())
c = Collection(2)
self.assertEqual([2], c.all())
c = Collection(False)
self.assertEqual([False], c.all())
c = Collection(None)
self.assertEqual([], c.all())
c = Collection()
self.assertEqual([], c.all())
def test_offset_access(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c[1])
c[1] = "baz"
self.assertEqual("baz", c[1])
del c[0]
self.assertEqual("baz", c[0])
def test_forget(self):
c = Collection(["foo", "bar", "boom"])
c.forget(0)
self.assertEqual("bar", c[0])
c.forget(0, 1)
self.assertTrue(c.is_empty())
def test_get_avg_items_from_collection(self):
c = Collection([{"foo": 10}, {"foo": 20}])
self.assertEqual(15, c.avg("foo"))
c = Collection([1, 2, 3, 4, 5])
self.assertEqual(3, c.avg())
c = Collection()
self.assertIsNone(c.avg())
def test_collapse(self):
obj1 = object()
obj2 = object()
c = Collection([[obj1], [obj2]])
self.assertEqual([obj1, obj2], c.collapse().all())
def test_collapse_with_nested_collection(self):
c = Collection([Collection([1, 2, 3]), Collection([4, 5, 6])])
self.assertEqual([1, 2, 3, 4, 5, 6], c.collapse().all())
def test_contains(self):
c = Collection([1, 3, 5])
self.assertTrue(c.contains(1))
self.assertFalse(c.contains(2))
self.assertTrue(c.contains(lambda x: x < 5))
self.assertFalse(c.contains(lambda x: x > 5))
self.assertIn(3, c)
c = Collection([{"v": 1}, {"v": 3}, {"v": 5}])
self.assertTrue(c.contains("v", 1))
self.assertFalse(c.contains("v", 2))
obj1 = type("lamdbaobject", (object,), {})()
obj1.v = 1
obj2 = type("lamdbaobject", (object,), {})()
obj2.v = 3
obj3 = type("lamdbaobject", (object,), {})()
obj3.v = 5
c = Collection([{"v": 1}, {"v": 3}, {"v": 5}])
self.assertTrue(c.contains("v", 1))
self.assertFalse(c.contains("v", 2))
def test_countable(self):
c = Collection(["foo", "bar"])
self.assertEqual(2, c.count())
self.assertEqual(2, len(c))
def test_diff(self):
c = Collection(["foo", "bar"])
self.assertEqual(["foo"], c.diff(Collection(["bar", "baz"])).all())
def test_each(self):
original = ["foo", "bar", "baz"]
c = Collection(original)
result = []
c.each(lambda x: result.append(x))
self.assertEqual(result, original)
self.assertEqual(original, c.all())
def test_every(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 3, 5], c.every(2).all())
self.assertEqual([2, 4, 6], c.every(2, 1).all())
def test_filter(self):
c = Collection([{"id": 1, "name": "hello"}, {"id": 2, "name": "world"}])
self.assertEqual(
[{"id": 2, "name": "world"}], c.filter(lambda item: item["id"] == 2).all()
)
c = Collection(["", "hello", "", "world"])
self.assertEqual(["hello", "world"], c.filter().all())
def test_where(self):
c = Collection([{"v": 1}, {"v": 3}, {"v": 2}, {"v": 3}, {"v": 4}])
self.assertEqual([{"v": 3}, {"v": 3}], c.where("v", 3).all())
def test_implode(self):
obj1 = type("lamdbaobject", (object,), {})()
obj1.name = "john"
obj1.email = "foo"
c = Collection(
[{"name": "john", "email": "foo"}, {"name": "jane", "email": "bar"}]
)
self.assertEqual("foobar", c.implode("email"))
self.assertEqual("foo,bar", c.implode("email", ","))
c = Collection(["foo", "bar"])
self.assertEqual("foobar", c.implode(""))
self.assertEqual("foo,bar", c.implode(","))
def test_lists(self):
obj1 = type("lamdbaobject", (object,), {})()
obj1.name = "john"
obj1.email = "foo"
c = Collection([obj1, {"name": "jane", "email": "bar"}])
self.assertEqual({"john": "foo", "jane": "bar"}, c.lists("email", "name"))
self.assertEqual(["foo", "bar"], c.pluck("email").all())
def test_map(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([3, 4, 5, 6, 7], c.map(lambda x: x + 2).all())
def test_merge(self):
c = Collection([1, 2, 3])
c.merge([4, 5, 6])
self.assertEqual([1, 2, 3, 4, 5, 6], c.all())
c = Collection(Collection([1, 2, 3]))
c.merge([4, 5, 6])
self.assertEqual([1, 2, 3, 4, 5, 6], c.all())
def test_for_page(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([4, 5, 6], c.for_page(2, 3).all())
self.assertEqual([5, 6], c.for_page(2, 4).all())
def test_prepend(self):
c = Collection([4, 5, 6])
c.prepend(3)
self.assertEqual([3, 4, 5, 6], c.all())
def test_append(self):
c = Collection([3, 4, 5])
c.append(6)
self.assertEqual([3, 4, 5, 6], c.all())
def test_pull(self):
c = Collection([1, 2, 3, 4])
c.pull(2)
self.assertEqual([1, 2, 4], c.all())
def test_put(self):
c = Collection([1, 2, 4])
c.put(2, 3)
self.assertEqual([1, 2, 3], c.all())
def test_reject(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 2, 3], c.reject(lambda x: x > 3).all())
def test_reverse(self):
c = Collection([1, 2, 3, 4])
self.assertEqual([4, 3, 2, 1], c.reverse().all())
def test_sort(self):
c = Collection([5, 3, 1, 2, 4])
        sorted_items = c.sort(lambda x: x)
        self.assertEqual([1, 2, 3, 4, 5], sorted_items.all())
def test_take(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 2, 3], c.take(3).all())
self.assertEqual([4, 5, 6], c.take(-3).all())
def test_transform(self):
c = Collection([1, 2, 3, 4])
c.transform(lambda x: x + 2)
self.assertEqual([3, 4, 5, 6], c.all())
def test_zip(self):
c = Collection([1, 2, 3])
self.assertEqual([(1, 4), (2, 5), (3, 6)], c.zip([4, 5, 6]).all())
def test_only(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([2, 4], c.only(1, 3).all())
def test_without(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([1, 3, 5], c.without(1, 3).all())
self.assertEqual([1, 2, 3, 4, 5], c.all())
def test_flatten(self):
c = Collection({"foo": [5, 6], "bar": 7, "baz": {"boom": [1, 2, 3, 4]}})
self.assertEqual([1, 2, 3, 4, 5, 6, 7], c.flatten().sort().all())
c = Collection([1, [2, 3], 4])
self.assertEqual([1, 2, 3, 4], c.flatten().all())
| 31.030888
| 86
| 0.520343
|
from .. import OratorTestCase
from lorator.support.collection import Collection
class CollectionTestCase(OratorTestCase):
def test_first_returns_first_item_in_collection(self):
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.first())
def test_last_returns_last_item_in_collection(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c.last())
def test_pop_removes_and_returns_last_item_or_specified_index(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c.pop())
self.assertEqual("foo", c.last())
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.pop(0))
self.assertEqual("bar", c.first())
def test_shift_removes_and_returns_first_item(self):
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.shift())
self.assertEqual("bar", c.first())
def test_empty_collection_is_empty(self):
c = Collection()
c2 = Collection([])
self.assertTrue(c.is_empty())
self.assertTrue(c2.is_empty())
def test_collection_is_constructed(self):
c = Collection("foo")
self.assertEqual(["foo"], c.all())
c = Collection(2)
self.assertEqual([2], c.all())
c = Collection(False)
self.assertEqual([False], c.all())
c = Collection(None)
self.assertEqual([], c.all())
c = Collection()
self.assertEqual([], c.all())
def test_offset_access(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c[1])
c[1] = "baz"
self.assertEqual("baz", c[1])
del c[0]
self.assertEqual("baz", c[0])
def test_forget(self):
c = Collection(["foo", "bar", "boom"])
c.forget(0)
self.assertEqual("bar", c[0])
c.forget(0, 1)
self.assertTrue(c.is_empty())
def test_get_avg_items_from_collection(self):
c = Collection([{"foo": 10}, {"foo": 20}])
self.assertEqual(15, c.avg("foo"))
c = Collection([1, 2, 3, 4, 5])
self.assertEqual(3, c.avg())
c = Collection()
self.assertIsNone(c.avg())
def test_collapse(self):
obj1 = object()
obj2 = object()
c = Collection([[obj1], [obj2]])
self.assertEqual([obj1, obj2], c.collapse().all())
def test_collapse_with_nested_collection(self):
c = Collection([Collection([1, 2, 3]), Collection([4, 5, 6])])
self.assertEqual([1, 2, 3, 4, 5, 6], c.collapse().all())
def test_contains(self):
c = Collection([1, 3, 5])
self.assertTrue(c.contains(1))
self.assertFalse(c.contains(2))
self.assertTrue(c.contains(lambda x: x < 5))
self.assertFalse(c.contains(lambda x: x > 5))
self.assertIn(3, c)
c = Collection([{"v": 1}, {"v": 3}, {"v": 5}])
self.assertTrue(c.contains("v", 1))
self.assertFalse(c.contains("v", 2))
obj1 = type("lamdbaobject", (object,), {})()
obj1.v = 1
obj2 = type("lamdbaobject", (object,), {})()
obj2.v = 3
obj3 = type("lamdbaobject", (object,), {})()
obj3.v = 5
c = Collection([{"v": 1}, {"v": 3}, {"v": 5}])
self.assertTrue(c.contains("v", 1))
self.assertFalse(c.contains("v", 2))
def test_countable(self):
c = Collection(["foo", "bar"])
self.assertEqual(2, c.count())
self.assertEqual(2, len(c))
def test_diff(self):
c = Collection(["foo", "bar"])
self.assertEqual(["foo"], c.diff(Collection(["bar", "baz"])).all())
def test_each(self):
original = ["foo", "bar", "baz"]
c = Collection(original)
result = []
c.each(lambda x: result.append(x))
self.assertEqual(result, original)
self.assertEqual(original, c.all())
def test_every(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 3, 5], c.every(2).all())
self.assertEqual([2, 4, 6], c.every(2, 1).all())
def test_filter(self):
c = Collection([{"id": 1, "name": "hello"}, {"id": 2, "name": "world"}])
self.assertEqual(
[{"id": 2, "name": "world"}], c.filter(lambda item: item["id"] == 2).all()
)
c = Collection(["", "hello", "", "world"])
self.assertEqual(["hello", "world"], c.filter().all())
def test_where(self):
c = Collection([{"v": 1}, {"v": 3}, {"v": 2}, {"v": 3}, {"v": 4}])
self.assertEqual([{"v": 3}, {"v": 3}], c.where("v", 3).all())
def test_implode(self):
obj1 = type("lamdbaobject", (object,), {})()
obj1.name = "john"
obj1.email = "foo"
c = Collection(
[{"name": "john", "email": "foo"}, {"name": "jane", "email": "bar"}]
)
self.assertEqual("foobar", c.implode("email"))
self.assertEqual("foo,bar", c.implode("email", ","))
c = Collection(["foo", "bar"])
self.assertEqual("foobar", c.implode(""))
self.assertEqual("foo,bar", c.implode(","))
def test_lists(self):
obj1 = type("lamdbaobject", (object,), {})()
obj1.name = "john"
obj1.email = "foo"
c = Collection([obj1, {"name": "jane", "email": "bar"}])
self.assertEqual({"john": "foo", "jane": "bar"}, c.lists("email", "name"))
self.assertEqual(["foo", "bar"], c.pluck("email").all())
def test_map(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([3, 4, 5, 6, 7], c.map(lambda x: x + 2).all())
def test_merge(self):
c = Collection([1, 2, 3])
c.merge([4, 5, 6])
self.assertEqual([1, 2, 3, 4, 5, 6], c.all())
c = Collection(Collection([1, 2, 3]))
c.merge([4, 5, 6])
self.assertEqual([1, 2, 3, 4, 5, 6], c.all())
def test_for_page(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([4, 5, 6], c.for_page(2, 3).all())
self.assertEqual([5, 6], c.for_page(2, 4).all())
def test_prepend(self):
c = Collection([4, 5, 6])
c.prepend(3)
self.assertEqual([3, 4, 5, 6], c.all())
def test_append(self):
c = Collection([3, 4, 5])
c.append(6)
self.assertEqual([3, 4, 5, 6], c.all())
def test_pull(self):
c = Collection([1, 2, 3, 4])
c.pull(2)
self.assertEqual([1, 2, 4], c.all())
def test_put(self):
c = Collection([1, 2, 4])
c.put(2, 3)
self.assertEqual([1, 2, 3], c.all())
def test_reject(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 2, 3], c.reject(lambda x: x > 3).all())
def test_reverse(self):
c = Collection([1, 2, 3, 4])
self.assertEqual([4, 3, 2, 1], c.reverse().all())
def test_sort(self):
c = Collection([5, 3, 1, 2, 4])
        sorted_items = c.sort(lambda x: x)
        self.assertEqual([1, 2, 3, 4, 5], sorted_items.all())
def test_take(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 2, 3], c.take(3).all())
self.assertEqual([4, 5, 6], c.take(-3).all())
def test_transform(self):
c = Collection([1, 2, 3, 4])
c.transform(lambda x: x + 2)
self.assertEqual([3, 4, 5, 6], c.all())
def test_zip(self):
c = Collection([1, 2, 3])
self.assertEqual([(1, 4), (2, 5), (3, 6)], c.zip([4, 5, 6]).all())
def test_only(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([2, 4], c.only(1, 3).all())
def test_without(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([1, 3, 5], c.without(1, 3).all())
self.assertEqual([1, 2, 3, 4, 5], c.all())
def test_flatten(self):
c = Collection({"foo": [5, 6], "bar": 7, "baz": {"boom": [1, 2, 3, 4]}})
self.assertEqual([1, 2, 3, 4, 5, 6, 7], c.flatten().sort().all())
c = Collection([1, [2, 3], 4])
self.assertEqual([1, 2, 3, 4], c.flatten().all())
| true
| true
|
79087c01aa289bbd0ea27566b97b960f8cd46143
| 66
|
py
|
Python
|
Term 5/Vue/p.py
|
theseana/fempfasb
|
bb6776f9ea00225d4653097f746b866de8ffac1b
|
[
"MIT"
] | 1
|
2022-01-16T00:31:01.000Z
|
2022-01-16T00:31:01.000Z
|
Term 5/Vue/p.py
|
theseana/fempfasb
|
bb6776f9ea00225d4653097f746b866de8ffac1b
|
[
"MIT"
] | null | null | null |
Term 5/Vue/p.py
|
theseana/fempfasb
|
bb6776f9ea00225d4653097f746b866de8ffac1b
|
[
"MIT"
] | null | null | null |
todos = ['barber', 'grocery']
for todo in todos:
print(todo)
| 16.5
| 30
| 0.621212
|
todos = ['barber', 'grocery']
for todo in todos:
print(todo)
| true
| true
|
79087d50bf1f82f1ab98d792904d760d1d4f670b
| 415
|
py
|
Python
|
packages/setup.py
|
TheWorldOfCode/MWS
|
7a8b7dbc88e80880f0fa9bfa895a624ab11f4093
|
[
"BSD-3-Clause"
] | null | null | null |
packages/setup.py
|
TheWorldOfCode/MWS
|
7a8b7dbc88e80880f0fa9bfa895a624ab11f4093
|
[
"BSD-3-Clause"
] | null | null | null |
packages/setup.py
|
TheWorldOfCode/MWS
|
7a8b7dbc88e80880f0fa9bfa895a624ab11f4093
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup
setup(name='mws',
version='0.2',
description='Multi window sender',
url='https://github.com/TheWorldOfCode/MWS',
author='TheWorldOfCode',
author_email='dannj75@gmail.com',
install_requires=[
"python-daemon>=2.2.4",
"python-xlib>=0.27"
],
license='BSD',
packages=['mws'],
zip_safe=False)
| 24.411765
| 50
| 0.561446
|
from setuptools import setup
setup(name='mws',
version='0.2',
description='Multi window sender',
url='https://github.com/TheWorldOfCode/MWS',
author='TheWorldOfCode',
author_email='dannj75@gmail.com',
install_requires=[
"python-daemon>=2.2.4",
"python-xlib>=0.27"
],
license='BSD',
packages=['mws'],
zip_safe=False)
| true
| true
|
79087d5ebdcbd6a611a5a6f3ef36637a0d631622
| 28,709
|
py
|
Python
|
latex2sympy.py
|
ConsoleBit/latex2sympy
|
88eb1e0a42beb62c41601c9d5154253c3e712a8d
|
[
"MIT"
] | null | null | null |
latex2sympy.py
|
ConsoleBit/latex2sympy
|
88eb1e0a42beb62c41601c9d5154253c3e712a8d
|
[
"MIT"
] | null | null | null |
latex2sympy.py
|
ConsoleBit/latex2sympy
|
88eb1e0a42beb62c41601c9d5154253c3e712a8d
|
[
"MIT"
] | null | null | null |
import sympy
import antlr4
from antlr4.error.ErrorListener import ErrorListener
from sympy.core.operations import AssocOp
try:
from gen.PSParser import PSParser
from gen.PSLexer import PSLexer
from gen.PSListener import PSListener
except Exception:
from .gen.PSParser import PSParser
from .gen.PSLexer import PSLexer
from .gen.PSListener import PSListener
from sympy.printing.str import StrPrinter
from sympy.parsing.sympy_parser import parse_expr
import hashlib
VARIABLE_VALUES = {}
def process_sympy(sympy, variable_values={}):
# variable values
global VARIABLE_VALUES
if len(variable_values) > 0:
VARIABLE_VALUES = variable_values
else:
VARIABLE_VALUES = {}
# setup listener
matherror = MathErrorListener(sympy)
# stream input
stream = antlr4.InputStream(sympy)
lex = PSLexer(stream)
lex.removeErrorListeners()
lex.addErrorListener(matherror)
tokens = antlr4.CommonTokenStream(lex)
parser = PSParser(tokens)
# remove default console error listener
parser.removeErrorListeners()
parser.addErrorListener(matherror)
# process the input
return_data = None
math = parser.math()
# if a list
if math.relation_list():
return_data = []
# go over list items
relation_list = math.relation_list().relation_list_content()
for list_item in relation_list.relation():
expr = convert_relation(list_item)
return_data.append(expr)
# if not, do default
else:
relation = math.relation()
return_data = convert_relation(relation)
return return_data
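
# Usage sketch (editor's addition, illustrative only; assumes the
# ANTLR-generated PSLexer/PSParser modules are importable):
#   process_sympy("\\frac{x}{2} + 1")             # unevaluated expr equal to x/2 + 1
#   process_sympy("\\variable{a} + b", {"a": 1})  # known variable values substituted
# A comma-separated relation list such as "x = y, y = z" returns a Python
# list of relational expressions instead of a single expression.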
class MathErrorListener(ErrorListener):
def __init__(self, src):
        super(MathErrorListener, self).__init__()
self.src = src
def syntaxError(self, recog, symbol, line, col, msg, e):
fmt = "%s\n%s\n%s"
marker = "~" * col + "^"
if msg.startswith("missing"):
err = fmt % (msg, self.src, marker)
elif msg.startswith("no viable"):
err = fmt % ("I expected something else here", self.src, marker)
elif msg.startswith("mismatched"):
names = PSParser.literalNames
expected = [names[i] for i in e.getExpectedTokens() if i < len(names)]
if len(expected) < 10:
expected = " ".join(expected)
err = (fmt % ("I expected one of these: " + expected,
self.src, marker))
else:
err = (fmt % ("I expected something else here", self.src, marker))
else:
err = fmt % ("I don't understand this", self.src, marker)
raise Exception(err)
def convert_relation(rel):
if rel.expr():
return convert_expr(rel.expr())
lh = convert_relation(rel.relation(0))
rh = convert_relation(rel.relation(1))
if rel.LT():
return sympy.StrictLessThan(lh, rh, evaluate=False)
elif rel.LTE():
return sympy.LessThan(lh, rh, evaluate=False)
elif rel.GT():
return sympy.StrictGreaterThan(lh, rh, evaluate=False)
elif rel.GTE():
return sympy.GreaterThan(lh, rh, evaluate=False)
elif rel.EQUAL():
return sympy.Eq(lh, rh, evaluate=False)
elif rel.UNEQUAL():
return sympy.Ne(lh, rh, evaluate=False)
def convert_expr(expr):
if expr.additive():
return convert_add(expr.additive())
def convert_matrix(matrix):
# build matrix
row = matrix.matrix_row()
tmp = []
rows = 0
for r in row:
tmp.append([])
for expr in r.expr():
tmp[rows].append(convert_expr(expr))
rows = rows + 1
# return the matrix
return sympy.Matrix(tmp)
def add_flat(lh, rh):
if hasattr(lh, 'is_Add') and lh.is_Add or hasattr(rh, 'is_Add') and rh.is_Add:
args = []
if hasattr(lh, 'is_Add') and lh.is_Add:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_Add') and rh.is_Add:
args = args + list(rh.args)
else:
args += [rh]
return sympy.Add(*args, evaluate=False)
else:
return sympy.Add(lh, rh, evaluate=False)
def mat_add_flat(lh, rh):
if hasattr(lh, 'is_MatAdd') and lh.is_MatAdd or hasattr(rh, 'is_MatAdd') and rh.is_MatAdd:
args = []
if hasattr(lh, 'is_MatAdd') and lh.is_MatAdd:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_MatAdd') and rh.is_MatAdd:
args = args + list(rh.args)
else:
args += [rh]
return sympy.MatAdd(*args, evaluate=False)
else:
return sympy.MatAdd(lh, rh, evaluate=False)
def mul_flat(lh, rh):
if hasattr(lh, 'is_Mul') and lh.is_Mul or hasattr(rh, 'is_Mul') and rh.is_Mul:
args = []
if hasattr(lh, 'is_Mul') and lh.is_Mul:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_Mul') and rh.is_Mul:
args = args + list(rh.args)
else:
args += [rh]
return sympy.Mul(*args, evaluate=False)
else:
return sympy.Mul(lh, rh, evaluate=False)
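
# Editor's note (illustrative): the *_flat helpers keep chained sums and
# products as one flat, unevaluated node instead of a nested binary tree,
# e.g. mul_flat(mul_flat(a, b), c) yields Mul(a, b, c, evaluate=False)
# rather than Mul(Mul(a, b), c), which keeps argument order and printing
# stable for round-tripping back to LaTeX.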
def mat_mul_flat(lh, rh):
if hasattr(lh, 'is_MatMul') and lh.is_MatMul or hasattr(rh, 'is_MatMul') and rh.is_MatMul:
args = []
if hasattr(lh, 'is_MatMul') and lh.is_MatMul:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_MatMul') and rh.is_MatMul:
args = args + list(rh.args)
else:
args += [rh]
return sympy.MatMul(*args, evaluate=False)
else:
return sympy.MatMul(lh, rh, evaluate=False)
def convert_add(add):
if add.ADD():
lh = convert_add(add.additive(0))
rh = convert_add(add.additive(1))
if lh.is_Matrix or rh.is_Matrix:
return mat_add_flat(lh, rh)
else:
return add_flat(lh, rh)
elif add.SUB():
lh = convert_add(add.additive(0))
rh = convert_add(add.additive(1))
if lh.is_Matrix or rh.is_Matrix:
return mat_add_flat(lh, mat_mul_flat(-1, rh))
else:
# If we want to force ordering for variables this should be:
# return Sub(lh, rh, evaluate=False)
if not rh.is_Matrix and rh.func.is_Number:
rh = -rh
else:
rh = mul_flat(-1, rh)
return add_flat(lh, rh)
else:
return convert_mp(add.mp())
def convert_mp(mp):
if hasattr(mp, 'mp'):
mp_left = mp.mp(0)
mp_right = mp.mp(1)
else:
mp_left = mp.mp_nofunc(0)
mp_right = mp.mp_nofunc(1)
if mp.MUL() or mp.CMD_TIMES() or mp.CMD_CDOT():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
if lh.is_Matrix or rh.is_Matrix:
return mat_mul_flat(lh, rh)
else:
return mul_flat(lh, rh)
elif mp.DIV() or mp.CMD_DIV() or mp.COLON():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
if lh.is_Matrix or rh.is_Matrix:
return sympy.MatMul(lh, sympy.Pow(rh, -1, evaluate=False), evaluate=False)
else:
return sympy.Mul(lh, sympy.Pow(rh, -1, evaluate=False), evaluate=False)
elif mp.CMD_MOD():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
if rh.is_Matrix:
raise Exception("Cannot perform modulo operation with a matrix as an operand")
else:
return sympy.Mod(lh, rh, evaluate=False)
else:
if hasattr(mp, 'unary'):
return convert_unary(mp.unary())
else:
return convert_unary(mp.unary_nofunc())
def convert_unary(unary):
if hasattr(unary, 'unary'):
nested_unary = unary.unary()
else:
nested_unary = unary.unary_nofunc()
if hasattr(unary, 'postfix_nofunc'):
first = unary.postfix()
tail = unary.postfix_nofunc()
postfix = [first] + tail
else:
postfix = unary.postfix()
if unary.ADD():
return convert_unary(nested_unary)
elif unary.SUB():
tmp_convert_nested_unary = convert_unary(nested_unary)
if tmp_convert_nested_unary.is_Matrix:
            return mat_mul_flat(-1, tmp_convert_nested_unary)  # mat_mul_flat already applies evaluate=False
else:
if tmp_convert_nested_unary.func.is_Number:
return -tmp_convert_nested_unary
else:
return mul_flat(-1, tmp_convert_nested_unary)
elif postfix:
return convert_postfix_list(postfix)
def convert_postfix_list(arr, i=0):
if i >= len(arr):
raise Exception("Index out of bounds")
res = convert_postfix(arr[i])
if isinstance(res, sympy.Expr) or isinstance(res, sympy.Matrix) or res is sympy.S.EmptySet:
if i == len(arr) - 1:
return res # nothing to multiply by
else:
# multiply by next
rh = convert_postfix_list(arr, i + 1)
if res.is_Matrix or rh.is_Matrix:
return mat_mul_flat(res, rh)
else:
return mul_flat(res, rh)
else: # must be derivative
wrt = res[0]
if i == len(arr) - 1:
raise Exception("Expected expression for derivative")
else:
expr = convert_postfix_list(arr, i + 1)
return sympy.Derivative(expr, wrt)
def do_subs(expr, at):
if at.expr():
at_expr = convert_expr(at.expr())
syms = at_expr.atoms(sympy.Symbol)
if len(syms) == 0:
return expr
elif len(syms) > 0:
sym = next(iter(syms))
return expr.subs(sym, at_expr)
elif at.equality():
lh = convert_expr(at.equality().expr(0))
rh = convert_expr(at.equality().expr(1))
return expr.subs(lh, rh)
def convert_postfix(postfix):
if hasattr(postfix, 'exp'):
exp_nested = postfix.exp()
else:
exp_nested = postfix.exp_nofunc()
exp = convert_exp(exp_nested)
for op in postfix.postfix_op():
if op.BANG():
if isinstance(exp, list):
raise Exception("Cannot apply postfix to derivative")
exp = sympy.factorial(exp, evaluate=False)
elif op.eval_at():
ev = op.eval_at()
at_b = None
at_a = None
if ev.eval_at_sup():
at_b = do_subs(exp, ev.eval_at_sup())
if ev.eval_at_sub():
at_a = do_subs(exp, ev.eval_at_sub())
if at_b is not None and at_a is not None:
exp = add_flat(at_b, mul_flat(at_a, -1))
elif at_b is not None:
exp = at_b
elif at_a is not None:
exp = at_a
return exp
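
# Example (editor's sketch): for an evaluation bar such as "x^2|_1^3" (the
# grammar's eval_at rule) this substitutes both bounds and returns the
# unevaluated difference add_flat(value_at_3, mul_flat(value_at_1, -1)),
# while "5!" takes the BANG branch and yields factorial(5, evaluate=False).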
def convert_exp(exp):
if hasattr(exp, 'exp'):
exp_nested = exp.exp()
else:
exp_nested = exp.exp_nofunc()
if exp_nested:
base = convert_exp(exp_nested)
if isinstance(base, list):
raise Exception("Cannot raise derivative to power")
if exp.atom():
exponent = convert_atom(exp.atom())
elif exp.expr():
exponent = convert_expr(exp.expr())
return sympy.Pow(base, exponent, evaluate=False)
else:
if hasattr(exp, 'comp'):
return convert_comp(exp.comp())
else:
return convert_comp(exp.comp_nofunc())
def convert_comp(comp):
if comp.group():
return convert_expr(comp.group().expr())
elif comp.abs_group():
return sympy.Abs(convert_expr(comp.abs_group().expr()), evaluate=False)
elif comp.floor_group():
return handle_floor(convert_expr(comp.floor_group().expr()))
elif comp.ceil_group():
return handle_ceil(convert_expr(comp.ceil_group().expr()))
elif comp.atom():
return convert_atom(comp.atom())
elif comp.frac():
return convert_frac(comp.frac())
elif comp.binom():
return convert_binom(comp.binom())
elif comp.matrix():
return convert_matrix(comp.matrix())
elif comp.func():
return convert_func(comp.func())
def convert_atom(atom):
if atom.LETTER_NO_E():
subscriptName = ''
s = atom.LETTER_NO_E().getText()
if s == "I":
return sympy.I
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = '_{' + StrPrinter().doprint(subscript) + '}'
return sympy.Symbol(atom.LETTER_NO_E().getText() + subscriptName, real=True)
elif atom.GREEK_LETTER():
s = atom.GREEK_LETTER().getText()[1:]
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = StrPrinter().doprint(subscript)
s += '_{' + subscriptName + '}'
return sympy.Symbol(s, real=True)
elif atom.accent():
# get name for accent
name = atom.accent().start.text[1:]
# exception: check if bar or overline which are treated both as bar
if name in ["bar", "overline"]:
name = "bar"
# get the base (variable)
base = atom.accent().base.getText()
# set string to base+name
s = base + name
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = StrPrinter().doprint(subscript)
s += '_{' + subscriptName + '}'
return sympy.Symbol(s, real=True)
elif atom.SYMBOL():
s = atom.SYMBOL().getText().replace("\\$", "").replace("\\%", "")
if s == "\\infty":
return sympy.oo
elif s == '\\pi':
return sympy.pi
elif s == '\\emptyset':
return sympy.S.EmptySet
else:
raise Exception("Unrecognized symbol")
elif atom.NUMBER():
s = atom.NUMBER().getText().replace(",", "")
try:
sr = sympy.Rational(s)
return sr
except (TypeError, ValueError):
return sympy.Number(s)
elif atom.E_NOTATION():
s = atom.E_NOTATION().getText().replace(",", "")
try:
sr = sympy.Rational(s)
return sr
except (TypeError, ValueError):
return sympy.Number(s)
elif atom.DIFFERENTIAL():
var = get_differential_var(atom.DIFFERENTIAL())
return sympy.Symbol('d' + var.name, real=True)
elif atom.mathit():
text = rule2text(atom.mathit().mathit_text())
return sympy.Symbol(text, real=True)
elif atom.VARIABLE():
text = atom.VARIABLE().getText()
is_percent = text.endswith("\\%")
trim_amount = 3 if is_percent else 1
name = text[10:]
name = name[0:len(name) - trim_amount]
# add hash to distinguish from regular symbols
# hash = hashlib.md5(name.encode()).hexdigest()
# symbol_name = name + hash
symbol_name = name
# replace the variable for already known variable values
if name in VARIABLE_VALUES:
# if a sympy class
if isinstance(VARIABLE_VALUES[name], tuple(sympy.core.all_classes)):
symbol = VARIABLE_VALUES[name]
# if NOT a sympy class
else:
symbol = parse_expr(str(VARIABLE_VALUES[name]))
else:
symbol = sympy.Symbol(symbol_name, real=True)
if is_percent:
return sympy.Mul(symbol, sympy.Pow(100, -1, evaluate=False), evaluate=False)
# return the symbol
return symbol
elif atom.PERCENT_NUMBER():
text = atom.PERCENT_NUMBER().getText().replace("\\%", "").replace(",", "")
try:
number = sympy.Rational(text)
except (TypeError, ValueError):
number = sympy.Number(text)
percent = sympy.Rational(number, 100)
return percent
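
# Example (editor's sketch): "50\\%" parses to Rational(50, 100), i.e. 1/2,
# and "\\variable{rate}\\%" wraps the symbol as
# Mul(rate, Pow(100, -1, evaluate=False), evaluate=False).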
def rule2text(ctx):
stream = ctx.start.getInputStream()
# starting index of starting token
startIdx = ctx.start.start
# stopping index of stopping token
stopIdx = ctx.stop.stop
return stream.getText(startIdx, stopIdx)
def convert_frac(frac):
diff_op = False
partial_op = False
lower_itv = frac.lower.getSourceInterval()
lower_itv_len = lower_itv[1] - lower_itv[0] + 1
if (frac.lower.start == frac.lower.stop and
frac.lower.start.type == PSLexer.DIFFERENTIAL):
wrt = get_differential_var_str(frac.lower.start.text)
diff_op = True
elif (lower_itv_len == 2 and
frac.lower.start.type == PSLexer.SYMBOL and
frac.lower.start.text == '\\partial' and
(frac.lower.stop.type == PSLexer.LETTER_NO_E or frac.lower.stop.type == PSLexer.SYMBOL)):
partial_op = True
wrt = frac.lower.stop.text
if frac.lower.stop.type == PSLexer.SYMBOL:
wrt = wrt[1:]
if diff_op or partial_op:
wrt = sympy.Symbol(wrt, real=True)
if (diff_op and frac.upper.start == frac.upper.stop and
frac.upper.start.type == PSLexer.LETTER_NO_E and
frac.upper.start.text == 'd'):
return [wrt]
elif (partial_op and frac.upper.start == frac.upper.stop and
frac.upper.start.type == PSLexer.SYMBOL and
frac.upper.start.text == '\\partial'):
return [wrt]
upper_text = rule2text(frac.upper)
expr_top = None
if diff_op and upper_text.startswith('d'):
expr_top = process_sympy(upper_text[1:])
elif partial_op and frac.upper.start.text == '\\partial':
expr_top = process_sympy(upper_text[len('\\partial'):])
if expr_top:
return sympy.Derivative(expr_top, wrt)
expr_top = convert_expr(frac.upper)
expr_bot = convert_expr(frac.lower)
if expr_top.is_Matrix or expr_bot.is_Matrix:
return sympy.MatMul(expr_top, sympy.Pow(expr_bot, -1, evaluate=False), evaluate=False)
else:
return sympy.Mul(expr_top, sympy.Pow(expr_bot, -1, evaluate=False), evaluate=False)
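
# Example (editor's sketch): "\\frac{d}{dx} x^2" hits the diff_op branch and
# returns [Symbol('x')], which convert_postfix_list turns into
# Derivative(x**2, x); "\\frac{dy}{dx}" parses the numerator remainder and
# returns Derivative(y, x) directly; a plain "\\frac{a}{b}" falls through to
# Mul(a, Pow(b, -1, evaluate=False), evaluate=False).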
def convert_binom(binom):
expr_top = convert_expr(binom.upper)
expr_bot = convert_expr(binom.lower)
return sympy.binomial(expr_top, expr_bot)
def convert_func(func):
if func.func_normal_single_arg():
if func.L_PAREN(): # function called with parenthesis
arg = convert_func_arg(func.func_single_arg())
else:
arg = convert_func_arg(func.func_single_arg_noparens())
name = func.func_normal_single_arg().start.text[1:]
# change arc<trig> -> a<trig>
if name in ["arcsin", "arccos", "arctan", "arccsc", "arcsec",
"arccot"]:
name = "a" + name[3:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
elif name in ["arsinh", "arcosh", "artanh"]:
name = "a" + name[2:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
elif name in ["arcsinh", "arccosh", "arctanh"]:
name = "a" + name[3:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
elif name == "operatorname":
operatorname = func.func_normal_single_arg().func_operator_name.getText()
if operatorname in ["arsinh", "arcosh", "artanh"]:
operatorname = "a" + operatorname[2:]
expr = getattr(sympy.functions, operatorname)(arg, evaluate=False)
elif operatorname in ["arcsinh", "arccosh", "arctanh"]:
operatorname = "a" + operatorname[3:]
expr = getattr(sympy.functions, operatorname)(arg, evaluate=False)
elif operatorname == "floor":
expr = handle_floor(arg)
elif operatorname == "ceil":
expr = handle_ceil(arg)
elif name in ["log", "ln"]:
if func.subexpr():
if func.subexpr().atom():
base = convert_atom(func.subexpr().atom())
else:
base = convert_expr(func.subexpr().expr())
elif name == "log":
base = 10
elif name == "ln":
base = sympy.E
expr = sympy.log(arg, base, evaluate=False)
elif name in ["exp", "exponentialE"]:
expr = sympy.exp(arg)
elif name == "floor":
expr = handle_floor(arg)
elif name == "ceil":
expr = handle_ceil(arg)
func_pow = None
should_pow = True
if func.supexpr():
if func.supexpr().expr():
func_pow = convert_expr(func.supexpr().expr())
else:
func_pow = convert_atom(func.supexpr().atom())
if name in ["sin", "cos", "tan", "csc", "sec", "cot", "sinh", "cosh", "tanh"]:
if func_pow == -1:
name = "a" + name
should_pow = False
expr = getattr(sympy.functions, name)(arg, evaluate=False)
if func_pow and should_pow:
expr = sympy.Pow(expr, func_pow, evaluate=False)
return expr
elif func.func_normal_multi_arg():
if func.L_PAREN(): # function called with parenthesis
args = func.func_multi_arg().getText().split(",")
else:
            args = func.func_multi_arg_noparens().getText().split(",")
args = list(map(lambda arg: process_sympy(arg, VARIABLE_VALUES), args))
name = func.func_normal_multi_arg().start.text[1:]
if name == "operatorname":
operatorname = func.func_normal_multi_arg().func_operator_name.getText()
if operatorname in ["gcd", "lcm"]:
expr = handle_gcd_lcm(operatorname, args)
elif name in ["gcd", "lcm"]:
expr = handle_gcd_lcm(name, args)
elif name in ["max", "min"]:
name = name[0].upper() + name[1:]
expr = getattr(sympy.functions, name)(*args, evaluate=False)
func_pow = None
should_pow = True
if func.supexpr():
if func.supexpr().expr():
func_pow = convert_expr(func.supexpr().expr())
else:
func_pow = convert_atom(func.supexpr().atom())
if func_pow and should_pow:
expr = sympy.Pow(expr, func_pow, evaluate=False)
return expr
# elif func.LETTER_NO_E() or func.SYMBOL():
# print('LETTER_NO_E or symbol')
# if func.LETTER_NO_E():
# fname = func.LETTER_NO_E().getText()
# elif func.SYMBOL():
# fname = func.SYMBOL().getText()[1:]
# fname = str(fname) # can't be unicode
# if func.subexpr():
# subscript = None
# if func.subexpr().expr(): # subscript is expr
# subscript = convert_expr(func.subexpr().expr())
# else: # subscript is atom
# subscript = convert_atom(func.subexpr().atom())
# subscriptName = StrPrinter().doprint(subscript)
# fname += '_{' + subscriptName + '}'
# input_args = func.args()
# output_args = []
# while input_args.args(): # handle multiple arguments to function
# output_args.append(convert_expr(input_args.expr()))
# input_args = input_args.args()
# output_args.append(convert_expr(input_args.expr()))
# return sympy.Function(fname)(*output_args)
elif func.FUNC_INT():
return handle_integral(func)
elif func.FUNC_SQRT():
expr = convert_expr(func.base)
if func.root:
r = convert_expr(func.root)
return sympy.Pow(expr, 1 / r, evaluate=False)
else:
return sympy.Pow(expr, sympy.S.Half, evaluate=False)
elif func.FUNC_SUM():
return handle_sum_or_prod(func, "summation")
elif func.FUNC_PROD():
return handle_sum_or_prod(func, "product")
elif func.FUNC_LIM():
return handle_limit(func)
elif func.EXP_E():
return handle_exp(func)
def convert_func_arg(arg):
if hasattr(arg, 'expr'):
return convert_expr(arg.expr())
else:
return convert_mp(arg.mp_nofunc())
def handle_integral(func):
if func.additive():
integrand = convert_add(func.additive())
elif func.frac():
integrand = convert_frac(func.frac())
else:
integrand = 1
int_var = None
if func.DIFFERENTIAL():
int_var = get_differential_var(func.DIFFERENTIAL())
else:
for sym in integrand.atoms(sympy.Symbol):
s = str(sym)
if len(s) > 1 and s[0] == 'd':
if s[1] == '\\':
int_var = sympy.Symbol(s[2:], real=True)
else:
int_var = sympy.Symbol(s[1:], real=True)
int_sym = sym
if int_var:
integrand = integrand.subs(int_sym, 1)
else:
# Assume dx by default
int_var = sympy.Symbol('x', real=True)
if func.subexpr():
if func.subexpr().atom():
lower = convert_atom(func.subexpr().atom())
else:
lower = convert_expr(func.subexpr().expr())
if func.supexpr().atom():
upper = convert_atom(func.supexpr().atom())
else:
upper = convert_expr(func.supexpr().expr())
return sympy.Integral(integrand, (int_var, lower, upper))
else:
return sympy.Integral(integrand, int_var)
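
# Example (editor's sketch): "\\int_0^1 x dx" yields Integral(x, (x, 0, 1));
# "\\int x dx" yields Integral(x, x); and when no differential appears the
# integration variable falls back to Symbol('x', real=True).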
def handle_sum_or_prod(func, name):
val = convert_mp(func.mp())
iter_var = convert_expr(func.subeq().equality().expr(0))
start = convert_expr(func.subeq().equality().expr(1))
if func.supexpr().expr(): # ^{expr}
end = convert_expr(func.supexpr().expr())
else: # ^atom
end = convert_atom(func.supexpr().atom())
if name == "summation":
return sympy.Sum(val, (iter_var, start, end))
elif name == "product":
return sympy.Product(val, (iter_var, start, end))
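
# Example (editor's sketch): "\\sum_{i=1}^{n} i" yields Sum(i, (i, 1, n)) and
# "\\prod_{i=1}^{n} i" yields Product(i, (i, 1, n)), both left unevaluated so
# the caller can inspect the bounds or call .doit().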
def handle_limit(func):
sub = func.limit_sub()
if sub.LETTER_NO_E():
var = sympy.Symbol(sub.LETTER_NO_E().getText(), real=True)
elif sub.GREEK_LETTER():
var = sympy.Symbol(sub.GREEK_LETTER().getText()[1:], real=True)
else:
var = sympy.Symbol('x', real=True)
if sub.SUB():
direction = "-"
else:
direction = "+"
approaching = convert_expr(sub.expr())
content = convert_mp(func.mp())
return sympy.Limit(content, var, approaching, direction)
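
# Example (editor's sketch): input like "\\lim_{x \\to 0^-} \\frac{1}{x}"
# becomes Limit(1/x, x, 0, '-'); without an explicit minus superscript the
# direction defaults to '+'.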
def handle_exp(func):
if func.supexpr():
if func.supexpr().expr(): # ^{expr}
exp_arg = convert_expr(func.supexpr().expr())
else: # ^atom
exp_arg = convert_atom(func.supexpr().atom())
else:
exp_arg = 1
return sympy.exp(exp_arg)
def handle_gcd_lcm(f, args):
"""
Return the result of gcd() or lcm(), as UnevaluatedExpr
f: str - name of function ("gcd" or "lcm")
args: List[Expr] - list of function arguments
"""
args = tuple(map(sympy.nsimplify, args))
# gcd() and lcm() don't support evaluate=False
return sympy.UnevaluatedExpr(getattr(sympy, f)(args))
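
# Example (editor's sketch): handle_gcd_lcm("gcd", [Integer(8), Integer(12)])
# returns UnevaluatedExpr(gcd((8, 12))) -- the value 4 kept wrapped so that
# enclosing evaluate=False expressions are not simplified; nsimplify first
# coerces float-like arguments such as 8.0 into Rationals.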
def handle_floor(expr):
"""
Apply floor() then return the floored expression.
expr: Expr - sympy expression as an argument to floor()
"""
return sympy.functions.floor(expr, evaluate=False)
def handle_ceil(expr):
"""
Apply ceil() then return the ceil-ed expression.
expr: Expr - sympy expression as an argument to ceil()
"""
return sympy.functions.ceiling(expr, evaluate=False)
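
# Example (editor's sketch): handle_floor(Rational(7, 2)) returns the
# unevaluated floor(7/2) and handle_ceil the unevaluated ceiling(7/2);
# keeping them unevaluated preserves the original LaTeX structure.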
def get_differential_var(d):
text = get_differential_var_str(d.getText())
return sympy.Symbol(text, real=True)
def get_differential_var_str(text):
for i in range(1, len(text)):
c = text[i]
if not (c == " " or c == "\r" or c == "\n" or c == "\t"):
idx = i
break
text = text[idx:]
if text[0] == "\\":
text = text[1:]
return text
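
# Example (editor's sketch, traced from the code above):
#   get_differential_var_str("d x")       -> "x"
#   get_differential_var_str("d \\theta") -> "theta"
# The loop skips the leading "d" and any whitespace, then one leading
# backslash is stripped.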
| 33.15127
| 99
| 0.571633
|
import sympy
import antlr4
from antlr4.error.ErrorListener import ErrorListener
from sympy.core.operations import AssocOp
try:
from gen.PSParser import PSParser
from gen.PSLexer import PSLexer
from gen.PSListener import PSListener
except Exception:
from .gen.PSParser import PSParser
from .gen.PSLexer import PSLexer
from .gen.PSListener import PSListener
from sympy.printing.str import StrPrinter
from sympy.parsing.sympy_parser import parse_expr
import hashlib
VARIABLE_VALUES = {}
def process_sympy(sympy, variable_values={}):
global VARIABLE_VALUES
if len(variable_values) > 0:
VARIABLE_VALUES = variable_values
else:
VARIABLE_VALUES = {}
matherror = MathErrorListener(sympy)
stream = antlr4.InputStream(sympy)
lex = PSLexer(stream)
lex.removeErrorListeners()
lex.addErrorListener(matherror)
tokens = antlr4.CommonTokenStream(lex)
parser = PSParser(tokens)
parser.removeErrorListeners()
parser.addErrorListener(matherror)
return_data = None
math = parser.math()
if math.relation_list():
return_data = []
relation_list = math.relation_list().relation_list_content()
for list_item in relation_list.relation():
expr = convert_relation(list_item)
return_data.append(expr)
else:
relation = math.relation()
return_data = convert_relation(relation)
return return_data
class MathErrorListener(ErrorListener):
def __init__(self, src):
        super(MathErrorListener, self).__init__()
self.src = src
def syntaxError(self, recog, symbol, line, col, msg, e):
fmt = "%s\n%s\n%s"
marker = "~" * col + "^"
if msg.startswith("missing"):
err = fmt % (msg, self.src, marker)
elif msg.startswith("no viable"):
err = fmt % ("I expected something else here", self.src, marker)
elif msg.startswith("mismatched"):
names = PSParser.literalNames
expected = [names[i] for i in e.getExpectedTokens() if i < len(names)]
if len(expected) < 10:
expected = " ".join(expected)
err = (fmt % ("I expected one of these: " + expected,
self.src, marker))
else:
err = (fmt % ("I expected something else here", self.src, marker))
else:
err = fmt % ("I don't understand this", self.src, marker)
raise Exception(err)
def convert_relation(rel):
if rel.expr():
return convert_expr(rel.expr())
lh = convert_relation(rel.relation(0))
rh = convert_relation(rel.relation(1))
if rel.LT():
return sympy.StrictLessThan(lh, rh, evaluate=False)
elif rel.LTE():
return sympy.LessThan(lh, rh, evaluate=False)
elif rel.GT():
return sympy.StrictGreaterThan(lh, rh, evaluate=False)
elif rel.GTE():
return sympy.GreaterThan(lh, rh, evaluate=False)
elif rel.EQUAL():
return sympy.Eq(lh, rh, evaluate=False)
elif rel.UNEQUAL():
return sympy.Ne(lh, rh, evaluate=False)
def convert_expr(expr):
if expr.additive():
return convert_add(expr.additive())
def convert_matrix(matrix):
# build matrix
row = matrix.matrix_row()
tmp = []
rows = 0
for r in row:
tmp.append([])
for expr in r.expr():
tmp[rows].append(convert_expr(expr))
rows = rows + 1
# return the matrix
return sympy.Matrix(tmp)
def add_flat(lh, rh):
if hasattr(lh, 'is_Add') and lh.is_Add or hasattr(rh, 'is_Add') and rh.is_Add:
args = []
if hasattr(lh, 'is_Add') and lh.is_Add:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_Add') and rh.is_Add:
args = args + list(rh.args)
else:
args += [rh]
return sympy.Add(*args, evaluate=False)
else:
return sympy.Add(lh, rh, evaluate=False)
def mat_add_flat(lh, rh):
if hasattr(lh, 'is_MatAdd') and lh.is_MatAdd or hasattr(rh, 'is_MatAdd') and rh.is_MatAdd:
args = []
if hasattr(lh, 'is_MatAdd') and lh.is_MatAdd:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_MatAdd') and rh.is_MatAdd:
args = args + list(rh.args)
else:
args += [rh]
return sympy.MatAdd(*args, evaluate=False)
else:
return sympy.MatAdd(lh, rh, evaluate=False)
def mul_flat(lh, rh):
if hasattr(lh, 'is_Mul') and lh.is_Mul or hasattr(rh, 'is_Mul') and rh.is_Mul:
args = []
if hasattr(lh, 'is_Mul') and lh.is_Mul:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_Mul') and rh.is_Mul:
args = args + list(rh.args)
else:
args += [rh]
return sympy.Mul(*args, evaluate=False)
else:
return sympy.Mul(lh, rh, evaluate=False)
def mat_mul_flat(lh, rh):
if hasattr(lh, 'is_MatMul') and lh.is_MatMul or hasattr(rh, 'is_MatMul') and rh.is_MatMul:
args = []
if hasattr(lh, 'is_MatMul') and lh.is_MatMul:
args += list(lh.args)
else:
args += [lh]
if hasattr(rh, 'is_MatMul') and rh.is_MatMul:
args = args + list(rh.args)
else:
args += [rh]
return sympy.MatMul(*args, evaluate=False)
else:
return sympy.MatMul(lh, rh, evaluate=False)
def convert_add(add):
if add.ADD():
lh = convert_add(add.additive(0))
rh = convert_add(add.additive(1))
if lh.is_Matrix or rh.is_Matrix:
return mat_add_flat(lh, rh)
else:
return add_flat(lh, rh)
elif add.SUB():
lh = convert_add(add.additive(0))
rh = convert_add(add.additive(1))
if lh.is_Matrix or rh.is_Matrix:
return mat_add_flat(lh, mat_mul_flat(-1, rh))
else:
# If we want to force ordering for variables this should be:
# return Sub(lh, rh, evaluate=False)
if not rh.is_Matrix and rh.func.is_Number:
rh = -rh
else:
rh = mul_flat(-1, rh)
return add_flat(lh, rh)
else:
return convert_mp(add.mp())
def convert_mp(mp):
if hasattr(mp, 'mp'):
mp_left = mp.mp(0)
mp_right = mp.mp(1)
else:
mp_left = mp.mp_nofunc(0)
mp_right = mp.mp_nofunc(1)
if mp.MUL() or mp.CMD_TIMES() or mp.CMD_CDOT():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
if lh.is_Matrix or rh.is_Matrix:
return mat_mul_flat(lh, rh)
else:
return mul_flat(lh, rh)
elif mp.DIV() or mp.CMD_DIV() or mp.COLON():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
if lh.is_Matrix or rh.is_Matrix:
return sympy.MatMul(lh, sympy.Pow(rh, -1, evaluate=False), evaluate=False)
else:
return sympy.Mul(lh, sympy.Pow(rh, -1, evaluate=False), evaluate=False)
elif mp.CMD_MOD():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
if rh.is_Matrix:
raise Exception("Cannot perform modulo operation with a matrix as an operand")
else:
return sympy.Mod(lh, rh, evaluate=False)
else:
if hasattr(mp, 'unary'):
return convert_unary(mp.unary())
else:
return convert_unary(mp.unary_nofunc())
def convert_unary(unary):
if hasattr(unary, 'unary'):
nested_unary = unary.unary()
else:
nested_unary = unary.unary_nofunc()
if hasattr(unary, 'postfix_nofunc'):
first = unary.postfix()
tail = unary.postfix_nofunc()
postfix = [first] + tail
else:
postfix = unary.postfix()
if unary.ADD():
return convert_unary(nested_unary)
elif unary.SUB():
tmp_convert_nested_unary = convert_unary(nested_unary)
if tmp_convert_nested_unary.is_Matrix:
            return mat_mul_flat(-1, tmp_convert_nested_unary)
else:
if tmp_convert_nested_unary.func.is_Number:
return -tmp_convert_nested_unary
else:
return mul_flat(-1, tmp_convert_nested_unary)
elif postfix:
return convert_postfix_list(postfix)
def convert_postfix_list(arr, i=0):
if i >= len(arr):
raise Exception("Index out of bounds")
res = convert_postfix(arr[i])
if isinstance(res, sympy.Expr) or isinstance(res, sympy.Matrix) or res is sympy.S.EmptySet:
if i == len(arr) - 1:
return res # nothing to multiply by
else:
# multiply by next
rh = convert_postfix_list(arr, i + 1)
if res.is_Matrix or rh.is_Matrix:
return mat_mul_flat(res, rh)
else:
return mul_flat(res, rh)
else: # must be derivative
wrt = res[0]
if i == len(arr) - 1:
raise Exception("Expected expression for derivative")
else:
expr = convert_postfix_list(arr, i + 1)
return sympy.Derivative(expr, wrt)
def do_subs(expr, at):
if at.expr():
at_expr = convert_expr(at.expr())
syms = at_expr.atoms(sympy.Symbol)
if len(syms) == 0:
return expr
elif len(syms) > 0:
sym = next(iter(syms))
return expr.subs(sym, at_expr)
elif at.equality():
lh = convert_expr(at.equality().expr(0))
rh = convert_expr(at.equality().expr(1))
return expr.subs(lh, rh)
def convert_postfix(postfix):
if hasattr(postfix, 'exp'):
exp_nested = postfix.exp()
else:
exp_nested = postfix.exp_nofunc()
exp = convert_exp(exp_nested)
for op in postfix.postfix_op():
if op.BANG():
if isinstance(exp, list):
raise Exception("Cannot apply postfix to derivative")
exp = sympy.factorial(exp, evaluate=False)
elif op.eval_at():
ev = op.eval_at()
at_b = None
at_a = None
if ev.eval_at_sup():
at_b = do_subs(exp, ev.eval_at_sup())
if ev.eval_at_sub():
at_a = do_subs(exp, ev.eval_at_sub())
if at_b is not None and at_a is not None:
exp = add_flat(at_b, mul_flat(at_a, -1))
elif at_b is not None:
exp = at_b
elif at_a is not None:
exp = at_a
return exp
def convert_exp(exp):
if hasattr(exp, 'exp'):
exp_nested = exp.exp()
else:
exp_nested = exp.exp_nofunc()
if exp_nested:
base = convert_exp(exp_nested)
if isinstance(base, list):
raise Exception("Cannot raise derivative to power")
if exp.atom():
exponent = convert_atom(exp.atom())
elif exp.expr():
exponent = convert_expr(exp.expr())
return sympy.Pow(base, exponent, evaluate=False)
else:
if hasattr(exp, 'comp'):
return convert_comp(exp.comp())
else:
return convert_comp(exp.comp_nofunc())
def convert_comp(comp):
if comp.group():
return convert_expr(comp.group().expr())
elif comp.abs_group():
return sympy.Abs(convert_expr(comp.abs_group().expr()), evaluate=False)
elif comp.floor_group():
return handle_floor(convert_expr(comp.floor_group().expr()))
elif comp.ceil_group():
return handle_ceil(convert_expr(comp.ceil_group().expr()))
elif comp.atom():
return convert_atom(comp.atom())
elif comp.frac():
return convert_frac(comp.frac())
elif comp.binom():
return convert_binom(comp.binom())
elif comp.matrix():
return convert_matrix(comp.matrix())
elif comp.func():
return convert_func(comp.func())
def convert_atom(atom):
if atom.LETTER_NO_E():
subscriptName = ''
s = atom.LETTER_NO_E().getText()
if s == "I":
return sympy.I
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = '_{' + StrPrinter().doprint(subscript) + '}'
return sympy.Symbol(atom.LETTER_NO_E().getText() + subscriptName, real=True)
elif atom.GREEK_LETTER():
s = atom.GREEK_LETTER().getText()[1:]
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = StrPrinter().doprint(subscript)
s += '_{' + subscriptName + '}'
return sympy.Symbol(s, real=True)
elif atom.accent():
# get name for accent
name = atom.accent().start.text[1:]
# exception: check if bar or overline which are treated both as bar
if name in ["bar", "overline"]:
name = "bar"
# get the base (variable)
base = atom.accent().base.getText()
# set string to base+name
s = base + name
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = StrPrinter().doprint(subscript)
s += '_{' + subscriptName + '}'
return sympy.Symbol(s, real=True)
elif atom.SYMBOL():
s = atom.SYMBOL().getText().replace("\\$", "").replace("\\%", "")
if s == "\\infty":
return sympy.oo
elif s == '\\pi':
return sympy.pi
elif s == '\\emptyset':
return sympy.S.EmptySet
else:
raise Exception("Unrecognized symbol")
elif atom.NUMBER():
s = atom.NUMBER().getText().replace(",", "")
try:
sr = sympy.Rational(s)
return sr
except (TypeError, ValueError):
return sympy.Number(s)
elif atom.E_NOTATION():
s = atom.E_NOTATION().getText().replace(",", "")
try:
sr = sympy.Rational(s)
return sr
except (TypeError, ValueError):
return sympy.Number(s)
elif atom.DIFFERENTIAL():
var = get_differential_var(atom.DIFFERENTIAL())
return sympy.Symbol('d' + var.name, real=True)
elif atom.mathit():
text = rule2text(atom.mathit().mathit_text())
return sympy.Symbol(text, real=True)
elif atom.VARIABLE():
text = atom.VARIABLE().getText()
is_percent = text.endswith("\\%")
trim_amount = 3 if is_percent else 1
name = text[10:]
name = name[0:len(name) - trim_amount]
# add hash to distinguish from regular symbols
# hash = hashlib.md5(name.encode()).hexdigest()
# symbol_name = name + hash
symbol_name = name
# replace the variable for already known variable values
if name in VARIABLE_VALUES:
# if a sympy class
if isinstance(VARIABLE_VALUES[name], tuple(sympy.core.all_classes)):
symbol = VARIABLE_VALUES[name]
# if NOT a sympy class
else:
symbol = parse_expr(str(VARIABLE_VALUES[name]))
else:
symbol = sympy.Symbol(symbol_name, real=True)
if is_percent:
return sympy.Mul(symbol, sympy.Pow(100, -1, evaluate=False), evaluate=False)
# return the symbol
return symbol
elif atom.PERCENT_NUMBER():
text = atom.PERCENT_NUMBER().getText().replace("\\%", "").replace(",", "")
try:
number = sympy.Rational(text)
except (TypeError, ValueError):
number = sympy.Number(text)
percent = sympy.Rational(number, 100)
return percent
def rule2text(ctx):
stream = ctx.start.getInputStream()
# starting index of starting token
startIdx = ctx.start.start
# stopping index of stopping token
stopIdx = ctx.stop.stop
return stream.getText(startIdx, stopIdx)
def convert_frac(frac):
diff_op = False
partial_op = False
lower_itv = frac.lower.getSourceInterval()
lower_itv_len = lower_itv[1] - lower_itv[0] + 1
if (frac.lower.start == frac.lower.stop and
frac.lower.start.type == PSLexer.DIFFERENTIAL):
wrt = get_differential_var_str(frac.lower.start.text)
diff_op = True
elif (lower_itv_len == 2 and
frac.lower.start.type == PSLexer.SYMBOL and
frac.lower.start.text == '\\partial' and
(frac.lower.stop.type == PSLexer.LETTER_NO_E or frac.lower.stop.type == PSLexer.SYMBOL)):
partial_op = True
wrt = frac.lower.stop.text
if frac.lower.stop.type == PSLexer.SYMBOL:
wrt = wrt[1:]
if diff_op or partial_op:
wrt = sympy.Symbol(wrt, real=True)
if (diff_op and frac.upper.start == frac.upper.stop and
frac.upper.start.type == PSLexer.LETTER_NO_E and
frac.upper.start.text == 'd'):
return [wrt]
elif (partial_op and frac.upper.start == frac.upper.stop and
frac.upper.start.type == PSLexer.SYMBOL and
frac.upper.start.text == '\\partial'):
return [wrt]
upper_text = rule2text(frac.upper)
expr_top = None
if diff_op and upper_text.startswith('d'):
expr_top = process_sympy(upper_text[1:])
elif partial_op and frac.upper.start.text == '\\partial':
expr_top = process_sympy(upper_text[len('\\partial'):])
if expr_top:
return sympy.Derivative(expr_top, wrt)
expr_top = convert_expr(frac.upper)
expr_bot = convert_expr(frac.lower)
if expr_top.is_Matrix or expr_bot.is_Matrix:
return sympy.MatMul(expr_top, sympy.Pow(expr_bot, -1, evaluate=False), evaluate=False)
else:
return sympy.Mul(expr_top, sympy.Pow(expr_bot, -1, evaluate=False), evaluate=False)
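# Illustrative sketch (not part of the original module; the LaTeX inputs
# below are assumptions about the surrounding grammar): convert_frac
# distinguishes quotients from Leibniz-style derivatives:
#   \frac{x}{2}    ->  Mul(x, Pow(2, -1))   (plain fraction)
#   \frac{dy}{dx}  ->  Derivative(y, x)     (numerator starts with 'd')
#   \frac{d}{dx}   ->  [x]                  (bare operator; the caller applies
#                                            it to whatever follows)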
def convert_binom(binom):
expr_top = convert_expr(binom.upper)
expr_bot = convert_expr(binom.lower)
return sympy.binomial(expr_top, expr_bot)
def convert_func(func):
if func.func_normal_single_arg():
if func.L_PAREN(): # function called with parenthesis
arg = convert_func_arg(func.func_single_arg())
else:
arg = convert_func_arg(func.func_single_arg_noparens())
name = func.func_normal_single_arg().start.text[1:]
# change arc<trig> -> a<trig>
if name in ["arcsin", "arccos", "arctan", "arccsc", "arcsec",
"arccot"]:
name = "a" + name[3:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
elif name in ["arsinh", "arcosh", "artanh"]:
name = "a" + name[2:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
elif name in ["arcsinh", "arccosh", "arctanh"]:
name = "a" + name[3:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
elif name == "operatorname":
operatorname = func.func_normal_single_arg().func_operator_name.getText()
if operatorname in ["arsinh", "arcosh", "artanh"]:
operatorname = "a" + operatorname[2:]
expr = getattr(sympy.functions, operatorname)(arg, evaluate=False)
elif operatorname in ["arcsinh", "arccosh", "arctanh"]:
operatorname = "a" + operatorname[3:]
expr = getattr(sympy.functions, operatorname)(arg, evaluate=False)
elif operatorname == "floor":
expr = handle_floor(arg)
elif operatorname == "ceil":
expr = handle_ceil(arg)
elif name in ["log", "ln"]:
if func.subexpr():
if func.subexpr().atom():
base = convert_atom(func.subexpr().atom())
else:
base = convert_expr(func.subexpr().expr())
elif name == "log":
base = 10
elif name == "ln":
base = sympy.E
expr = sympy.log(arg, base, evaluate=False)
elif name in ["exp", "exponentialE"]:
expr = sympy.exp(arg)
elif name == "floor":
expr = handle_floor(arg)
elif name == "ceil":
expr = handle_ceil(arg)
func_pow = None
should_pow = True
if func.supexpr():
if func.supexpr().expr():
func_pow = convert_expr(func.supexpr().expr())
else:
func_pow = convert_atom(func.supexpr().atom())
if name in ["sin", "cos", "tan", "csc", "sec", "cot", "sinh", "cosh", "tanh"]:
if func_pow == -1:
name = "a" + name
should_pow = False
expr = getattr(sympy.functions, name)(arg, evaluate=False)
if func_pow and should_pow:
expr = sympy.Pow(expr, func_pow, evaluate=False)
return expr
elif func.func_normal_multi_arg():
if func.L_PAREN(): # function called with parenthesis
args = func.func_multi_arg().getText().split(",")
else:
            args = func.func_multi_arg_noparens().getText().split(",")
args = list(map(lambda arg: process_sympy(arg, VARIABLE_VALUES), args))
name = func.func_normal_multi_arg().start.text[1:]
if name == "operatorname":
operatorname = func.func_normal_multi_arg().func_operator_name.getText()
if operatorname in ["gcd", "lcm"]:
expr = handle_gcd_lcm(operatorname, args)
elif name in ["gcd", "lcm"]:
expr = handle_gcd_lcm(name, args)
elif name in ["max", "min"]:
name = name[0].upper() + name[1:]
expr = getattr(sympy.functions, name)(*args, evaluate=False)
func_pow = None
should_pow = True
if func.supexpr():
if func.supexpr().expr():
func_pow = convert_expr(func.supexpr().expr())
else:
func_pow = convert_atom(func.supexpr().atom())
if func_pow and should_pow:
expr = sympy.Pow(expr, func_pow, evaluate=False)
return expr
# elif func.LETTER_NO_E() or func.SYMBOL():
# print('LETTER_NO_E or symbol')
# if func.LETTER_NO_E():
# fname = func.LETTER_NO_E().getText()
# elif func.SYMBOL():
# fname = func.SYMBOL().getText()[1:]
# fname = str(fname) # can't be unicode
    elif func.FUNC_INT():
return handle_integral(func)
elif func.FUNC_SQRT():
expr = convert_expr(func.base)
if func.root:
r = convert_expr(func.root)
return sympy.Pow(expr, 1 / r, evaluate=False)
else:
return sympy.Pow(expr, sympy.S.Half, evaluate=False)
elif func.FUNC_SUM():
return handle_sum_or_prod(func, "summation")
elif func.FUNC_PROD():
return handle_sum_or_prod(func, "product")
elif func.FUNC_LIM():
return handle_limit(func)
elif func.EXP_E():
return handle_exp(func)
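# Illustrative sketch (not part of the original module): for the named
# single-argument functions handled above, a superscript of -1 selects the
# inverse function instead of a power:
#   \sin^{-1}(x)  ->  asin(x)
#   \sin^{2}(x)   ->  Pow(sin(x), 2)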
def convert_func_arg(arg):
if hasattr(arg, 'expr'):
return convert_expr(arg.expr())
else:
return convert_mp(arg.mp_nofunc())
def handle_integral(func):
if func.additive():
integrand = convert_add(func.additive())
elif func.frac():
integrand = convert_frac(func.frac())
else:
integrand = 1
int_var = None
if func.DIFFERENTIAL():
int_var = get_differential_var(func.DIFFERENTIAL())
else:
for sym in integrand.atoms(sympy.Symbol):
s = str(sym)
if len(s) > 1 and s[0] == 'd':
if s[1] == '\\':
int_var = sympy.Symbol(s[2:], real=True)
else:
int_var = sympy.Symbol(s[1:], real=True)
int_sym = sym
if int_var:
integrand = integrand.subs(int_sym, 1)
else:
int_var = sympy.Symbol('x', real=True)
if func.subexpr():
if func.subexpr().atom():
lower = convert_atom(func.subexpr().atom())
else:
lower = convert_expr(func.subexpr().expr())
if func.supexpr().atom():
upper = convert_atom(func.supexpr().atom())
else:
upper = convert_expr(func.supexpr().expr())
return sympy.Integral(integrand, (int_var, lower, upper))
else:
return sympy.Integral(integrand, int_var)
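# Illustrative sketch (not part of the original module; the LaTeX inputs are
# assumptions about the grammar):
#   \int x dx      ->  sympy.Integral(x, x)
#   \int_0^1 x dx  ->  sympy.Integral(x, (x, 0, 1))
# When no explicit differential is found, the integration variable is
# inferred from a d<var> symbol inside the integrand, defaulting to x.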
def handle_sum_or_prod(func, name):
val = convert_mp(func.mp())
iter_var = convert_expr(func.subeq().equality().expr(0))
start = convert_expr(func.subeq().equality().expr(1))
if func.supexpr().expr():
end = convert_expr(func.supexpr().expr())
else:
end = convert_atom(func.supexpr().atom())
if name == "summation":
return sympy.Sum(val, (iter_var, start, end))
elif name == "product":
return sympy.Product(val, (iter_var, start, end))
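# Illustrative sketch (not part of the original module; input forms are
# assumptions about the grammar):
#   \sum_{n=1}^{10} n    ->  sympy.Sum(n, (n, 1, 10))
#   \prod_{k=1}^{5} k^2  ->  sympy.Product(k**2, (k, 1, 5))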
def handle_limit(func):
sub = func.limit_sub()
if sub.LETTER_NO_E():
var = sympy.Symbol(sub.LETTER_NO_E().getText(), real=True)
elif sub.GREEK_LETTER():
var = sympy.Symbol(sub.GREEK_LETTER().getText()[1:], real=True)
else:
var = sympy.Symbol('x', real=True)
if sub.SUB():
direction = "-"
else:
direction = "+"
approaching = convert_expr(sub.expr())
content = convert_mp(func.mp())
return sympy.Limit(content, var, approaching, direction)
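# Illustrative sketch (not part of the original module): \lim_{x \to 0} <expr>
# yields sympy.Limit(<expr>, x, 0, '+'); a one-sided limit whose subscript
# carries a trailing minus sign (matched by SUB) flips the direction to '-'.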
def handle_exp(func):
if func.supexpr():
if func.supexpr().expr():
exp_arg = convert_expr(func.supexpr().expr())
else:
exp_arg = convert_atom(func.supexpr().atom())
else:
exp_arg = 1
return sympy.exp(exp_arg)
def handle_gcd_lcm(f, args):
args = tuple(map(sympy.nsimplify, args))
return sympy.UnevaluatedExpr(getattr(sympy, f)(args))
def handle_floor(expr):
return sympy.functions.floor(expr, evaluate=False)
def handle_ceil(expr):
return sympy.functions.ceiling(expr, evaluate=False)
def get_differential_var(d):
text = get_differential_var_str(d.getText())
return sympy.Symbol(text, real=True)
def get_differential_var_str(text):
for i in range(1, len(text)):
c = text[i]
if not (c == " " or c == "\r" or c == "\n" or c == "\t"):
idx = i
break
text = text[idx:]
if text[0] == "\\":
text = text[1:]
return text
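if __name__ == "__main__":
    # Minimal self-check sketch (added for illustration, not part of the
    # original module): get_differential_var_str strips the leading 'd', any
    # whitespace, and an optional backslash from a differential token.
    assert get_differential_var_str("dx") == "x"
    assert get_differential_var_str("d \\theta") == "theta"
    print("differential token parsing OK")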
| true
| true
|
79088056be0dcfbcb88134be5d64cd1a09c92800
| 1,580
|
py
|
Python
|
python-news_aggregator/migrations/versions/20180830_cluster_control.py
|
hanakhry/Crime_Admin
|
17f96a81b80fb1302fb7b00f57bc88412d131675
|
[
"MIT"
] | null | null | null |
python-news_aggregator/migrations/versions/20180830_cluster_control.py
|
hanakhry/Crime_Admin
|
17f96a81b80fb1302fb7b00f57bc88412d131675
|
[
"MIT"
] | null | null | null |
python-news_aggregator/migrations/versions/20180830_cluster_control.py
|
hanakhry/Crime_Admin
|
17f96a81b80fb1302fb7b00f57bc88412d131675
|
[
"MIT"
] | null | null | null |
"""adding cluster control options on every level
Revision ID: a987c6ce888d
Revises: 8079a1cb5874
Create Date: 2018-08-01 18:34:00.415937
"""
import logging
from alembic import op
import sqlalchemy as sa
revision = 'a987c6ce888d'
down_revision = '8079a1cb5874'
branch_labels = None
depends_on = None
logger = logging.getLogger('alembic.' + revision)
CLUSTER_CONF_OPTIONS = ['cluster_enabled', 'cluster_tfidf_enabled',
'cluster_same_category', 'cluster_same_feed',
'cluster_wake_up']
def upgrade():
op.drop_column('category', 'cluster_on_title')
for table in 'user', 'feed', 'category':
logger.info('adding cluster control options on %s', table)
for option in CLUSTER_CONF_OPTIONS:
op.add_column(table, sa.Column(option, sa.Boolean(),
default=None, nullable=True))
op.add_column(table, sa.Column('cluster_conf', sa.PickleType(),
default={}, nullable=True))
logger.info('setting default options to true for users')
op.execute('UPDATE "user" SET %s;'
% ', '.join(["%s=true" % opt for opt in CLUSTER_CONF_OPTIONS]))
for option in CLUSTER_CONF_OPTIONS:
op.alter_column('user', option, nullable=False)
def downgrade():
for table in 'user', 'feed', 'category':
for option in CLUSTER_CONF_OPTIONS:
op.drop_column(table, option)
op.add_column('category', sa.Column('cluster_on_title',
sa.BOOLEAN(), autoincrement=False, nullable=True))
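# Illustration (not part of the original migration): with Alembic configured
# for this project, the revision above is applied or rolled back from the
# command line, e.g.
#   alembic upgrade a987c6ce888d
#   alembic downgrade 8079a1cb5874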
| 35.111111
| 78
| 0.643038
|
import logging
from alembic import op
import sqlalchemy as sa
revision = 'a987c6ce888d'
down_revision = '8079a1cb5874'
branch_labels = None
depends_on = None
logger = logging.getLogger('alembic.' + revision)
CLUSTER_CONF_OPTIONS = ['cluster_enabled', 'cluster_tfidf_enabled',
'cluster_same_category', 'cluster_same_feed',
'cluster_wake_up']
def upgrade():
op.drop_column('category', 'cluster_on_title')
for table in 'user', 'feed', 'category':
logger.info('adding cluster control options on %s', table)
for option in CLUSTER_CONF_OPTIONS:
op.add_column(table, sa.Column(option, sa.Boolean(),
default=None, nullable=True))
op.add_column(table, sa.Column('cluster_conf', sa.PickleType(),
default={}, nullable=True))
logger.info('setting default options to true for users')
op.execute('UPDATE "user" SET %s;'
% ', '.join(["%s=true" % opt for opt in CLUSTER_CONF_OPTIONS]))
for option in CLUSTER_CONF_OPTIONS:
op.alter_column('user', option, nullable=False)
def downgrade():
for table in 'user', 'feed', 'category':
for option in CLUSTER_CONF_OPTIONS:
op.drop_column(table, option)
op.add_column('category', sa.Column('cluster_on_title',
sa.BOOLEAN(), autoincrement=False, nullable=True))
| true
| true
|
79088058ffbe478e046e84e2d58d137d99fc4250
| 10,587
|
py
|
Python
|
src/abaqus/Step/AnnealStep.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/Step/AnnealStep.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/Step/AnnealStep.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
from abaqusConstants import *
from .AnalysisStep import AnalysisStep
from ..Adaptivity.AdaptiveMeshConstraintState import AdaptiveMeshConstraintState
from ..Adaptivity.AdaptiveMeshDomain import AdaptiveMeshDomain
from ..BoundaryCondition.BoundaryConditionState import BoundaryConditionState
from ..Load.LoadCase import LoadCase
from ..Load.LoadState import LoadState
from ..PredefinedField.PredefinedFieldState import PredefinedFieldState
from ..StepMiscellaneous.Control import Control
from ..StepMiscellaneous.SolverControl import SolverControl
from ..StepOutput.DiagnosticPrint import DiagnosticPrint
from ..StepOutput.FieldOutputRequestState import FieldOutputRequestState
from ..StepOutput.HistoryOutputRequestState import HistoryOutputRequestState
from ..StepOutput.Monitor import Monitor
from ..StepOutput.Restart import Restart
class AnnealStep(AnalysisStep):
"""The AnnealStep object anneals a structure by setting the velocities and all appropriate
state variables to zero.
The AnnealStep object is derived from the AnalysisStep object.
Attributes
----------
name: str
A String specifying the repository key.
refTemp: float
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
previous: str
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
description: str
A String specifying a description of the new step. The default value is an empty string.
explicit: SymbolicConstant
A SymbolicConstant specifying whether the step has an explicit procedure type
(**procedureType=ANNEAL**, DYNAMIC_EXPLICIT, or DYNAMIC_TEMP_DISPLACEMENT).
perturbation: Boolean
A Boolean specifying whether the step has a perturbation procedure type.
nonmechanical: Boolean
A Boolean specifying whether the step has a mechanical procedure type.
procedureType: SymbolicConstant
A SymbolicConstant specifying the Abaqus procedure. Possible values are:
- ANNEAL
- BUCKLE
- COMPLEX_FREQUENCY
- COUPLED_TEMP_DISPLACEMENT
- COUPLED_THERMAL_ELECTRIC
- DIRECT_CYCLIC
- DYNAMIC_IMPLICIT
- DYNAMIC_EXPLICIT
- DYNAMIC_SUBSPACE
- DYNAMIC_TEMP_DISPLACEMENT
- COUPLED_THERMAL_ELECTRICAL_STRUCTURAL
- FREQUENCY
- GEOSTATIC
- HEAT_TRANSFER
- MASS_DIFFUSION
- MODAL_DYNAMICS
- RANDOM_RESPONSE
- RESPONSE_SPECTRUM
- SOILS
- STATIC_GENERAL
- STATIC_LINEAR_PERTURBATION
- STATIC_RIKS
- STEADY_STATE_DIRECT
- STEADY_STATE_MODAL
- STEADY_STATE_SUBSPACE
- VISCO
suppressed: Boolean
A Boolean specifying whether the step is suppressed or not. The default value is OFF.
fieldOutputRequestState: dict[str, FieldOutputRequestState]
A repository of :py:class:`~abaqus.StepOutput.FieldOutputRequestState.FieldOutputRequestState` objects.
historyOutputRequestState: dict[str, HistoryOutputRequestState]
A repository of :py:class:`~abaqus.StepOutput.HistoryOutputRequestState.HistoryOutputRequestState` objects.
diagnosticPrint: DiagnosticPrint
A :py:class:`~abaqus.StepOutput.DiagnosticPrint.DiagnosticPrint` object.
monitor: Monitor
A :py:class:`~abaqus.StepOutput.Monitor.Monitor` object.
restart: Restart
A :py:class:`~abaqus.StepOutput.Restart.Restart` object.
adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState]
A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshConstraintState.AdaptiveMeshConstraintState` objects.
adaptiveMeshDomains: dict[str, AdaptiveMeshDomain]
A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshDomain.AdaptiveMeshDomain` objects.
control: Control
A :py:class:`~abaqus.StepMiscellaneous.Control.Control` object.
solverControl: SolverControl
A :py:class:`~abaqus.StepMiscellaneous.SolverControl.SolverControl` object.
boundaryConditionStates: dict[str, BoundaryConditionState]
A repository of :py:class:`~abaqus.BoundaryCondition.BoundaryConditionState.BoundaryConditionState` objects.
interactionStates: int
A repository of :py:class:`~abaqus.Interaction.InteractionState.InteractionState` objects.
loadStates: dict[str, LoadState]
A repository of :py:class:`~abaqus.Load.LoadState.LoadState` objects.
loadCases: dict[str, LoadCase]
A repository of :py:class:`~abaqus.Load.LoadCase.LoadCase` objects.
predefinedFieldStates: dict[str, PredefinedFieldState]
A repository of :py:class:`~abaqus.PredefinedField.PredefinedFieldState.PredefinedFieldState` objects.
Notes
-----
This object can be accessed by:
.. code-block:: python
import step
mdb.models[name].steps[name]
The corresponding analysis keywords are:
- ANNEAL
- STEP
"""
# A String specifying the repository key.
name: str = ''
# A Float specifying the post-anneal reference temperature. The default value is the
# current temperature at all nodes in the model after the annealing has completed.
refTemp: float = None
# A String specifying the name of the previous step. The new step appears after this step
# in the list of analysis steps.
previous: str = ''
# A String specifying a description of the new step. The default value is an empty string.
description: str = ''
# A SymbolicConstant specifying whether the step has an explicit procedure type
# (*procedureType*=ANNEAL, DYNAMIC_EXPLICIT, or DYNAMIC_TEMP_DISPLACEMENT).
explicit: SymbolicConstant = None
# A Boolean specifying whether the step has a perturbation procedure type.
perturbation: Boolean = OFF
# A Boolean specifying whether the step has a mechanical procedure type.
nonmechanical: Boolean = OFF
# A SymbolicConstant specifying the Abaqus procedure. Possible values are:
# - ANNEAL
# - BUCKLE
# - COMPLEX_FREQUENCY
# - COUPLED_TEMP_DISPLACEMENT
# - COUPLED_THERMAL_ELECTRIC
# - DIRECT_CYCLIC
# - DYNAMIC_IMPLICIT
# - DYNAMIC_EXPLICIT
# - DYNAMIC_SUBSPACE
# - DYNAMIC_TEMP_DISPLACEMENT
# - COUPLED_THERMAL_ELECTRICAL_STRUCTURAL
# - FREQUENCY
# - GEOSTATIC
# - HEAT_TRANSFER
# - MASS_DIFFUSION
# - MODAL_DYNAMICS
# - RANDOM_RESPONSE
# - RESPONSE_SPECTRUM
# - SOILS
# - STATIC_GENERAL
# - STATIC_LINEAR_PERTURBATION
# - STATIC_RIKS
# - STEADY_STATE_DIRECT
# - STEADY_STATE_MODAL
# - STEADY_STATE_SUBSPACE
# - VISCO
procedureType: SymbolicConstant = None
# A Boolean specifying whether the step is suppressed or not. The default value is OFF.
suppressed: Boolean = OFF
# A repository of FieldOutputRequestState objects.
fieldOutputRequestState: dict[str, FieldOutputRequestState] = dict[str, FieldOutputRequestState]()
# A repository of HistoryOutputRequestState objects.
historyOutputRequestState: dict[str, HistoryOutputRequestState] = dict[str, HistoryOutputRequestState]()
# A DiagnosticPrint object.
diagnosticPrint: DiagnosticPrint = DiagnosticPrint()
# A Monitor object.
monitor: Monitor = None
# A Restart object.
restart: Restart = Restart()
# A repository of AdaptiveMeshConstraintState objects.
adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState] = dict[
str, AdaptiveMeshConstraintState]()
# A repository of AdaptiveMeshDomain objects.
adaptiveMeshDomains: dict[str, AdaptiveMeshDomain] = dict[str, AdaptiveMeshDomain]()
# A Control object.
control: Control = Control()
# A SolverControl object.
solverControl: SolverControl = SolverControl()
# A repository of BoundaryConditionState objects.
boundaryConditionStates: dict[str, BoundaryConditionState] = dict[str, BoundaryConditionState]()
# A repository of InteractionState objects.
interactionStates: int = None
# A repository of LoadState objects.
loadStates: dict[str, LoadState] = dict[str, LoadState]()
# A repository of LoadCase objects.
loadCases: dict[str, LoadCase] = dict[str, LoadCase]()
# A repository of PredefinedFieldState objects.
predefinedFieldStates: dict[str, PredefinedFieldState] = dict[str, PredefinedFieldState]()
def __init__(self, name: str, previous: str, description: str = '', refTemp: float = None,
maintainAttributes: Boolean = False):
"""This method creates an AnnealStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].AnnealStep
Parameters
----------
name
A String specifying the repository key.
previous
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
description
A String specifying a description of the new step. The default value is an empty string.
refTemp
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
maintainAttributes
A Boolean specifying whether to retain attributes from an existing step with the same
name. The default value is False.
Returns
-------
An AnnealStep object.
Raises
------
RangeError
"""
super().__init__()
pass
def setValues(self, description: str = '', refTemp: float = None):
"""This method modifies the AnnealStep object.
Parameters
----------
description
A String specifying a description of the new step. The default value is an empty string.
refTemp
A Float specifying the post-anneal reference temperature. The default value is the
current temperature at all nodes in the model after the annealing has completed.
Raises
------
RangeError
"""
pass
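# Illustrative usage sketch (not part of the original file; the model and
# step names are hypothetical). Per the docstrings above, inside an
# Abaqus/CAE Python session one would create and then modify the step as:
#   step = mdb.models['Model-1'].AnnealStep(name='Anneal-1', previous='Step-1')
#   step.setValues(description='post-forming anneal', refTemp=20.0)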
| 39.066421
| 119
| 0.694248
|
from abaqusConstants import *
from .AnalysisStep import AnalysisStep
from ..Adaptivity.AdaptiveMeshConstraintState import AdaptiveMeshConstraintState
from ..Adaptivity.AdaptiveMeshDomain import AdaptiveMeshDomain
from ..BoundaryCondition.BoundaryConditionState import BoundaryConditionState
from ..Load.LoadCase import LoadCase
from ..Load.LoadState import LoadState
from ..PredefinedField.PredefinedFieldState import PredefinedFieldState
from ..StepMiscellaneous.Control import Control
from ..StepMiscellaneous.SolverControl import SolverControl
from ..StepOutput.DiagnosticPrint import DiagnosticPrint
from ..StepOutput.FieldOutputRequestState import FieldOutputRequestState
from ..StepOutput.HistoryOutputRequestState import HistoryOutputRequestState
from ..StepOutput.Monitor import Monitor
from ..StepOutput.Restart import Restart
class AnnealStep(AnalysisStep):
name: str = ''
refTemp: float = None
previous: str = ''
description: str = ''
explicit: SymbolicConstant = None
perturbation: Boolean = OFF
nonmechanical: Boolean = OFF
procedureType: SymbolicConstant = None
suppressed: Boolean = OFF
fieldOutputRequestState: dict[str, FieldOutputRequestState] = dict[str, FieldOutputRequestState]()
historyOutputRequestState: dict[str, HistoryOutputRequestState] = dict[str, HistoryOutputRequestState]()
diagnosticPrint: DiagnosticPrint = DiagnosticPrint()
monitor: Monitor = None
restart: Restart = Restart()
adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState] = dict[
str, AdaptiveMeshConstraintState]()
adaptiveMeshDomains: dict[str, AdaptiveMeshDomain] = dict[str, AdaptiveMeshDomain]()
control: Control = Control()
solverControl: SolverControl = SolverControl()
boundaryConditionStates: dict[str, BoundaryConditionState] = dict[str, BoundaryConditionState]()
interactionStates: int = None
loadStates: dict[str, LoadState] = dict[str, LoadState]()
loadCases: dict[str, LoadCase] = dict[str, LoadCase]()
predefinedFieldStates: dict[str, PredefinedFieldState] = dict[str, PredefinedFieldState]()
def __init__(self, name: str, previous: str, description: str = '', refTemp: float = None,
maintainAttributes: Boolean = False):
super().__init__()
pass
def setValues(self, description: str = '', refTemp: float = None):
pass
| true
| true
|
79088095e8c2af441dce842e8978848873eb522b
| 88
|
py
|
Python
|
The Noble Houses of Apaxia/noble.py
|
nithinmanne/kattis
|
70cb32fb8c1010168b8a4101bd73bd74db2a087d
|
[
"BSD-3-Clause"
] | null | null | null |
The Noble Houses of Apaxia/noble.py
|
nithinmanne/kattis
|
70cb32fb8c1010168b8a4101bd73bd74db2a087d
|
[
"BSD-3-Clause"
] | null | null | null |
The Noble Houses of Apaxia/noble.py
|
nithinmanne/kattis
|
70cb32fb8c1010168b8a4101bd73bd74db2a087d
|
[
"BSD-3-Clause"
] | null | null | null |
name = input().split()
print(name[0], name[1]*(len(name[1]) if len(name[1])!=5 else 4))
| 44
| 64
| 0.602273
|
name = input().split()
print(name[0], name[1]*(len(name[1]) if len(name[1])!=5 else 4))
| true
| true
|
790880c86cb38747484b688f807153decbae4756
| 1,860
|
py
|
Python
|
exercizes/ur5_conf.py
|
NimaPng/tsid
|
23bbc6bace4f4623c2189535e71ba63bedbc4368
|
[
"BSD-2-Clause"
] | null | null | null |
exercizes/ur5_conf.py
|
NimaPng/tsid
|
23bbc6bace4f4623c2189535e71ba63bedbc4368
|
[
"BSD-2-Clause"
] | null | null | null |
exercizes/ur5_conf.py
|
NimaPng/tsid
|
23bbc6bace4f4623c2189535e71ba63bedbc4368
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 09:47:07 2019
@author: student
"""
import numpy as np
import os
np.set_printoptions(precision=3, linewidth=200, suppress=True)
LINE_WIDTH = 60
N_SIMULATION = 4000 # number of time steps simulated
dt = 0.002 # controller time step
q0 = np.array([ 0. , -1.0, 0.7, 0. , 0. , 0. ]) # initial configuration
# REFERENCE SINUSOIDAL TRAJECTORY
amp = np.array([0*0.02, 0.1, 0.10]) # amplitude
phi = np.array([0.0, 0.5*np.pi, 0.0]) # phase
two_pi_f = 1.4*2*np.pi*np.array([1.0, 0.5, 0.5])          # frequency (times 2 PI)
offset = np.array([0.0, 0.0, 0.0])
w_ee = 1.0 # weight of end-effector task
w_posture = 1e-3 # weight of joint posture task
w_torque_bounds = 1.0 # weight of the torque bounds
w_joint_bounds = 1.0          # weight of the joint bounds
kp_ee = 5.0 # proportional gain of end-effector constraint
kp_posture = 1.0 # proportional gain of joint posture task
tau_max_scaling = 0.4 # scaling factor of torque bounds
v_max_scaling = 0.4           # scaling factor of velocity bounds
ee_frame_name = "ee_fixed_joint" # end-effector frame name
ee_task_mask = np.array([1., 1, 1, 0, 0, 0])
PRINT_N = 500 # print every PRINT_N time steps
DISPLAY_N = 20                    # update robot configuration in viewer every DISPLAY_N time steps
CAMERA_TRANSFORM = [2.582354784011841, 1.620774507522583, 1.0674564838409424, 0.2770655155181885, 0.5401807427406311, 0.6969326734542847, 0.3817386031150818]
SPHERE_RADIUS = 0.03
REF_SPHERE_RADIUS = 0.03
EE_SPHERE_COLOR = (1, 0.5, 0, 0.5)
EE_REF_SPHERE_COLOR = (1, 0, 0, 0.5)
from example_robot_data.robots_loader import getModelPath
urdf = "/ur_description/urdf/ur5_robot.urdf"
path = getModelPath(urdf)
urdf = path+urdf
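# Illustration (not part of the original config; the evaluation formula is an
# assumption based on the accompanying TSID exercises): the sinusoidal
# end-effector reference defined by amp, two_pi_f, phi and offset is sampled
# once per control step as
#   t = i * dt
#   pos_ref = offset + amp * np.sin(two_pi_f * t + phi)
#   vel_ref = two_pi_f * amp * np.cos(two_pi_f * t + phi)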
| 37.2
| 157
| 0.626882
|
import numpy as np
import os
np.set_printoptions(precision=3, linewidth=200, suppress=True)
LINE_WIDTH = 60
N_SIMULATION = 4000
dt = 0.002
q0 = np.array([ 0. , -1.0, 0.7, 0. , 0. , 0. ])
amp = np.array([0*0.02, 0.1, 0.10])
phi = np.array([0.0, 0.5*np.pi, 0.0])
two_pi_f = 1.4*2*np.pi*np.array([1.0, 0.5, 0.5])
offset = np.array([0.0, 0.0, 0.0])
w_ee = 1.0
w_posture = 1e-3
w_torque_bounds = 1.0
w_joint_bounds = 1.0
kp_ee = 5.0
kp_posture = 1.0
tau_max_scaling = 0.4
v_max_scaling = 0.4
ee_frame_name = "ee_fixed_joint"
ee_task_mask = np.array([1., 1, 1, 0, 0, 0])
PRINT_N = 500
DISPLAY_N = 20
CAMERA_TRANSFORM = [2.582354784011841, 1.620774507522583, 1.0674564838409424, 0.2770655155181885, 0.5401807427406311, 0.6969326734542847, 0.3817386031150818]
SPHERE_RADIUS = 0.03
REF_SPHERE_RADIUS = 0.03
EE_SPHERE_COLOR = (1, 0.5, 0, 0.5)
EE_REF_SPHERE_COLOR = (1, 0, 0, 0.5)
from example_robot_data.robots_loader import getModelPath
urdf = "/ur_description/urdf/ur5_robot.urdf"
path = getModelPath(urdf)
urdf = path+urdf
| true
| true
|
79088261082774e4ab99fa5b9dbbf114d5a65826
| 5,073
|
py
|
Python
|
moneyforecast/tests/records/fixtures.py
|
curaloucura/money-forecast
|
b6032028106772526a96c3329c8af66f9b9d0930
|
[
"Unlicense"
] | 7
|
2015-02-01T21:44:23.000Z
|
2021-04-19T07:45:06.000Z
|
moneyforecast/tests/records/fixtures.py
|
curaloucura/money-forecast
|
b6032028106772526a96c3329c8af66f9b9d0930
|
[
"Unlicense"
] | null | null | null |
moneyforecast/tests/records/fixtures.py
|
curaloucura/money-forecast
|
b6032028106772526a96c3329c8af66f9b9d0930
|
[
"Unlicense"
] | 4
|
2015-06-27T15:05:06.000Z
|
2020-06-14T05:14:43.000Z
|
import pytest
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import User
from records.models import (
Category, Record, Budget, OUTCOME, INCOME, SAVINGS, tmz)
from records.month_control import MonthControl, MonthControlWithBudget
@pytest.fixture
def current_date():
today = date.today()
today_datetime = datetime(
day=today.day, month=today.month, year=today.year)
return tmz(today_datetime)
@pytest.fixture
def future_date(current_date):
date = current_date+relativedelta(days=1)
return date
@pytest.fixture
def day_of_month(future_date):
return future_date.day
@pytest.fixture
def start_of_recurrence(future_date):
"""
Date object representing the first day of a record with recurrence
"""
return future_date
@pytest.fixture
def end_of_recurrence(future_date):
"""
    Return the date that determines the month in which the recurrence
    should end
"""
date = future_date+relativedelta(months=6)
return date
@pytest.fixture
def next_month(current_date):
date = current_date+relativedelta(months=1)
return date
@pytest.fixture
def next_month_future(future_date):
date = future_date+relativedelta(months=1)
return date
@pytest.fixture
def infinite_future_date(current_date):
date = current_date+relativedelta(years=360)
return date
@pytest.fixture
def month_control(user, current_date):
"""
Return a MonthControl object for the current date.
Important: currently any Record fixture should come before month_control
"""
month_control = MonthControl(
user, current_date.month, current_date.year, cache={})
return month_control
@pytest.fixture
def month_control_with_budget(user, current_date):
"""
Return a MonthControlWithBudget object for the current date.
Important: currently any Record fixture should come before month_control
"""
month_control = MonthControlWithBudget(
user, current_date.month, current_date.year, cache={})
return month_control
def _user(username='test_user'):
raw_password = "fake"
new_user = User.objects.create_user(
username=username, email="a@b.com", password=raw_password)
setattr(new_user, "raw_password", raw_password)
return new_user
@pytest.fixture
def user():
return _user()
@pytest.fixture
def another_user():
return _user('another_user')
@pytest.fixture
def outcome(user):
"""
Main category of outcome type
"""
category = Category.objects.create(
name="outcome", type_category=OUTCOME, user=user)
return category
@pytest.fixture
def income(user):
"""
Main category of income type
"""
category = Category.objects.create(
name="income", type_category=INCOME, user=user)
return category
@pytest.fixture
def savings(user):
"""
Category of Savings
"""
category = Category.objects.create(
name="savings", type_category=SAVINGS, user=user)
return category
@pytest.fixture
def outcome_current(user, outcome, current_date):
"""
Record of type Outcome set to today (current date)
"""
record = Record.objects.create(
category=outcome, amount=1, start_date=current_date, user=user)
return record
@pytest.fixture
def outcome_future(user, outcome, future_date):
"""
Record of type Outcome set in the future
"""
record = Record.objects.create(
category=outcome, amount=1, start_date=future_date, user=user)
return record
@pytest.fixture
def outcome_recurrent(user, outcome, start_of_recurrence):
"""
Record of type Outcome set in the future with a day of the month set
to create a recurring record
This fixture should not be used with outcome_recurrent_limited and
outcome_with_parent since they change the instance of this own record
"""
record = Record.objects.create(
category=outcome, amount=1, start_date=start_of_recurrence, user=user,
day_of_month=start_of_recurrence.day)
return record
@pytest.fixture
def outcome_recurrent_limited(user, outcome_recurrent, end_of_recurrence):
"""
Record of type Outcome set in the future with a recurring day of the month
set and limited to a certain time
"""
outcome_recurrent.end_date = end_of_recurrence
outcome_recurrent.save()
return outcome_recurrent
@pytest.fixture
def outcome_with_parent(
outcome_future, outcome_recurrent, next_month_future):
outcome_future.parent = outcome_recurrent
outcome_future.start_date = next_month_future
outcome_future.save()
return outcome_future
@pytest.fixture
def savings_current(request, user, savings, current_date):
"""
    Record of type Savings set to today (current date)
"""
record = Record.objects.create(
category=savings, amount=1, start_date=current_date, user=user)
return record
@pytest.fixture
def budget(user):
budget = Budget.objects.create(user=user, amount=1)
return budget
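# Illustrative sketch (not part of this fixtures module; the test below is
# hypothetical): a test can request the date fixtures directly, e.g.
#   def test_future_date_follows_current_date(current_date, future_date):
#       assert future_date == current_date + relativedelta(days=1)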
| 24.389423
| 78
| 0.723044
|
import pytest
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import User
from records.models import (
Category, Record, Budget, OUTCOME, INCOME, SAVINGS, tmz)
from records.month_control import MonthControl, MonthControlWithBudget
@pytest.fixture
def current_date():
today = date.today()
today_datetime = datetime(
day=today.day, month=today.month, year=today.year)
return tmz(today_datetime)
@pytest.fixture
def future_date(current_date):
date = current_date+relativedelta(days=1)
return date
@pytest.fixture
def day_of_month(future_date):
return future_date.day
@pytest.fixture
def start_of_recurrence(future_date):
return future_date
@pytest.fixture
def end_of_recurrence(future_date):
date = future_date+relativedelta(months=6)
return date
@pytest.fixture
def next_month(current_date):
date = current_date+relativedelta(months=1)
return date
@pytest.fixture
def next_month_future(future_date):
date = future_date+relativedelta(months=1)
return date
@pytest.fixture
def infinite_future_date(current_date):
date = current_date+relativedelta(years=360)
return date
@pytest.fixture
def month_control(user, current_date):
month_control = MonthControl(
user, current_date.month, current_date.year, cache={})
return month_control
@pytest.fixture
def month_control_with_budget(user, current_date):
month_control = MonthControlWithBudget(
user, current_date.month, current_date.year, cache={})
return month_control
def _user(username='test_user'):
raw_password = "fake"
new_user = User.objects.create_user(
username=username, email="a@b.com", password=raw_password)
setattr(new_user, "raw_password", raw_password)
return new_user
@pytest.fixture
def user():
return _user()
@pytest.fixture
def another_user():
return _user('another_user')
@pytest.fixture
def outcome(user):
category = Category.objects.create(
name="outcome", type_category=OUTCOME, user=user)
return category
@pytest.fixture
def income(user):
category = Category.objects.create(
name="income", type_category=INCOME, user=user)
return category
@pytest.fixture
def savings(user):
category = Category.objects.create(
name="savings", type_category=SAVINGS, user=user)
return category
@pytest.fixture
def outcome_current(user, outcome, current_date):
record = Record.objects.create(
category=outcome, amount=1, start_date=current_date, user=user)
return record
@pytest.fixture
def outcome_future(user, outcome, future_date):
record = Record.objects.create(
category=outcome, amount=1, start_date=future_date, user=user)
return record
@pytest.fixture
def outcome_recurrent(user, outcome, start_of_recurrence):
record = Record.objects.create(
category=outcome, amount=1, start_date=start_of_recurrence, user=user,
day_of_month=start_of_recurrence.day)
return record
@pytest.fixture
def outcome_recurrent_limited(user, outcome_recurrent, end_of_recurrence):
outcome_recurrent.end_date = end_of_recurrence
outcome_recurrent.save()
return outcome_recurrent
@pytest.fixture
def outcome_with_parent(
outcome_future, outcome_recurrent, next_month_future):
outcome_future.parent = outcome_recurrent
outcome_future.start_date = next_month_future
outcome_future.save()
return outcome_future
@pytest.fixture
def savings_current(request, user, savings, current_date):
record = Record.objects.create(
category=savings, amount=1, start_date=current_date, user=user)
return record
@pytest.fixture
def budget(user):
budget = Budget.objects.create(user=user, amount=1)
return budget
| true
| true
|
790882ca2f71e387f57bad9561d8c48b85ea3f06
| 14,031
|
py
|
Python
|
pyregion/mpl_helper.py
|
keflavich/pyregion
|
1ed46731eedffcb52910b0574b2a4e7a8cc99a7d
|
[
"MIT"
] | null | null | null |
pyregion/mpl_helper.py
|
keflavich/pyregion
|
1ed46731eedffcb52910b0574b2a4e7a8cc99a7d
|
[
"MIT"
] | null | null | null |
pyregion/mpl_helper.py
|
keflavich/pyregion
|
1ed46731eedffcb52910b0574b2a4e7a8cc99a7d
|
[
"MIT"
] | null | null | null |
import copy
import numpy as np
from math import cos, sin, pi, atan2
import warnings
import matplotlib.patches as mpatches
from matplotlib.path import Path
from matplotlib.lines import Line2D
from matplotlib.transforms import Affine2D, Bbox, IdentityTransform
from matplotlib.text import Annotation
def rotated_polygon(xy, ox, oy, angle):
# angle in degree
theta = angle / 180. * pi
st = sin(theta)
ct = cos(theta)
xy = np.asarray(xy, dtype="d")
x, y = xy[:, 0], xy[:, 1]
x1 = x - ox
y1 = y - oy
x2 = ct * x1 + -st * y1
y2 = st * x1 + ct * y1
xp = x2 + ox
yp = y2 + oy
return np.hstack((xp.reshape((-1, 1)), yp.reshape((-1, 1))))
# sss3 = [s1[0] for s1 in sss2 if isinstance(s1[0], parser_ds9.Shape)]
_point_type_dict = dict(circle="o",
box="s",
diamond="D",
x="x",
cross="+",
arrow="^",
boxcircle="*")
_ds9_to_mpl_colormap = dict(green="lime",
)
def properties_func_default(shape, saved_attrs):
attr_list = copy.copy(shape.attr[0])
attr_dict = copy.copy(shape.attr[1])
attr_list.extend(saved_attrs[0])
attr_dict.update(saved_attrs[1])
color = attr_dict.get("color", None)
color = _ds9_to_mpl_colormap.get(color, color)
if shape.name == "text":
kwargs = dict(color=color,
rotation=attr_dict.get("textangle", 0),
)
font = attr_dict.get("font")
if font:
a = font.split()
if len(a) >= 3:
fontsize = float(a[1])
kwargs["fontsize"] = fontsize
elif shape.name == "point":
point_attrs = attr_dict.get("point", "boxcircle").split()
if len(point_attrs) == 1:
point_type = point_attrs[0]
point_size = 11
elif len(point_attrs) > 1:
point_type = point_attrs[0]
point_size = int(point_attrs[1])
marker = _point_type_dict.get(point_type, "o")
kwargs = dict(markeredgecolor=color,
markerfacecolor="none",
marker=marker,
markeredgewidth=int(attr_dict.get("width", 1)),
markersize=point_size
)
elif shape.name in ["line", "vector"]:
fontsize = 10 # default font size
font = attr_dict.get("font")
if font:
a = font.split()
if len(a) >= 3:
fontsize = float(a[1])
kwargs = dict(color=color,
linewidth=int(attr_dict.get("width", 1)),
mutation_scale=fontsize,
)
if int(attr_dict.get("dash", "0")):
kwargs["linestyle"] = "dashed"
else:
kwargs = dict(edgecolor=color,
linewidth=int(attr_dict.get("width", 1)),
facecolor="none"
)
if "background" in attr_list:
kwargs["linestyle"] = "dashed"
if int(attr_dict.get("dash", "0")):
kwargs["linestyle"] = "dashed"
if shape.exclude:
kwargs["hatch"] = "/"
return kwargs
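# Illustration (not part of the original module): for a non-text, non-point
# shape whose merged ds9 attributes are {"color": "green", "width": "2"},
# the fallback branch above yields
#   {"edgecolor": "lime", "linewidth": 2, "facecolor": "none"}
# ("green" is remapped to matplotlib's "lime" via _ds9_to_mpl_colormap).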
def _get_text(txt, x, y, dx, dy, ha="center", va="center", **kwargs):
if "color" in kwargs:
textcolor = kwargs["color"]
del kwargs["color"]
elif "markeredgecolor" in kwargs:
textcolor = kwargs["markeredgecolor"]
else:
import matplotlib as mpl
textcolor = mpl.rcParams['text.color']
ann = Annotation(txt, (x, y), xytext=(dx, dy),
xycoords='data',
textcoords="offset points",
color=textcolor,
ha=ha, va=va,
**kwargs)
ann.set_transform(IdentityTransform())
return ann
def as_mpl_artists(shape_list,
properties_func=None,
text_offset=5.0, origin=1):
"""
Converts a region list to a list of patches and a list of artists.
Optional Keywords:
[ text_offset ] - If there is text associated with the regions, add
some vertical offset (in pixels) to the text so that it doesn't overlap
with the regions.
Often, the regions files implicitly assume the lower-left corner
of the image as a coordinate (1,1). However, the python convetion
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists have coordinate shifted by
(1, 1). If you do not want this shift, set origin=0.
"""
patch_list = []
artist_list = []
if properties_func is None:
properties_func = properties_func_default
    # saved properties for regions continued across multiple lines
saved_attrs = None
for shape in shape_list:
patches = []
if saved_attrs is None:
_attrs = [], {}
else:
_attrs = copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1])
kwargs = properties_func(shape, _attrs)
if shape.name == "composite":
saved_attrs = shape.attr
continue
if saved_attrs is None and shape.continued:
saved_attrs = shape.attr
# elif (shape.name in shape.attr[1]):
# if (shape.attr[1][shape.name] != "ignore"):
# saved_attrs = shape.attr
if not shape.continued:
saved_attrs = None
# text associated with the shape
txt = shape.attr[1].get("text")
if shape.name == "polygon":
xy = np.array(shape.coord_list)
xy.shape = -1, 2
# -1 for change origin to 0,0
patches = [mpatches.Polygon(xy - origin, closed=True, **kwargs)]
elif shape.name == "rotbox" or shape.name == "box":
xc, yc, w, h, rot = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
_box = np.array([[-w / 2., -h / 2.],
[-w / 2., h / 2.],
[w / 2., h / 2.],
[w / 2., -h / 2.]])
box = _box + [xc, yc]
rotbox = rotated_polygon(box, xc, yc, rot)
patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]
elif shape.name == "ellipse":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
angle = shape.coord_list[-1]
maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]
patches = [mpatches.Ellipse((xc, yc), 2 * maj, 2 * min,
angle=angle, **kwargs)
for maj, min in zip(maj_list, min_list)]
elif shape.name == "annulus":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
r_list = shape.coord_list[2:]
patches = [mpatches.Ellipse((xc, yc), 2 * r, 2 * r, **kwargs) for r in r_list]
elif shape.name == "circle":
xc, yc, major = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
patches = [mpatches.Ellipse((xc, yc), 2 * major, 2 * major, angle=0, **kwargs)]
elif shape.name == "panda":
xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
theta1=a1, theta2=a2, **kwargs)
for rr in np.linspace(r1, r2, rn + 1)]
for aa in np.linspace(a1, a2, an + 1):
xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif shape.name == "pie":
xc, yc, r1, r2, a1, a2 = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
theta1=a1, theta2=a2, **kwargs)
for rr in [r1, r2]]
for aa in [a1, a2]:
xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif shape.name == "epanda":
xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
# mpl takes angle a1, a2 as angle as in circle before
# transformation to ellipse.
x1, y1 = cos(a1 / 180. * pi), sin(a1 / 180. * pi) * r11 / r12
x2, y2 = cos(a2 / 180. * pi), sin(a2 / 180. * pi) * r11 / r12
a1, a2 = atan2(y1, x1) / pi * 180., atan2(y2, x2) / pi * 180.
patches = [mpatches.Arc((xc, yc), rr1 * 2, rr2 * 2,
angle=angle, theta1=a1, theta2=a2,
**kwargs)
for rr1, rr2 in zip(np.linspace(r11, r21, rn + 1),
np.linspace(r12, r22, rn + 1))]
for aa in np.linspace(a1, a2, an + 1):
xx = np.array([r11, r21]) * np.cos(aa / 180. * np.pi)
yy = np.array([r11, r21]) * np.sin(aa / 180. * np.pi)
p = Path(np.transpose([xx, yy]))
tr = Affine2D().scale(1, r12 / r11).rotate_deg(angle).translate(xc, yc)
p2 = tr.transform_path(p)
patches.append(mpatches.PathPatch(p2, **kwargs))
elif shape.name == "text":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
if txt:
_t = _get_text(txt, xc, yc, 0, 0, **kwargs)
artist_list.append(_t)
elif shape.name == "point":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
artist_list.append(Line2D([xc], [yc],
**kwargs))
if txt:
textshape = copy.copy(shape)
textshape.name = "text"
textkwargs = properties_func(textshape, _attrs)
_t = _get_text(txt, xc, yc, 0, text_offset,
va="bottom",
**textkwargs)
artist_list.append(_t)
elif shape.name in ["line", "vector"]:
if shape.name == "line":
x1, y1, x2, y2 = shape.coord_list[:4]
# -1 for change origin to 0,0
x1, y1, x2, y2 = x1 - origin, y1 - origin, x2 - origin, y2 - origin
a1, a2 = shape.attr[1].get("line", "0 0").strip().split()[:2]
arrowstyle = "-"
if int(a1):
arrowstyle = "<" + arrowstyle
if int(a2):
arrowstyle = arrowstyle + ">"
else: # shape.name == "vector"
x1, y1, l, a = shape.coord_list[:4]
# -1 for change origin to 0,0
x1, y1 = x1 - origin, y1 - origin
x2, y2 = x1 + l * np.cos(a / 180. * np.pi), y1 + l * np.sin(a / 180. * np.pi)
v1 = int(shape.attr[1].get("vector", "0").strip())
if v1:
arrowstyle = "->"
else:
arrowstyle = "-"
patches = [mpatches.FancyArrowPatch(posA=(x1, y1),
posB=(x2, y2),
arrowstyle=arrowstyle,
arrow_transmuter=None,
connectionstyle="arc3",
patchA=None, patchB=None,
shrinkA=0, shrinkB=0,
connector=None,
**kwargs)]
else:
warnings.warn("'as_mpl_artists' does not know how to convert {0} "
"to mpl artist".format(shape.name))
patch_list.extend(patches)
if txt and patches:
# the text associated with a shape uses different
# matplotlib keywords than the shape itself for, e.g.,
# color
textshape = copy.copy(shape)
textshape.name = "text"
textkwargs = properties_func(textshape, _attrs)
# calculate the text position
_bb = [p.get_window_extent() for p in patches]
# this is to work around backward-incompatible change made
# in matplotlib 1.2. This change is later reverted so only
# some versions are affected. With affected version of
# matplotlib, get_window_extent method calls get_transform
# method which sets the _transformSet to True, which is
# not desired.
for p in patches:
p._transformSet = False
_bbox = Bbox.union(_bb)
x0, y0, x1, y1 = _bbox.extents
xc = .5 * (x0 + x1)
_t = _get_text(txt, xc, y1, 0, text_offset,
va="bottom",
**textkwargs)
artist_list.append(_t)
return patch_list, artist_list
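if __name__ == "__main__":
    # Minimal self-check sketch (added for illustration, not part of the
    # original module): rotating points 90 degrees about the origin with
    # rotated_polygon maps (1, 0) -> (0, 1) up to floating-point error.
    pts = rotated_polygon([(1.0, 0.0), (0.0, 1.0)], 0.0, 0.0, 90.0)
    print(np.round(pts, 6))  # expected approximately [[0, 1], [-1, 0]]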
| 36.069409
| 93
| 0.480721
|
import copy
import numpy as np
from math import cos, sin, pi, atan2
import warnings
import matplotlib.patches as mpatches
from matplotlib.path import Path
from matplotlib.lines import Line2D
from matplotlib.transforms import Affine2D, Bbox, IdentityTransform
from matplotlib.text import Annotation
def rotated_polygon(xy, ox, oy, angle):
theta = angle / 180. * pi
st = sin(theta)
ct = cos(theta)
xy = np.asarray(xy, dtype="d")
x, y = xy[:, 0], xy[:, 1]
x1 = x - ox
y1 = y - oy
x2 = ct * x1 + -st * y1
y2 = st * x1 + ct * y1
xp = x2 + ox
yp = y2 + oy
return np.hstack((xp.reshape((-1, 1)), yp.reshape((-1, 1))))
_point_type_dict = dict(circle="o",
box="s",
diamond="D",
x="x",
cross="+",
arrow="^",
boxcircle="*")
_ds9_to_mpl_colormap = dict(green="lime",
)
def properties_func_default(shape, saved_attrs):
attr_list = copy.copy(shape.attr[0])
attr_dict = copy.copy(shape.attr[1])
attr_list.extend(saved_attrs[0])
attr_dict.update(saved_attrs[1])
color = attr_dict.get("color", None)
color = _ds9_to_mpl_colormap.get(color, color)
if shape.name == "text":
kwargs = dict(color=color,
rotation=attr_dict.get("textangle", 0),
)
font = attr_dict.get("font")
if font:
a = font.split()
if len(a) >= 3:
fontsize = float(a[1])
kwargs["fontsize"] = fontsize
elif shape.name == "point":
point_attrs = attr_dict.get("point", "boxcircle").split()
if len(point_attrs) == 1:
point_type = point_attrs[0]
point_size = 11
elif len(point_attrs) > 1:
point_type = point_attrs[0]
point_size = int(point_attrs[1])
marker = _point_type_dict.get(point_type, "o")
kwargs = dict(markeredgecolor=color,
markerfacecolor="none",
marker=marker,
markeredgewidth=int(attr_dict.get("width", 1)),
markersize=point_size
)
elif shape.name in ["line", "vector"]:
fontsize = 10
font = attr_dict.get("font")
if font:
a = font.split()
if len(a) >= 3:
fontsize = float(a[1])
kwargs = dict(color=color,
linewidth=int(attr_dict.get("width", 1)),
mutation_scale=fontsize,
)
if int(attr_dict.get("dash", "0")):
kwargs["linestyle"] = "dashed"
else:
kwargs = dict(edgecolor=color,
linewidth=int(attr_dict.get("width", 1)),
facecolor="none"
)
if "background" in attr_list:
kwargs["linestyle"] = "dashed"
if int(attr_dict.get("dash", "0")):
kwargs["linestyle"] = "dashed"
if shape.exclude:
kwargs["hatch"] = "/"
return kwargs
def _get_text(txt, x, y, dx, dy, ha="center", va="center", **kwargs):
if "color" in kwargs:
textcolor = kwargs["color"]
del kwargs["color"]
elif "markeredgecolor" in kwargs:
textcolor = kwargs["markeredgecolor"]
else:
import matplotlib as mpl
textcolor = mpl.rcParams['text.color']
ann = Annotation(txt, (x, y), xytext=(dx, dy),
xycoords='data',
textcoords="offset points",
color=textcolor,
ha=ha, va=va,
**kwargs)
ann.set_transform(IdentityTransform())
return ann
def as_mpl_artists(shape_list,
properties_func=None,
text_offset=5.0, origin=1):
patch_list = []
artist_list = []
if properties_func is None:
properties_func = properties_func_default
saved_attrs = None
for shape in shape_list:
patches = []
if saved_attrs is None:
_attrs = [], {}
else:
_attrs = copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1])
kwargs = properties_func(shape, _attrs)
if shape.name == "composite":
saved_attrs = shape.attr
continue
if saved_attrs is None and shape.continued:
saved_attrs = shape.attr
if not shape.continued:
saved_attrs = None
txt = shape.attr[1].get("text")
if shape.name == "polygon":
xy = np.array(shape.coord_list)
xy.shape = -1, 2
patches = [mpatches.Polygon(xy - origin, closed=True, **kwargs)]
elif shape.name == "rotbox" or shape.name == "box":
xc, yc, w, h, rot = shape.coord_list
xc, yc = xc - origin, yc - origin
_box = np.array([[-w / 2., -h / 2.],
[-w / 2., h / 2.],
[w / 2., h / 2.],
[w / 2., -h / 2.]])
box = _box + [xc, yc]
rotbox = rotated_polygon(box, xc, yc, rot)
patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]
elif shape.name == "ellipse":
xc, yc = shape.coord_list[:2]
xc, yc = xc - origin, yc - origin
angle = shape.coord_list[-1]
maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]
patches = [mpatches.Ellipse((xc, yc), 2 * maj, 2 * min,
angle=angle, **kwargs)
for maj, min in zip(maj_list, min_list)]
elif shape.name == "annulus":
xc, yc = shape.coord_list[:2]
xc, yc = xc - origin, yc - origin
r_list = shape.coord_list[2:]
patches = [mpatches.Ellipse((xc, yc), 2 * r, 2 * r, **kwargs) for r in r_list]
elif shape.name == "circle":
xc, yc, major = shape.coord_list
xc, yc = xc - origin, yc - origin
patches = [mpatches.Ellipse((xc, yc), 2 * major, 2 * major, angle=0, **kwargs)]
elif shape.name == "panda":
xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list
xc, yc = xc - origin, yc - origin
patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
theta1=a1, theta2=a2, **kwargs)
for rr in np.linspace(r1, r2, rn + 1)]
for aa in np.linspace(a1, a2, an + 1):
xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif shape.name == "pie":
xc, yc, r1, r2, a1, a2 = shape.coord_list
xc, yc = xc - origin, yc - origin
patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
theta1=a1, theta2=a2, **kwargs)
for rr in [r1, r2]]
for aa in [a1, a2]:
xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif shape.name == "epanda":
xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
xc, yc = xc - origin, yc - origin
x1, y1 = cos(a1 / 180. * pi), sin(a1 / 180. * pi) * r11 / r12
x2, y2 = cos(a2 / 180. * pi), sin(a2 / 180. * pi) * r11 / r12
a1, a2 = atan2(y1, x1) / pi * 180., atan2(y2, x2) / pi * 180.
patches = [mpatches.Arc((xc, yc), rr1 * 2, rr2 * 2,
angle=angle, theta1=a1, theta2=a2,
**kwargs)
for rr1, rr2 in zip(np.linspace(r11, r21, rn + 1),
np.linspace(r12, r22, rn + 1))]
for aa in np.linspace(a1, a2, an + 1):
xx = np.array([r11, r21]) * np.cos(aa / 180. * np.pi)
yy = np.array([r11, r21]) * np.sin(aa / 180. * np.pi)
p = Path(np.transpose([xx, yy]))
tr = Affine2D().scale(1, r12 / r11).rotate_deg(angle).translate(xc, yc)
p2 = tr.transform_path(p)
patches.append(mpatches.PathPatch(p2, **kwargs))
elif shape.name == "text":
xc, yc = shape.coord_list[:2]
xc, yc = xc - origin, yc - origin
if txt:
_t = _get_text(txt, xc, yc, 0, 0, **kwargs)
artist_list.append(_t)
elif shape.name == "point":
xc, yc = shape.coord_list[:2]
xc, yc = xc - origin, yc - origin
artist_list.append(Line2D([xc], [yc],
**kwargs))
if txt:
textshape = copy.copy(shape)
textshape.name = "text"
textkwargs = properties_func(textshape, _attrs)
_t = _get_text(txt, xc, yc, 0, text_offset,
va="bottom",
**textkwargs)
artist_list.append(_t)
elif shape.name in ["line", "vector"]:
if shape.name == "line":
x1, y1, x2, y2 = shape.coord_list[:4]
x1, y1, x2, y2 = x1 - origin, y1 - origin, x2 - origin, y2 - origin
a1, a2 = shape.attr[1].get("line", "0 0").strip().split()[:2]
arrowstyle = "-"
if int(a1):
arrowstyle = "<" + arrowstyle
if int(a2):
arrowstyle = arrowstyle + ">"
else:
x1, y1, l, a = shape.coord_list[:4]
x1, y1 = x1 - origin, y1 - origin
x2, y2 = x1 + l * np.cos(a / 180. * np.pi), y1 + l * np.sin(a / 180. * np.pi)
v1 = int(shape.attr[1].get("vector", "0").strip())
if v1:
arrowstyle = "->"
else:
arrowstyle = "-"
patches = [mpatches.FancyArrowPatch(posA=(x1, y1),
posB=(x2, y2),
arrowstyle=arrowstyle,
arrow_transmuter=None,
connectionstyle="arc3",
patchA=None, patchB=None,
shrinkA=0, shrinkB=0,
connector=None,
**kwargs)]
else:
warnings.warn("'as_mpl_artists' does not know how to convert {0} "
"to mpl artist".format(shape.name))
patch_list.extend(patches)
if txt and patches:
textshape = copy.copy(shape)
textshape.name = "text"
textkwargs = properties_func(textshape, _attrs)
_bb = [p.get_window_extent() for p in patches]
for p in patches:
p._transformSet = False
_bbox = Bbox.union(_bb)
x0, y0, x1, y1 = _bbox.extents
xc = .5 * (x0 + x1)
_t = _get_text(txt, xc, y1, 0, text_offset,
va="bottom",
**textkwargs)
artist_list.append(_t)
return patch_list, artist_list
| true
| true
|
79088521341c908e9acfbace2f7914850759e997
| 3,642
|
py
|
Python
|
code_examples/tensorflow/kernel_benchmarks/dense.py
|
Splendon/examples
|
ed4a8a01857b6ddca49559141acf5d0986eb01e1
|
[
"MIT"
] | null | null | null |
code_examples/tensorflow/kernel_benchmarks/dense.py
|
Splendon/examples
|
ed4a8a01857b6ddca49559141acf5d0986eb01e1
|
[
"MIT"
] | null | null | null |
code_examples/tensorflow/kernel_benchmarks/dense.py
|
Splendon/examples
|
ed4a8a01857b6ddca49559141acf5d0986eb01e1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Benchmark a single Dense layer with no host/device data transfers.
The Items/sec reported at the end of the benchmark is based on wall time.
Run with -h or --help for options.
"""
import inspect
import os
import sys
import tensorflow as tf
from tensorflow.python.ipu import utils
def dense(opts, inputs):
# Add ReLU activation function if appropriate option is set
if opts.activation:
return tf.layers.dense(units=opts.size, inputs=inputs, activation=tf.nn.relu)
else:
return tf.layers.dense(units=opts.size, inputs=inputs)
def inputs(opts, index):
value = tf.cast(index, tf.float16)
return {
"inputs": tf.broadcast_to(value, [opts.batch_size, opts.size]),
}
def graph_builder(opts, inputs):
output = dense(opts, inputs["inputs"])
if opts.train:
# Loss is the mean across output matrix:
loss = tf.reduce_mean(output)
optimiser = tf.train.GradientDescentOptimizer(0.01)
with tf.variable_scope("train", reuse=tf.AUTO_REUSE):
# We need to ensure that the train op is executed as part of
# the benchmarking loop by maintaining a step variable and
# forcing a control dependency between it and the train op:
global_step = tf.get_variable(
"step_control", dtype=tf.int32, shape=[])
grads_and_vars = optimiser.compute_gradients(
loss, tf.trainable_variables())
train = optimiser.apply_gradients(grads_and_vars, global_step)
with tf.control_dependencies([train]):
global_step = tf.identity(global_step)
return global_step
return output
def initializer():
utils.move_variable_initialization_to_cpu()
return tf.global_variables_initializer()
def add_args(parser):
parser.add_argument("--batch-size", default=32, type=int,
help="Number of inputs in a mini-batch")
parser.add_argument("--size", default=1024, type=int,
help="Dense layer size")
parser.add_argument("--train", action='store_true', dest='train',
help="Compute loss and optimization pass")
parser.add_argument("--include-activation", action='store_true', dest='activation',
help="Include ReLU activation (otherwise linear/no activation")
parser.set_defaults(train=False, batches_per_step=5000, steps=5)
return parser
def iteration_report(opts, time):
return "{:5f} items/sec".format(opts.batch_size * opts.batches_per_step / time)
if __name__ == '__main__':
# Add benchmark module to path
cwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
sys.path.insert(1, os.path.join(cwd, '..', '..', '..', 'utils',
'benchmarks', 'tensorflow'))
import benchmark
module = benchmark.Benchmark(
graph_builder,
inputs,
initializer,
add_args,
iteration_report
)
options = benchmark.parse_opts(module, False)
if options.shards > 0:
raise NotImplementedError(
"--shards option has not been implemented with this example")
# Log Benchmark Message
print(" Dense layer {} Synthetic benchmark.\n"
" Batch size {}.\n"
" Batches per Step {}.\n"
" Dense size {}.\n"
.format(
"Training" if options.train else "Inference",
options.batch_size,
options.batches_per_step if not options.cycle_report else "n/a",
options.size))
benchmark.run(module, options)
| 33.109091
| 87
| 0.635365
|
import inspect
import os
import sys
import tensorflow as tf
from tensorflow.python.ipu import utils
def dense(opts, inputs):
if opts.activation:
return tf.layers.dense(units=opts.size, inputs=inputs, activation=tf.nn.relu)
else:
return tf.layers.dense(units=opts.size, inputs=inputs)
def inputs(opts, index):
value = tf.cast(index, tf.float16)
return {
"inputs": tf.broadcast_to(value, [opts.batch_size, opts.size]),
}
def graph_builder(opts, inputs):
output = dense(opts, inputs["inputs"])
if opts.train:
loss = tf.reduce_mean(output)
optimiser = tf.train.GradientDescentOptimizer(0.01)
with tf.variable_scope("train", reuse=tf.AUTO_REUSE):
global_step = tf.get_variable(
"step_control", dtype=tf.int32, shape=[])
grads_and_vars = optimiser.compute_gradients(
loss, tf.trainable_variables())
train = optimiser.apply_gradients(grads_and_vars, global_step)
with tf.control_dependencies([train]):
global_step = tf.identity(global_step)
return global_step
return output
def initializer():
utils.move_variable_initialization_to_cpu()
return tf.global_variables_initializer()
def add_args(parser):
parser.add_argument("--batch-size", default=32, type=int,
help="Number of inputs in a mini-batch")
parser.add_argument("--size", default=1024, type=int,
help="Dense layer size")
parser.add_argument("--train", action='store_true', dest='train',
help="Compute loss and optimization pass")
parser.add_argument("--include-activation", action='store_true', dest='activation',
help="Include ReLU activation (otherwise linear/no activation")
parser.set_defaults(train=False, batches_per_step=5000, steps=5)
return parser
def iteration_report(opts, time):
return "{:5f} items/sec".format(opts.batch_size * opts.batches_per_step / time)
if __name__ == '__main__':
cwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
sys.path.insert(1, os.path.join(cwd, '..', '..', '..', 'utils',
'benchmarks', 'tensorflow'))
import benchmark
module = benchmark.Benchmark(
graph_builder,
inputs,
initializer,
add_args,
iteration_report
)
options = benchmark.parse_opts(module, False)
if options.shards > 0:
raise NotImplementedError(
"--shards option has not been implemented with this example")
print(" Dense layer {} Synthetic benchmark.\n"
" Batch size {}.\n"
" Batches per Step {}.\n"
" Dense size {}.\n"
.format(
"Training" if options.train else "Inference",
options.batch_size,
options.batches_per_step if not options.cycle_report else "n/a",
options.size))
benchmark.run(module, options)
| true
| true
|
7908855d61873f2dcaabb241bac1d57344c71570
| 7,430
|
py
|
Python
|
demo/search/src/eval/evaluation.py
|
meta-soul/MetaSpore
|
e6fbc12c6a3139df76c87215b16f9dba65962ec7
|
[
"Apache-2.0"
] | 32
|
2022-03-30T10:24:00.000Z
|
2022-03-31T16:19:15.000Z
|
demo/search/src/eval/evaluation.py
|
meta-soul/MetaSpore
|
e6fbc12c6a3139df76c87215b16f9dba65962ec7
|
[
"Apache-2.0"
] | null | null | null |
demo/search/src/eval/evaluation.py
|
meta-soul/MetaSpore
|
e6fbc12c6a3139df76c87215b16f9dba65962ec7
|
[
"Apache-2.0"
] | 3
|
2022-03-30T10:28:57.000Z
|
2022-03-30T11:37:39.000Z
|
#
# Copyright 2022 DMetaSoul
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module computes evaluation metrics for the MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
"""
import sys
import json
from collections import Counter
MaxMRRRank = 10
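# MRR@10, as computed below: each query contributes the reciprocal rank
# 1/(i+1) of its first relevant passage within the top MaxMRRRank results
# (0 if none), averaged over all reference queries. E.g. a first hit at
# rank 3 contributes 1/3 to the mean.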
def load_reference_from_stream(f):
qids_to_relevant_passageids = {}
for line in f:
try:
sample = json.loads(line.strip())
qid = sample["question_id"]
if qid in qids_to_relevant_passageids:
pass
else:
qids_to_relevant_passageids[qid] = []
for answer_paragraph in sample["answer_paragraphs"]:
qids_to_relevant_passageids[qid].append(answer_paragraph["paragraph_id"])
        except:
            raise IOError('"%s" is not in a valid format' % line)
return qids_to_relevant_passageids
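# Illustrative reference line (hypothetical ids), one JSON object per line:
#   {"question_id": "q1", "answer_paragraphs": [{"paragraph_id": "p3"}, {"paragraph_id": "p7"}]}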
def load_reference(path_to_reference):
"""Load Reference reference relevant passages
Args:path_to_reference (str): path to a file to load.
Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).
"""
with open(path_to_reference, 'r') as f:
qids_to_relevant_passageids = load_reference_from_stream(f)
return qids_to_relevant_passageids
def load_candidate_from_stream(f):
qid_to_ranked_candidate_passages = {}
try:
preds = json.load(f)
for qid in preds.keys():
tmp = [0] * 50
qid_to_ranked_candidate_passages[qid] = tmp
for rank, pid in enumerate(preds[qid][:50]):
qid_to_ranked_candidate_passages[qid][rank] = pid
    except:
        raise IOError('Submitted file is not in a valid format')
return qid_to_ranked_candidate_passages
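# Illustrative candidate file (hypothetical ids): a single JSON object mapping
# each qid to its ranked pids; lists are padded/truncated to 50 slots above.
#   {"q1": ["p3", "p9", "p2"], "q2": ["p7"]}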
def load_candidate(path_to_candidate):
"""Load candidate data from a file.
Args:path_to_candidate (str): path to file to load.
Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance
"""
with open(path_to_candidate, 'r') as f:
qid_to_ranked_candidate_passages = load_candidate_from_stream(f)
return qid_to_ranked_candidate_passages
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
"""Perform quality checks on the dictionaries
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
bool,str: Boolean whether allowed, message to be shown in case of a problem
"""
message = ''
allowed = True
# Create sets of the QIDs for the submitted and reference queries
candidate_set = set(qids_to_ranked_candidate_passages.keys())
ref_set = set(qids_to_relevant_passageids.keys())
# Check that we do not have multiple passages per query
for qid in qids_to_ranked_candidate_passages:
# Remove all zeros from the candidates
duplicate_pids = set(
[item for item, count in Counter(qids_to_ranked_candidate_passages[qid]).items() if count > 1])
if len(duplicate_pids - set([0])) > 0:
message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
qid=qid, pid=list(duplicate_pids)[0])
allowed = False
return allowed, message
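# E.g. a candidate list ["p1", "p1", 0, 0, ...] fails the check above for "p1",
# while the padding zeros are exempted via `duplicate_pids - set([0])`.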
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
"""Compute MRR metric
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
dict: dictionary of metrics {'MRR': <MRR Score>}
"""
all_scores = {}
MRR = 0
qids_with_relevant_passages = 0
ranking = []
recall_q_top1 = set()
recall_q_top50 = set()
recall_q_all = set()
for qid in qids_to_ranked_candidate_passages:
if qid in qids_to_relevant_passageids:
ranking.append(0)
target_pid = qids_to_relevant_passageids[qid]
candidate_pid = qids_to_ranked_candidate_passages[qid]
for i in range(0, MaxMRRRank):
if candidate_pid[i] in target_pid:
MRR += 1.0 / (i + 1)
ranking.pop()
ranking.append(i + 1)
break
for i, pid in enumerate(candidate_pid):
if pid in target_pid:
recall_q_all.add(qid)
if i < 50:
recall_q_top50.add(qid)
if i == 0:
recall_q_top1.add(qid)
break
if len(ranking) == 0:
raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
MRR = MRR / len(qids_to_relevant_passageids)
recall_top1 = len(recall_q_top1) * 1.0 / len(qids_to_relevant_passageids)
recall_top50 = len(recall_q_top50) * 1.0 / len(qids_to_relevant_passageids)
recall_all = len(recall_q_all) * 1.0 / len(qids_to_relevant_passageids)
all_scores['MRR@10'] = MRR
all_scores["recall@1"] = recall_top1
all_scores["recall@50"] = recall_top50
# all_scores["recall@all"] = recall_all
all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
return all_scores
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
qids_to_relevant_passageids = load_reference(path_to_reference)
qids_to_ranked_candidate_passages = load_candidate(path_to_candidate)
if perform_checks:
allowed, message = quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
if message != '': print(message)
return compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
def main():
"""Command line:
python result_eval.py <path_to_reference_file> <path_to_candidate_file>
"""
if len(sys.argv) == 3:
path_to_reference = sys.argv[1]
path_to_candidate = sys.argv[2]
else:
print('Usage: result_eval.py <reference ranking> <candidate ranking>')
exit()
metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
result = dict()
for metric in sorted(metrics):
result[metric] = metrics[metric]
result_json = json.dumps(result)
print(result_json)
if __name__ == '__main__':
main()
| 37.715736
| 161
| 0.689367
|
import sys
import json
from collections import Counter
MaxMRRRank = 10
def load_reference_from_stream(f):
qids_to_relevant_passageids = {}
for line in f:
try:
sample = json.loads(line.strip())
qid = sample["question_id"]
if qid in qids_to_relevant_passageids:
pass
else:
qids_to_relevant_passageids[qid] = []
for answer_paragraph in sample["answer_paragraphs"]:
qids_to_relevant_passageids[qid].append(answer_paragraph["paragraph_id"])
except:
raise IOError('\"%s\" is not valid format' % line)
return qids_to_relevant_passageids
def load_reference(path_to_reference):
with open(path_to_reference, 'r') as f:
qids_to_relevant_passageids = load_reference_from_stream(f)
return qids_to_relevant_passageids
def load_candidate_from_stream(f):
qid_to_ranked_candidate_passages = {}
try:
preds = json.load(f)
for qid in preds.keys():
tmp = [0] * 50
qid_to_ranked_candidate_passages[qid] = tmp
for rank, pid in enumerate(preds[qid][:50]):
qid_to_ranked_candidate_passages[qid][rank] = pid
except:
raise IOError('Submitted file is not valid format')
return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
with open(path_to_candidate, 'r') as f:
qid_to_ranked_candidate_passages = load_candidate_from_stream(f)
return qid_to_ranked_candidate_passages
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
message = ''
allowed = True
candidate_set = set(qids_to_ranked_candidate_passages.keys())
ref_set = set(qids_to_relevant_passageids.keys())
for qid in qids_to_ranked_candidate_passages:
duplicate_pids = set(
[item for item, count in Counter(qids_to_ranked_candidate_passages[qid]).items() if count > 1])
if len(duplicate_pids - set([0])) > 0:
message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
qid=qid, pid=list(duplicate_pids)[0])
allowed = False
return allowed, message
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
all_scores = {}
MRR = 0
qids_with_relevant_passages = 0
ranking = []
recall_q_top1 = set()
recall_q_top50 = set()
recall_q_all = set()
for qid in qids_to_ranked_candidate_passages:
if qid in qids_to_relevant_passageids:
ranking.append(0)
target_pid = qids_to_relevant_passageids[qid]
candidate_pid = qids_to_ranked_candidate_passages[qid]
for i in range(0, MaxMRRRank):
if candidate_pid[i] in target_pid:
MRR += 1.0 / (i + 1)
ranking.pop()
ranking.append(i + 1)
break
for i, pid in enumerate(candidate_pid):
if pid in target_pid:
recall_q_all.add(qid)
if i < 50:
recall_q_top50.add(qid)
if i == 0:
recall_q_top1.add(qid)
break
if len(ranking) == 0:
raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
MRR = MRR / len(qids_to_relevant_passageids)
recall_top1 = len(recall_q_top1) * 1.0 / len(qids_to_relevant_passageids)
recall_top50 = len(recall_q_top50) * 1.0 / len(qids_to_relevant_passageids)
recall_all = len(recall_q_all) * 1.0 / len(qids_to_relevant_passageids)
all_scores['MRR@10'] = MRR
all_scores["recall@1"] = recall_top1
all_scores["recall@50"] = recall_top50
all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
return all_scores
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
qids_to_relevant_passageids = load_reference(path_to_reference)
qids_to_ranked_candidate_passages = load_candidate(path_to_candidate)
if perform_checks:
allowed, message = quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
if message != '': print(message)
return compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
def main():
if len(sys.argv) == 3:
path_to_reference = sys.argv[1]
path_to_candidate = sys.argv[2]
else:
print('Usage: result_eval.py <reference ranking> <candidate ranking>')
exit()
metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
result = dict()
for metric in sorted(metrics):
result[metric] = metrics[metric]
result_json = json.dumps(result)
print(result_json)
if __name__ == '__main__':
main()
| true
| true
|
790886c21871b8643b6ad7fb41cd85f3c4945fd1
| 23,928
|
py
|
Python
|
tests/test_hgvs_transcriptmapper.py
|
jmuhlich/hgvs
|
26aba8877791b0f94f1e14a5a49c60bcdaf2e6fd
|
[
"Apache-2.0"
] | null | null | null |
tests/test_hgvs_transcriptmapper.py
|
jmuhlich/hgvs
|
26aba8877791b0f94f1e14a5a49c60bcdaf2e6fd
|
[
"Apache-2.0"
] | null | null | null |
tests/test_hgvs_transcriptmapper.py
|
jmuhlich/hgvs
|
26aba8877791b0f94f1e14a5a49c60bcdaf2e6fd
|
[
"Apache-2.0"
] | 3
|
2016-08-08T03:04:37.000Z
|
2020-01-16T08:56:16.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from nose.plugins.attrib import attr
import hgvs.dataproviders.uta
import hgvs.location
import hgvs.parser
from hgvs.exceptions import HGVSError
from hgvs.transcriptmapper import TranscriptMapper
@attr(tags=["quick"])
class Test_transcriptmapper(unittest.TestCase):
ref = 'GRCh37.p10'
def setUp(self):
self.hdp = hgvs.dataproviders.uta.connect()
def test_transcriptmapper_failures(self):
self.assertRaises(HGVSError, TranscriptMapper, self.hdp, tx_ac='bogus', alt_ac='NM_033089.6', alt_aln_method='splign')
self.assertRaises(HGVSError, TranscriptMapper, self.hdp, tx_ac='NM_033089.6', alt_ac='bogus', alt_aln_method='splign')
self.assertRaises(HGVSError, TranscriptMapper, self.hdp, tx_ac='NM_000051.3', alt_ac='NC_000011.9', alt_aln_method='bogus')
def test_transcriptmapper_TranscriptMapper_LCE3C_uncertain(self):
"""Use NM_178434.2 tests to test mapping with uncertain positions"""
tx_ac = 'NM_178434.2'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
{'g': parser.parse_g_interval('(152573138)'), 'r': parser.parse_r_interval('(1)'), 'c': parser.parse_c_interval('(-70)')},
{'g': parser.parse_g_interval('(152573138_152573139)'), 'r': parser.parse_r_interval('(1_2)'), 'c': parser.parse_c_interval('(-70_-69)')},
# ? is not yet supported
# {'g': parser.parse_g_interval('(?_152573139)'), 'r': parser.parse_r_interval('(?_2)'), 'c': parser.parse_c_interval('(?_-69)')},
# {'g': parser.parse_g_interval('(152573138_?)'), 'r': parser.parse_r_interval('(1_?)'), 'c': parser.parse_c_interval('(-70_?)')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_LCE3C(self):
"""NM_178434.2: LCE3C single exon, strand = +1, all coordinate input/output are in HGVS"""
tx_ac = 'NM_178434.2'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
# 5'
{'g': parser.parse_g_interval('152573138'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-70')},
{'g': parser.parse_g_interval('152573140'), 'r': parser.parse_r_interval('3'), 'c': parser.parse_c_interval('-68')},
# cds
{'g': parser.parse_g_interval('152573207'), 'r': parser.parse_r_interval('70'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('152573208'), 'r': parser.parse_r_interval('71'), 'c': parser.parse_c_interval('1')},
# 3'
{'g': parser.parse_g_interval('152573492'), 'r': parser.parse_r_interval('355'), 'c': parser.parse_c_interval('285')},
{'g': parser.parse_g_interval('152573493'), 'r': parser.parse_r_interval('356'), 'c': parser.parse_c_interval('*1')},
{'g': parser.parse_g_interval('152573560'), 'r': parser.parse_r_interval('423'), 'c': parser.parse_c_interval('*68')},
{'g': parser.parse_g_interval('152573562'), 'r': parser.parse_r_interval('425'), 'c': parser.parse_c_interval('*70')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_HIST3H2A(self):
"""NM_033445.2: LCE3C single exon, strand = -1, all coordinate input/output are in HGVS"""
tx_ac = 'NM_033445.2'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
# 3'
{'g': parser.parse_g_interval('228645560'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-42')},
{'g': parser.parse_g_interval('228645558'), 'r': parser.parse_r_interval('3'), 'c': parser.parse_c_interval('-40')},
# cds
{'g': parser.parse_g_interval('228645519'), 'r': parser.parse_r_interval('42'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('228645518'), 'r': parser.parse_r_interval('43'), 'c': parser.parse_c_interval('1')},
# 5'
{'g': parser.parse_g_interval('228645126'), 'r': parser.parse_r_interval('435'), 'c': parser.parse_c_interval('393')},
{'g': parser.parse_g_interval('228645125'), 'r': parser.parse_r_interval('436'), 'c': parser.parse_c_interval('*1')},
{'g': parser.parse_g_interval('228645124'), 'r': parser.parse_r_interval('437'), 'c': parser.parse_c_interval('*2')},
{'g': parser.parse_g_interval('228645065'), 'r': parser.parse_r_interval('496'), 'c': parser.parse_c_interval('*61')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_LCE2B(self):
"""NM_014357.4: LCE2B, two exons, strand = +1, all coordinate input/output are in HGVS"""
tx_ac = 'NM_014357.4'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
# 5'
{'g': parser.parse_g_interval('152658599'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-54')},
{'g': parser.parse_g_interval('152658601'), 'r': parser.parse_r_interval('3'), 'c': parser.parse_c_interval('-52')},
# cds
{'g': parser.parse_g_interval('152659319'), 'r': parser.parse_r_interval('54'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('152659320'), 'r': parser.parse_r_interval('55'), 'c': parser.parse_c_interval('1')},
# around end of exon 1
{'g': parser.parse_g_interval('152658632'), 'r': parser.parse_r_interval('34'), 'c': parser.parse_c_interval('-21')},
{'g': parser.parse_g_interval('152658633'), 'r': parser.parse_r_interval('34+1'), 'c': parser.parse_c_interval('-21+1')},
# span
{'g': parser.parse_g_interval('152658633_152659299'), 'r': parser.parse_r_interval('34+1_35-1'), 'c': parser.parse_c_interval('-21+1_-20-1')},
# around beginning of exon 2
{'g': parser.parse_g_interval('152659300'), 'r': parser.parse_r_interval('35'), 'c': parser.parse_c_interval('-20')},
{'g': parser.parse_g_interval('152659299'), 'r': parser.parse_r_interval('35-1'), 'c': parser.parse_c_interval('-20-1')},
# around end of exon 2
{'g': parser.parse_g_interval('152659652'), 'r': parser.parse_r_interval('387'), 'c': parser.parse_c_interval('333')},
{'g': parser.parse_g_interval('152659653'), 'r': parser.parse_r_interval('388'), 'c': parser.parse_c_interval('*1')},
# span
{'g': parser.parse_g_interval('152659651_152659654'), 'r': parser.parse_r_interval('386_389'), 'c': parser.parse_c_interval('332_*2')},
# 3'
{'g': parser.parse_g_interval('152659877'), 'r': parser.parse_r_interval('612'), 'c': parser.parse_c_interval('*225')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_PTH2(self):
"""NM_178449.3: PTH2, two exons, strand = -1, all coordinate input/output are in HGVS"""
tx_ac = 'NM_178449.3'
alt_ac = 'NC_000019.9'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
# 3'
{'g': parser.parse_g_interval('49926698'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-102')},
# cds
{'g': parser.parse_g_interval('49926597'), 'r': parser.parse_r_interval('102'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('49926596'), 'r': parser.parse_r_interval('103'), 'c': parser.parse_c_interval('1')},
# around end of exon 1
{'g': parser.parse_g_interval('49926469'), 'r': parser.parse_r_interval('230'), 'c': parser.parse_c_interval('128')},
{'g': parser.parse_g_interval('49926468'), 'r': parser.parse_r_interval('230+1'), 'c': parser.parse_c_interval('128+1')},
# span
{'g': parser.parse_g_interval('49925901_49926467'), 'r': parser.parse_r_interval('230+2_231-2'), 'c': parser.parse_c_interval('128+2_129-2')},
# around beginning of exon 2
{'g': parser.parse_g_interval('49925900'), 'r': parser.parse_r_interval('231-1'), 'c': parser.parse_c_interval('129-1')},
{'g': parser.parse_g_interval('49925899'), 'r': parser.parse_r_interval('231'), 'c': parser.parse_c_interval('129')},
# around end of exon 2
{'g': parser.parse_g_interval('49925725'), 'r': parser.parse_r_interval('405'), 'c': parser.parse_c_interval('303')},
{'g': parser.parse_g_interval('49925724'), 'r': parser.parse_r_interval('406'), 'c': parser.parse_c_interval('*1')},
{'g': parser.parse_g_interval('49925671'), 'r': parser.parse_r_interval('459'), 'c': parser.parse_c_interval('*54')},
]
self.run_cases(tm, test_cases)
def run_cases(self, tm, test_cases):
for test_case in test_cases:
            self.assertEqual(tm.g_to_r(test_case['g']), test_case['r'])
            self.assertEqual(tm.r_to_g(test_case['r']), test_case['g'])
            self.assertEqual(tm.r_to_c(test_case['r']), test_case['c'])
            self.assertEqual(tm.c_to_r(test_case['c']), test_case['r'])
            self.assertEqual(tm.g_to_c(test_case['g']), test_case['c'])
            self.assertEqual(tm.c_to_g(test_case['c']), test_case['g'])
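# Coordinate legend for the interval cases above (HGVS conventions):
#   g. genomic position on the chromosome (NC_ accession),
#   r. transcript/RNA position, 1-based from the transcript start,
#   c. CDS-relative position (negative = 5' UTR, '*' prefix = 3' UTR,
#      '+n'/'-n' offsets = intronic positions relative to the nearest exon edge).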
if __name__ == '__main__':
unittest.main()
# TODO: Reintegrate older tests, especially those with indels
### harder tests ###
#def test_transcriptmapper_TranscriptMapper_1_ZCCHC3(self):
# """
# reece=> select * from uta.tx_info where ac='NM_033089.6';
# gene | strand | ac | cds_start_i | cds_end_i | descr | summary
# --------+--------+-------------+-------------+-----------+---------------------------------------+---------
# ZCCHC3 | 1 | NM_033089.6 | 24 | 1236 | zinc finger, CCHC domain containing 3 |
#
# reece=> select * from uta.tx_exons where ac='NM_033089.6';
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar |
# -------------+-----+------+-----------+---------+------------+-----------+---------+-------------+------------------------
# NM_033089.6 | 1 | 1 | 0 | 2759 | GRCh37.p10 | 278203 | 280965 | 484M3D2275M | GGAGGATGCTGGGAAGGAGGTAA
# """
# # http://tinyurl.com/mattx8u
# #
# # Around the deletion
# # http://tinyurl.com/jwt3txg
# # 687 690
# # C | C G G | C
# # \___ ___/
# # C | C
# # 484
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
# ac = 'NM_033089.6'
# tm = TranscriptMapper(self.hdp, ac, self.ref)
# cds = 24 + 1 # hgvs
# # gs, ge = genomic start/end; rs,re = rna start/end; cs, ce = cdna start/end; so, eo = start offset/end offset
# test_cases = [
# {'gs': 278204, 'ge': 278204, 'rs': 1, 're': 1, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 1-cds, 'ce': 1-cds},
# {'gs': 278214, 'ge': 278214, 'rs': 11, 're': 11, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 11-cds, 'ce': 11-cds},
# {'gs': 278204, 'ge': 278214, 'rs': 1, 're': 11, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 1-cds, 'ce': 11-cds},
#
# # around cds (cds can't be zero)
# {'gs': 278227, 'ge': 278227, 'rs': 24, 're': 24, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 24-cds, 'ce': 24-cds},
#
# # beyond cds add 1 due to hgvs
# {'gs': 278228, 'ge': 278228, 'rs': 25, 're': 25, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 25-cds+1, 'ce': 25-cds+1},
# {'gs': 278229, 'ge': 278229, 'rs': 26, 're': 26, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 26-cds+1, 'ce': 26-cds+1},
# {'gs': 280966, 'ge': 280966, 'rs': 2760, 're': 2760, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 2760-cds+1, 'ce': 2760-cds+1},
# {'gs': 278687, 'ge': 278687, 'rs': 484, 're': 484, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 484-cds+1, 'ce': 484-cds+1},
# {'gs': 278687, 'ge': 278688, 'rs': 484, 're': 485, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 484-cds+1, 'ce': 485-cds+1},
# {'gs': 278688, 'ge':278691, 'rs': 485, 're': 485, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 485-cds+1, 'ce': 485-cds+1},
#
# # around cds_start (24) and cds_end (1236), mindful of *coding* del (3D)
# {'gs': 278204+24, 'ge': 278204+1236, 'rs': 25, 're': 1237-3, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 25-cds+1, 'ce': 1237-cds-3+1},
# {'gs': 280956, 'ge': 280966, 'rs': 2750, 're': 2760, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 2750-cds+1, 'ce': 2760-cds+1},
# ]
# self.run_cases(tm, test_cases)
#
#def test_transcriptmapper_TranscriptMapper_2_MCL1(self):
# """
# reece=> select * from uta.tx_info where ac='NM_182763.2';
# gene | strand | ac | cds_start_i | cds_end_i | descr |
# ------+--------+-------------+-------------+-----------+-------------------------------------------------+----------------
# MCL1 | -1 | NM_182763.2 | 208 | 1024 | myeloid cell leukemia sequence 1 (BCL2-related) | This gene encod
#
# reece=> select * from uta.tx_exons where ac='NM_182763.2';
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar |
# -------------+-----+------+-----------+---------+------------+-----------+-----------+--------------+---------------------
# NM_182763.2 | 1 | 1b | 0 | 896 | GRCh37.p10 | 150551318 | 150552214 | 896M |
# NM_182763.2 | 2 | 3 | 896 | 3841 | GRCh37.p10 | 150547026 | 150549967 | 1077M4I1864M | GATGGGTTTGTGGAGTTCTT
# """
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
#
# ac = 'NM_182763.2'
# tm = TranscriptMapper(self.hdp, ac, self.ref)
# cds = 208 + 1 # hgvs
# test_cases = [
# {'gs': 150552215, 'ge': 150552215, 'rs': 1, 're': 1, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 1-cds, 'ce': 1-cds},
# {'gs': 150552214, 'ge': 150552214, 'rs': 2, 're': 2, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 2-cds, 'ce': 2-cds},
#
# # beyond cds add 1 due to hgvs
# {'gs': 150552007, 'ge': 150552007, 'rs': 209, 're': 209, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 209-cds+1, 'ce': 209-cds+1},
# {'gs': 150547027, 'ge': 150547027, 'rs': 3842, 're': 3842, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 3842-cds+1, 'ce': 3842-cds+1},
#
# #{'gs': 150549968, 'ge': 150549968, 'rs': 897, 're': 897, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150551318, 'ge': 150551318, 'rs': 897, 're': 897, 'so': 1, 'eo': 1, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150551318, 'ge': 150551319, 'rs': 897, 're': 897, 'so': 1, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150551317, 'ge': 150551318, 'rs': 897, 're': 897, 'so': 2, 'eo': 1, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150549968, 'ge': 150549969, 'rs': 897, 're': 897, 'so': 0, 'eo': -1, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150549969, 'ge': 150549970, 'rs': 897, 're': 897, 'so': -1, 'eo': -2, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
#
# # exon 2, 4nt insertion ~ r.2760
# # See http://tinyurl.com/mwegybw
# # The coords of this indel via NW alignment differ from those at NCBI, but are the same canonicalized
# # variant. Nothing to do about that short of running Splign ourselves. Test a few examples.
# {'gs': 150548892, 'ge': 150548892, 'rs': 1973, 're': 1973, 'so': 0, 'eo':0, 'd': hgvs.location.SEQ_START , 'cs': 1973-cds+1, 'ce': 1973-cds+1},
# #? {'gs': 150548891, 'ge': 150548892, 'rs': 1972, 're': 1973, 'so': 0, 'eo':0, 'd': hgvs.location.SEQ_START , 'cs': 1972-cds+1, 'ce': 1973-cds+1},
# {'gs': 150548890, 'ge': 150548892, 'rs': 1973, 're': 1979, 'so': 0, 'eo':0, 'd': hgvs.location.SEQ_START , 'cs': 1973-cds+1, 'ce': 1979-cds+1},
# ]
# self.run_cases(tm, test_cases)
#
# ## exon 2, 4nt insertion ~ r.2760
# ## See http://tinyurl.com/mwegybw
# ## The coords of this indel via NW alignment differ from those at
# ## NCBI, but are the same canonicalized variant. Nothing to do
# ## about that short of running Splign ourselves.
# #self.assertEqual(tm.r_to_g(1972, 1972), (150548891, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1973), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1974), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1975), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1976), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1977), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1978), (150548889, 150548891))
# #
# #self.assertEqual(tm.g_to_r(150548891, 150548891), (1972, 1972, 0, 0))
# #self.assertEqual(tm.g_to_r(150548890, 150548891), (1972, 1973, 0, 0))
# #self.assertEqual(tm.g_to_r(150548889, 150548891), (1972, 1978, 0, 0))
# #
# ## around cds_start (208) and cds_end (1024), mindful of *non-coding* ins (4I)
# ## i.e., we *don't* need to account for the 4nt insertion here
# #self.assertEquals(tm.r_to_c(208, 1024), (0, 1024 - 208, 0, 0))
# #self.assertEquals(tm.c_to_r(0, 1024 - 208), (208, 1024, 0, 0))
# #self.assertEquals(tm.g_to_c(150552214 - 208, 150552214 - 208), (0, 0, 0, 0))
# #self.assertEquals(tm.c_to_g(0, 0), (150552214 - 208, 150552214 - 208))
# ## cds_end is in 2nd exon
# #self.assertEquals(tm.g_to_c(150549967 - (1024 - 896), 150549967 - (1024 - 896)), (1024 - 208, 1024 - 208, 0, 0))
# #self.assertEquals(tm.c_to_g(1024 - 208, 1024 - 208), (150549967 - (1024 - 896), 150549967 - (1024 - 896)))
#
#
#def test_transcriptmapper_TranscriptMapper_3_IFI27L1(self):
# """
# #reece=> select * from uta.tx_info where ac='NM_145249.2';
# # gene | chr | strand | ac | cds_start_i | cds_end_i | descr | summary
# #---------+-----+--------+-------------+-------------+-----------+-----------------------------------------------+---------
# # IFI27L1 | 14 | 1 | NM_145249.2 | 254 | 569 | interferon, alpha-inducible protein 27-like 1 |
# #(1 row)
# # reece=>select * from uta.tx_exons where ac = 'NM_145249.2';
# #
# # ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | g_cigar | g_seq_a | t_seq_a
# # -------------+-----+------+-----------+---------+------------+-----------+----------+---------+---------+---------
# # NM_145249.2 | 1 | 1 | 0 | 157 | GRCh37.p10 | 94547638 | 94547795 | 157M | |
# # NM_145249.2 | 2 | 2a | 157 | 282 | GRCh37.p10 | 94563186 | 94563311 | 125M | |
# # NM_145249.2 | 3 | 3 | 282 | 315 | GRCh37.p10 | 94567084 | 94567117 | 33M | |
# # NM_145249.2 | 4 | 4 | 315 | 477 | GRCh37.p10 | 94568159 | 94568321 | 162M | |
# # NM_145249.2 | 5 | 5 | 477 | 715 | GRCh37.p10 | 94568822 | 94569060 | 238M | |
# """
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
#
# ac = 'NM_145249.2'
# tm = TranscriptMapper(self.hdp, ac, self.ref)
# cds = 254 + 1 # hgvs
# test_cases = [
# #{'gs': 94547639, 'ge': 94547639, 'rs': 1, 're': 1, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 1-cds, 'ce': 1-cds},
# #{'gs': 94547796, 'ge': 94547796, 'rs': 158, 're': 158, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 158-cds, 'ce': 158-cds},
# #{'gs': 94563185, 'ge': 94563185, 'rs': 159, 're': 159, 'so': -2, 'eo': -2, 'd': hgvs.location.SEQ_START, 'cs': 159-cds, 'ce': 159-cds},
#
# # beyond cds add 1 due to hgvs
# #{'gs': 94567118, 'ge': 94567120, 'rs': 316, 're': 316, 'so': 0, 'eo': 2, 'd': hgvs.location.SEQ_START, 'cs': 316-cds+1, 'ce': 316-cds+1},
# {'gs': 94567115, 'ge': 94567118, 'rs': 313, 're': 316, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 313-cds+1, 'ce': 316-cds+1},
#
# # intron in the middle between exon 1 and 2
# #{'gs': 94555500, 'ge': 94555501, 'rs': 157, 're': 158, 'so': 7686, 'eo': -7685, 'd': hgvs.location.SEQ_START, 'cs': 157-cds+1, 'ce': 158-cds+1},
# #{'gs': 94555481, 'ge': 94555501, 'rs': 157, 're': 158, 'so': 7686, 'eo': -7685, 'd': hgvs.location.SEQ_START, 'cs': 157-cds+1, 'ce': 158-cds+1},
# ]
# self.run_cases(tm, test_cases)
### ANOTHER POSSIBLE TEST CASE ###
# reece=> select * from uta.tx_info where ac = 'NM_145171.3';
# gene | strand | ac | cds_start_i | cds_end_i | descr | summary
# -------+--------+-------------+-------------+-----------+-----------------------------+-----------------------------------
# GPHB5 | -1 | NM_145171.3 | 57 | 450 | glycoprotein hormone beta 5 | GPHB5 is a cystine knot-forming...
#
# reece=> select * from uta.tx_exons where ac = 'NM_145171.3' order by g_start_i;
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar | g_seq_a
# -------------+-----+------+-----------+---------+------------+-----------+----------+-----------+-------------------------
# NM_145171.3 | 3 | 3 | 261 | 543 | GRCh37.p10 | 63779548 | 63779830 | 282M |
# NM_145171.3 | 2 | 2 | 56 | 261 | GRCh37.p10 | 63784360 | 63784564 | 156M1I48M | CATGAAGCTGGCATTCCTCTT...
# NM_145171.3 | 1 | 1 | 0 | 56 | GRCh37.p10 | 63785537 | 63785593 | 56M |
# def test_transcriptmapper_TranscriptMapper_GPHB5(self):
# ac = 'NM_145171.3'
# tm = TranscriptMapper(self.hdp,ac,self.ref)
# pass
## <LICENSE>
## Copyright 2014 HGVS Contributors (https://bitbucket.org/hgvs/hgvs)
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## </LICENSE>
| 68.365714
| 159
| 0.546974
|
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from nose.plugins.attrib import attr
import hgvs.dataproviders.uta
import hgvs.location
import hgvs.parser
from hgvs.exceptions import HGVSError
from hgvs.transcriptmapper import TranscriptMapper
@attr(tags=["quick"])
class Test_transcriptmapper(unittest.TestCase):
ref = 'GRCh37.p10'
def setUp(self):
self.hdp = hgvs.dataproviders.uta.connect()
def test_transcriptmapper_failures(self):
self.assertRaises(HGVSError, TranscriptMapper, self.hdp, tx_ac='bogus', alt_ac='NM_033089.6', alt_aln_method='splign')
self.assertRaises(HGVSError, TranscriptMapper, self.hdp, tx_ac='NM_033089.6', alt_ac='bogus', alt_aln_method='splign')
self.assertRaises(HGVSError, TranscriptMapper, self.hdp, tx_ac='NM_000051.3', alt_ac='NC_000011.9', alt_aln_method='bogus')
def test_transcriptmapper_TranscriptMapper_LCE3C_uncertain(self):
tx_ac = 'NM_178434.2'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
{'g': parser.parse_g_interval('(152573138)'), 'r': parser.parse_r_interval('(1)'), 'c': parser.parse_c_interval('(-70)')},
{'g': parser.parse_g_interval('(152573138_152573139)'), 'r': parser.parse_r_interval('(1_2)'), 'c': parser.parse_c_interval('(-70_-69)')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_LCE3C(self):
tx_ac = 'NM_178434.2'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
{'g': parser.parse_g_interval('152573138'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-70')},
{'g': parser.parse_g_interval('152573140'), 'r': parser.parse_r_interval('3'), 'c': parser.parse_c_interval('-68')},
# cds
{'g': parser.parse_g_interval('152573207'), 'r': parser.parse_r_interval('70'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('152573208'), 'r': parser.parse_r_interval('71'), 'c': parser.parse_c_interval('1')},
# 3'
{'g': parser.parse_g_interval('152573492'), 'r': parser.parse_r_interval('355'), 'c': parser.parse_c_interval('285')},
{'g': parser.parse_g_interval('152573493'), 'r': parser.parse_r_interval('356'), 'c': parser.parse_c_interval('*1')},
{'g': parser.parse_g_interval('152573560'), 'r': parser.parse_r_interval('423'), 'c': parser.parse_c_interval('*68')},
{'g': parser.parse_g_interval('152573562'), 'r': parser.parse_r_interval('425'), 'c': parser.parse_c_interval('*70')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_HIST3H2A(self):
tx_ac = 'NM_033445.2'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
{'g': parser.parse_g_interval('228645560'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-42')},
{'g': parser.parse_g_interval('228645558'), 'r': parser.parse_r_interval('3'), 'c': parser.parse_c_interval('-40')},
# cds
{'g': parser.parse_g_interval('228645519'), 'r': parser.parse_r_interval('42'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('228645518'), 'r': parser.parse_r_interval('43'), 'c': parser.parse_c_interval('1')},
# 5'
{'g': parser.parse_g_interval('228645126'), 'r': parser.parse_r_interval('435'), 'c': parser.parse_c_interval('393')},
{'g': parser.parse_g_interval('228645125'), 'r': parser.parse_r_interval('436'), 'c': parser.parse_c_interval('*1')},
{'g': parser.parse_g_interval('228645124'), 'r': parser.parse_r_interval('437'), 'c': parser.parse_c_interval('*2')},
{'g': parser.parse_g_interval('228645065'), 'r': parser.parse_r_interval('496'), 'c': parser.parse_c_interval('*61')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_LCE2B(self):
tx_ac = 'NM_014357.4'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
{'g': parser.parse_g_interval('152658599'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-54')},
{'g': parser.parse_g_interval('152658601'), 'r': parser.parse_r_interval('3'), 'c': parser.parse_c_interval('-52')},
# cds
{'g': parser.parse_g_interval('152659319'), 'r': parser.parse_r_interval('54'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('152659320'), 'r': parser.parse_r_interval('55'), 'c': parser.parse_c_interval('1')},
# around end of exon 1
{'g': parser.parse_g_interval('152658632'), 'r': parser.parse_r_interval('34'), 'c': parser.parse_c_interval('-21')},
{'g': parser.parse_g_interval('152658633'), 'r': parser.parse_r_interval('34+1'), 'c': parser.parse_c_interval('-21+1')},
# span
{'g': parser.parse_g_interval('152658633_152659299'), 'r': parser.parse_r_interval('34+1_35-1'), 'c': parser.parse_c_interval('-21+1_-20-1')},
# around beginning of exon 2
{'g': parser.parse_g_interval('152659300'), 'r': parser.parse_r_interval('35'), 'c': parser.parse_c_interval('-20')},
{'g': parser.parse_g_interval('152659299'), 'r': parser.parse_r_interval('35-1'), 'c': parser.parse_c_interval('-20-1')},
# around end of exon 2
{'g': parser.parse_g_interval('152659652'), 'r': parser.parse_r_interval('387'), 'c': parser.parse_c_interval('333')},
{'g': parser.parse_g_interval('152659653'), 'r': parser.parse_r_interval('388'), 'c': parser.parse_c_interval('*1')},
# span
{'g': parser.parse_g_interval('152659651_152659654'), 'r': parser.parse_r_interval('386_389'), 'c': parser.parse_c_interval('332_*2')},
# 3'
{'g': parser.parse_g_interval('152659877'), 'r': parser.parse_r_interval('612'), 'c': parser.parse_c_interval('*225')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_PTH2(self):
tx_ac = 'NM_178449.3'
alt_ac = 'NC_000019.9'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
{'g': parser.parse_g_interval('49926698'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-102')},
# cds
{'g': parser.parse_g_interval('49926597'), 'r': parser.parse_r_interval('102'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('49926596'), 'r': parser.parse_r_interval('103'), 'c': parser.parse_c_interval('1')},
# around end of exon 1
{'g': parser.parse_g_interval('49926469'), 'r': parser.parse_r_interval('230'), 'c': parser.parse_c_interval('128')},
{'g': parser.parse_g_interval('49926468'), 'r': parser.parse_r_interval('230+1'), 'c': parser.parse_c_interval('128+1')},
# span
{'g': parser.parse_g_interval('49925901_49926467'), 'r': parser.parse_r_interval('230+2_231-2'), 'c': parser.parse_c_interval('128+2_129-2')},
# around beginning of exon 2
{'g': parser.parse_g_interval('49925900'), 'r': parser.parse_r_interval('231-1'), 'c': parser.parse_c_interval('129-1')},
{'g': parser.parse_g_interval('49925899'), 'r': parser.parse_r_interval('231'), 'c': parser.parse_c_interval('129')},
# around end of exon 2
{'g': parser.parse_g_interval('49925725'), 'r': parser.parse_r_interval('405'), 'c': parser.parse_c_interval('303')},
{'g': parser.parse_g_interval('49925724'), 'r': parser.parse_r_interval('406'), 'c': parser.parse_c_interval('*1')},
{'g': parser.parse_g_interval('49925671'), 'r': parser.parse_r_interval('459'), 'c': parser.parse_c_interval('*54')},
]
self.run_cases(tm, test_cases)
def run_cases(self, tm, test_cases):
for test_case in test_cases:
            self.assertEqual(tm.g_to_r(test_case['g']), test_case['r'])
            self.assertEqual(tm.r_to_g(test_case['r']), test_case['g'])
            self.assertEqual(tm.r_to_c(test_case['r']), test_case['c'])
            self.assertEqual(tm.c_to_r(test_case['c']), test_case['r'])
            self.assertEqual(tm.g_to_c(test_case['g']), test_case['c'])
            self.assertEqual(tm.c_to_g(test_case['c']), test_case['g'])
if __name__ == '__main__':
unittest.main()
# TODO: Reintegrate older tests, especially those with indels
### harder tests ###
#def test_transcriptmapper_TranscriptMapper_1_ZCCHC3(self):
# """
# reece=> select * from uta.tx_info where ac='NM_033089.6';
# gene | strand | ac | cds_start_i | cds_end_i | descr | summary
# --------+--------+-------------+-------------+-----------+---------------------------------------+---------
# ZCCHC3 | 1 | NM_033089.6 | 24 | 1236 | zinc finger, CCHC domain containing 3 |
#
# reece=> select * from uta.tx_exons where ac='NM_033089.6';
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar |
# -------------+-----+------+-----------+---------+------------+-----------+---------+-------------+------------------------
# NM_033089.6 | 1 | 1 | 0 | 2759 | GRCh37.p10 | 278203 | 280965 | 484M3D2275M | GGAGGATGCTGGGAAGGAGGTAA
# """
# # http://tinyurl.com/mattx8u
# #
# # Around the deletion
# # http://tinyurl.com/jwt3txg
# # 687 690
# # C | C G G | C
# # \___ ___/
# # C | C
# # 484
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
# ac = 'NM_033089.6'
# tm = TranscriptMapper(self.hdp, ac, self.ref)
# cds = 24 + 1 # hgvs
# # gs, ge = genomic start/end; rs,re = rna start/end; cs, ce = cdna start/end; so, eo = start offset/end offset
# test_cases = [
# {'gs': 278204, 'ge': 278204, 'rs': 1, 're': 1, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 1-cds, 'ce': 1-cds},
# {'gs': 278214, 'ge': 278214, 'rs': 11, 're': 11, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 11-cds, 'ce': 11-cds},
# {'gs': 278204, 'ge': 278214, 'rs': 1, 're': 11, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 1-cds, 'ce': 11-cds},
#
# # around cds (cds can't be zero)
# reece=> select * from uta.tx_info where ac='NM_182763.2';
# gene | strand | ac | cds_start_i | cds_end_i | descr |
# ------+--------+-------------+-------------+-----------+-------------------------------------------------+----------------
# MCL1 | -1 | NM_182763.2 | 208 | 1024 | myeloid cell leukemia sequence 1 (BCL2-related) | This gene encod
#
# reece=> select * from uta.tx_exons where ac='NM_182763.2';
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar |
# -------------+-----+------+-----------+---------+------------+-----------+-----------+--------------+---------------------
# NM_182763.2 | 1 | 1b | 0 | 896 | GRCh37.p10 | 150551318 | 150552214 | 896M |
# NM_182763.2 | 2 | 3 | 896 | 3841 | GRCh37.p10 | 150547026 | 150549967 | 1077M4I1864M | GATGGGTTTGTGGAGTTCTT
# """
# #  NM_145249.2 |   5 | 5    |       477 |     715 | GRCh37.p10 |  94568822 | 94569060 | 238M    |         |
# """
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
#
# ac = 'NM_145249.2'
# tm = TranscriptMapper(self.hdp, ac, self.ref)
# cds = 254 + 1 # hgvs
# test_cases = [
# #{'gs': 94547639, 'ge': 94547639, 'rs': 1, 're': 1, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 1-cds, 'ce': 1-cds},
# #{'gs': 94547796, 'ge': 94547796, 'rs': 158, 're': 158, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 158-cds, 'ce': 158-cds},
# #{'gs': 94563185, 'ge': 94563185, 'rs': 159, 're': 159, 'so': -2, 'eo': -2, 'd': hgvs.location.SEQ_START, 'cs': 159-cds, 'ce': 159-cds},
#
# # beyond cds add 1 due to hgvs
# #{'gs': 94567118, 'ge': 94567120, 'rs': 316, 're': 316, 'so': 0, 'eo': 2, 'd': hgvs.location.SEQ_START, 'cs': 316-cds+1, 'ce': 316-cds+1},
# {'gs': 94567115, 'ge': 94567118, 'rs': 313, 're': 316, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 313-cds+1, 'ce': 316-cds+1},
#
# # intron in the middle between exon 1 and 2
# #{'gs': 94555500, 'ge': 94555501, 'rs': 157, 're': 158, 'so': 7686, 'eo': -7685, 'd': hgvs.location.SEQ_START, 'cs': 157-cds+1, 'ce': 158-cds+1},
# #{'gs': 94555481, 'ge': 94555501, 'rs': 157, 're': 158, 'so': 7686, 'eo': -7685, 'd': hgvs.location.SEQ_START, 'cs': 157-cds+1, 'ce': 158-cds+1},
# ]
# self.run_cases(tm, test_cases)
### ANOTHER POSSIBLE TEST CASE ###
# reece=> select * from uta.tx_info where ac = 'NM_145171.3';
# gene | strand | ac | cds_start_i | cds_end_i | descr | summary
# -------+--------+-------------+-------------+-----------+-----------------------------+-----------------------------------
# GPHB5 | -1 | NM_145171.3 | 57 | 450 | glycoprotein hormone beta 5 | GPHB5 is a cystine knot-forming...
#
# reece=> select * from uta.tx_exons where ac = 'NM_145171.3' order by g_start_i;
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar | g_seq_a
# -------------+-----+------+-----------+---------+------------+-----------+----------+-----------+-------------------------
# NM_145171.3 | 3 | 3 | 261 | 543 | GRCh37.p10 | 63779548 | 63779830 | 282M |
# NM_145171.3 | 2 | 2 | 56 | 261 | GRCh37.p10 | 63784360 | 63784564 | 156M1I48M | CATGAAGCTGGCATTCCTCTT...
# NM_145171.3 | 1 | 1 | 0 | 56 | GRCh37.p10 | 63785537 | 63785593 | 56M |
# def test_transcriptmapper_TranscriptMapper_GPHB5(self):
# ac = 'NM_145171.3'
# tm = TranscriptMapper(self.hdp,ac,self.ref)
# pass
## <LICENSE>
## Copyright 2014 HGVS Contributors (https://bitbucket.org/hgvs/hgvs)
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## </LICENSE>
| true
| true
|
790887059b23bb6f3d43ae788a4f4730e1f1fcc2
| 345
|
py
|
Python
|
wxcloudrun/np_test/views.py
|
nixiaopan/nipan
|
4e57e7f25089bae64a233c2e3ba49b703147183e
|
[
"MIT"
] | null | null | null |
wxcloudrun/np_test/views.py
|
nixiaopan/nipan
|
4e57e7f25089bae64a233c2e3ba49b703147183e
|
[
"MIT"
] | null | null | null |
wxcloudrun/np_test/views.py
|
nixiaopan/nipan
|
4e57e7f25089bae64a233c2e3ba49b703147183e
|
[
"MIT"
] | null | null | null |
import json
import logging
from django.http import JsonResponse
logger = logging.getLogger('log')
from wxcloudrun.utils.SQL.DBUtils import DBUtils
def test1(request):
print(request.headers)
logger.info(request.headers)
rsp = JsonResponse({'code': 0, 'errorMsg': '😁'}, json_dumps_params={'ensure_ascii': False})
return rsp
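# With ensure_ascii=False the response body keeps the emoji literal,
# e.g. {"code": 0, "errorMsg": "😁"} rather than a \uXXXX escape.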
| 19.166667
| 95
| 0.727536
|
import json
import logging
from django.http import JsonResponse
logger = logging.getLogger('log')
from wxcloudrun.utils.SQL.DBUtils import DBUtils
def test1(request):
print(request.headers)
logger.info(request.headers)
rsp = JsonResponse({'code': 0, 'errorMsg': '😁'}, json_dumps_params={'ensure_ascii': False})
return rsp
| true
| true
|
790887e7d19242e1bf1bd1d30f8c3fd2df502ebb
| 3,401
|
py
|
Python
|
api_drf/api_drf/settings.py
|
kaparis/spa101
|
0c225d0676a13a33950aca5954c02b443237149d
|
[
"MIT"
] | null | null | null |
api_drf/api_drf/settings.py
|
kaparis/spa101
|
0c225d0676a13a33950aca5954c02b443237149d
|
[
"MIT"
] | null | null | null |
api_drf/api_drf/settings.py
|
kaparis/spa101
|
0c225d0676a13a33950aca5954c02b443237149d
|
[
"MIT"
] | null | null | null |
"""
Django settings for api_drf project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2p$!i%#w$3e9(l3v4#%_#fi2_fae2l7ksdsd+1*vrc6_#8_@_*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'mainService'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api_drf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api_drf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
# When you enable API versioning, the request.version attribute will contain a string
# that corresponds to the version requested in the incoming client request.
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
}
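# Illustrative URLconf entry for URLPathVersioning (hypothetical view/route;
# re_path comes from django.urls):
#   re_path(r'^api/(?P<version>v1|v2)/items/$', ItemListView.as_view())
# request.version then resolves to 'v1' or 'v2' inside the view.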
| 26.570313
| 91
| 0.701558
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '2p$!i%#w$3e9(l3v4#%_#fi2_fae2l7ksdsd+1*vrc6_#8_@_*'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'mainService'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api_drf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api_drf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
# When you enable API versioning, the request.version attribute will contain a string
# that corresponds to the version requested in the incoming client request.
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
}
| true
| true
|
790887ffc8dffa14b3b055c2b6f737bcef1c3d66
| 318
|
py
|
Python
|
src/accounts/migrations/0002_remove_user_is_donor.py
|
paceite/Seelife---An-NGO-Website
|
02e6b5ec94d9a76079eccde54b3cd40b9e979def
|
[
"MIT"
] | null | null | null |
src/accounts/migrations/0002_remove_user_is_donor.py
|
paceite/Seelife---An-NGO-Website
|
02e6b5ec94d9a76079eccde54b3cd40b9e979def
|
[
"MIT"
] | null | null | null |
src/accounts/migrations/0002_remove_user_is_donor.py
|
paceite/Seelife---An-NGO-Website
|
02e6b5ec94d9a76079eccde54b3cd40b9e979def
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-03-16 10:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='is_donor',
),
]
| 17.666667
| 47
| 0.578616
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='is_donor',
),
]
| true
| true
|
7908880a1e3fcb6c65a2a2b3e8d276dba81fcf74
| 1,185
|
py
|
Python
|
Python/questions/WordLadder/word-ladder.py
|
udcymen/leetcode
|
7c7c4085e6a8cea7106dd8bca86b370ca53e3ddd
|
[
"MIT"
] | null | null | null |
Python/questions/WordLadder/word-ladder.py
|
udcymen/leetcode
|
7c7c4085e6a8cea7106dd8bca86b370ca53e3ddd
|
[
"MIT"
] | null | null | null |
Python/questions/WordLadder/word-ladder.py
|
udcymen/leetcode
|
7c7c4085e6a8cea7106dd8bca86b370ca53e3ddd
|
[
"MIT"
] | null | null | null |
import string
from typing import List

class Solution:
    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
if endWord not in wordList:
return 0
result = 0
l = len(beginWord)
beginSet = {beginWord}
endSet = {endWord}
wordList = set(wordList)
while beginSet or endSet:
result += 1
if len(beginSet) < len(endSet):
beginSet, endSet = endSet, beginSet
newSet = set()
for word in beginSet:
for index in range(l):
for c in string.ascii_lowercase:
newWord = word[:index] + c + word[index + 1:]
if newWord in endSet:
return result + 1
if newWord not in wordList:
continue
wordList.remove(newWord)
newSet.add(newWord)
beginSet = newSet
return 0
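A quick usage sketch (the inputs are the classic LeetCode example, not part of this file). Expanding whichever frontier is smaller keeps the bidirectional BFS balanced, which is why the two sets are swapped each round:

# hit -> hot -> dot -> dog -> cog is the shortest ladder, so the expected answer is 5
s = Solution()
print(s.ladderLength("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]))  # 5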
| 32.027027
| 85
| 0.389873
|
import string
from typing import List

class Solution:
    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
if endWord not in wordList:
return 0
result = 0
l = len(beginWord)
beginSet = {beginWord}
endSet = {endWord}
wordList = set(wordList)
while beginSet or endSet:
result += 1
if len(beginSet) < len(endSet):
beginSet, endSet = endSet, beginSet
newSet = set()
for word in beginSet:
for index in range(l):
for c in string.ascii_lowercase:
newWord = word[:index] + c + word[index + 1:]
if newWord in endSet:
return result + 1
if newWord not in wordList:
continue
wordList.remove(newWord)
newSet.add(newWord)
beginSet = newSet
return 0
| true
| true
|
790888a73bee43f4c8f7b5ae9835d1dbf78874a1
| 32,113
|
py
|
Python
|
dev/html2text.py
|
sceniclife/arlo
|
edbd17fb8ab60b1d5d36354c52f4ddb0966a9514
|
[
"Apache-2.0"
] | null | null | null |
dev/html2text.py
|
sceniclife/arlo
|
edbd17fb8ab60b1d5d36354c52f4ddb0966a9514
|
[
"Apache-2.0"
] | null | null | null |
dev/html2text.py
|
sceniclife/arlo
|
edbd17fb8ab60b1d5d36354c52f4ddb0966a9514
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "3.200.3"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
try:
True
except NameError:
setattr(__builtins__, 'True', 1)
setattr(__builtins__, 'False', 0)
def has_key(x, y):
if hasattr(x, 'has_key'): return x.has_key(y)
else: return y in x
try:
import htmlentitydefs
import urlparse
import HTMLParser
except ImportError: #Python3
import html.entities as htmlentitydefs
import urllib.parse as urlparse
import html.parser as HTMLParser
try: #Python3
import urllib.request as urllib
except:
import urllib
import optparse, re, sys, codecs, types
try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = 0
# Escape all special characters. Output is less readable, but avoids corner case formatting issues.
ESCAPE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True
# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True
# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36
IGNORE_ANCHORS = False
IGNORE_IMAGES = False
IGNORE_EMPHASIS = False
### Entity Nonsense ###
def name2cp(k):
if k == 'apos': return ord("'")
if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
else:
k = htmlentitydefs.entitydefs[k]
if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
return ord(codecs.latin_1_decode(k)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
'lrm':'', 'rlm':''}
unifiable_n = {}
for k in unifiable.keys():
unifiable_n[name2cp(k)] = unifiable[k]
### End Entity Nonsense ###
def onlywhite(line):
"""Return true if the line does only consist of whitespace characters."""
for c in line:
        if c != ' ' and c != '\t':
            return c == ' '
return line
def hn(tag):
if tag[0] == 'h' and len(tag) == 2:
try:
n = int(tag[1])
if n in range(1, 10): return n
except ValueError: return 0
def dumb_property_dict(style):
"""returns a hash of css attributes"""
return dict([(x.strip(), y.strip()) for x, y in [z.split(':', 1) for z in style.split(';') if ':' in z]]);
def dumb_css_parser(data):
"""returns a hash of css selectors, each of which contains a hash of css attributes"""
# remove @import sentences
data += ';'
importIndex = data.find('@import')
while importIndex != -1:
data = data[0:importIndex] + data[data.find(';', importIndex) + 1:]
importIndex = data.find('@import')
    # parse the css. reverted from dictionary comprehension in order to support older pythons
elements = [x.split('{') for x in data.split('}') if '{' in x.strip()]
try:
elements = dict([(a.strip(), dumb_property_dict(b)) for a, b in elements])
except ValueError:
elements = {} # not that important
return elements
def element_style(attrs, style_def, parent_style):
"""returns a hash of the 'final' style attributes of the element"""
style = parent_style.copy()
if 'class' in attrs:
for css_class in attrs['class'].split():
css_style = style_def['.' + css_class]
style.update(css_style)
if 'style' in attrs:
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style
def google_list_style(style):
"""finds out whether this is an ordered or unordered list"""
if 'list-style-type' in style:
list_style = style['list-style-type']
if list_style in ['disc', 'circle', 'square', 'none']:
return 'ul'
return 'ol'
def google_has_height(style):
"""check if the style of the element has the 'height' attribute explicitly defined"""
if 'height' in style:
return True
return False
def google_text_emphasis(style):
"""return a list of all emphasis modifiers of the element"""
emphasis = []
if 'text-decoration' in style:
emphasis.append(style['text-decoration'])
if 'font-style' in style:
emphasis.append(style['font-style'])
if 'font-weight' in style:
emphasis.append(style['font-weight'])
return emphasis
def google_fixed_width_font(style):
"""check if the css of the current element defines a fixed width font"""
font_family = ''
if 'font-family' in style:
font_family = style['font-family']
if 'Courier New' == font_family or 'Consolas' == font_family:
return True
return False
def list_numbering_start(attrs):
"""extract numbering from list element attributes"""
if 'start' in attrs:
return int(attrs['start']) - 1
else:
return 0
class HTML2Text(HTMLParser.HTMLParser):
def __init__(self, out=None, baseurl=''):
HTMLParser.HTMLParser.__init__(self)
# Config options
self.unicode_snob = UNICODE_SNOB
self.escape_snob = ESCAPE_SNOB
self.links_each_paragraph = LINKS_EACH_PARAGRAPH
self.body_width = BODY_WIDTH
self.skip_internal_links = SKIP_INTERNAL_LINKS
self.inline_links = INLINE_LINKS
self.google_list_indent = GOOGLE_LIST_INDENT
self.ignore_links = IGNORE_ANCHORS
self.ignore_images = IGNORE_IMAGES
self.ignore_emphasis = IGNORE_EMPHASIS
self.google_doc = False
self.ul_item_mark = '*'
self.emphasis_mark = '_'
self.strong_mark = '**'
if out is None:
self.out = self.outtextf
else:
self.out = out
self.outtextlist = [] # empty list to store output characters before they are "joined"
try:
self.outtext = unicode()
except NameError: # Python3
self.outtext = str()
self.quiet = 0
self.p_p = 0 # number of newline character to print before next output
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.maybe_automatic_link = None
self.absolute_url_matcher = re.compile(r'^[a-zA-Z+]+://')
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.code = False
self.br_toggle = ''
self.lastWasNL = 0
self.lastWasList = False
self.style = 0
self.style_def = {}
self.tag_stack = []
self.emphasis = 0
self.drop_white_space = 0
self.inheader = False
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
try: del unifiable_n[name2cp('nbsp')]
except KeyError: pass
        unifiable['nbsp'] = '&nbsp_place_holder;'
def feed(self, data):
data = data.replace("</' + 'script>", "</ignore>")
HTMLParser.HTMLParser.feed(self, data)
def handle(self, data):
self.feed(data)
self.feed("")
return self.optwrap(self.close())
def outtextf(self, s):
self.outtextlist.append(s)
if s: self.lastWasNL = s[-1] == '\n'
def close(self):
HTMLParser.HTMLParser.close(self)
self.pbr()
self.o('', 0, 'end')
self.outtext = self.outtext.join(self.outtextlist)
if self.unicode_snob:
nbsp = unichr(name2cp('nbsp'))
else:
nbsp = u' '
        self.outtext = self.outtext.replace(u'&nbsp_place_holder;', nbsp)
return self.outtext
def handle_charref(self, c):
self.o(self.charref(c), 1)
def handle_entityref(self, c):
self.o(self.entityref(c), 1)
def handle_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def handle_endtag(self, tag):
self.handle_tag(tag, None, 0)
def previousIndex(self, attrs):
""" returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None
"""
if not has_key(attrs, 'href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if has_key(a, 'href') and a['href'] == attrs['href']:
if has_key(a, 'title') or has_key(attrs, 'title'):
if (has_key(a, 'title') and has_key(attrs, 'title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i
def drop_last(self, nLetters):
if not self.quiet:
self.outtext = self.outtext[:-nLetters]
def handle_emphasis(self, start, tag_style, parent_style):
"""handles various text emphases"""
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1
def handle_tag(self, tag, attrs, start):
#attrs = fixattrs(attrs)
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
if self.google_doc:
# the attrs parameter is empty for a closing tag. in addition, we
# need the attributes of the parent nodes in order to get a
# complete style description for the current element. we assume
# that google docs export well formed html.
parent_style = {}
if start:
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
tag_style = element_style(attrs, self.style_def, parent_style)
self.tag_stack.append((tag, attrs, tag_style))
else:
dummy, attrs, tag_style = self.tag_stack.pop()
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
if hn(tag):
self.p()
if start:
self.inheader = True
self.o(hn(tag)*"#" + ' ')
else:
self.inheader = False
return # prevent redundant emphasis marks on headers
if tag in ['p', 'div']:
if self.google_doc:
if start and google_has_height(tag_style):
self.p()
else:
self.soft_br()
else:
self.p()
        if tag == "br" and start: self.o("  \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start: self.quiet += 1
else: self.quiet -= 1
if tag == "style":
if start: self.style += 1
else: self.style -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p(); self.o('> ', 0, 1); self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u'] and not self.ignore_emphasis: self.o(self.emphasis_mark)
if tag in ['strong', 'b'] and not self.ignore_emphasis: self.o(self.strong_mark)
if tag in ['del', 'strike', 's']:
if start:
self.o("<"+tag+">")
else:
self.o("</"+tag+">")
if self.google_doc:
if not self.inheader:
# handle some font attributes, but leave headers clean
self.handle_emphasis(start, tag_style, parent_style)
if tag in ["code", "tt"] and not self.pre: self.o('`') #TODO: `` `this` ``
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ''
if has_key(attrs, 'title'):
self.abbr_title = attrs['title']
else:
if self.abbr_title != None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a" and not self.ignore_links:
if start:
if has_key(attrs, 'href') and not (self.skip_internal_links and attrs['href'].startswith('#')):
self.astack.append(attrs)
self.maybe_automatic_link = attrs['href']
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link:
self.maybe_automatic_link = None
elif a:
if self.inline_links:
self.o("](" + escape_md(a['href']) + ")")
else:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + str(a['count']) + "]")
if tag == "img" and start and not self.ignore_images:
if has_key(attrs, 'src'):
attrs['href'] = attrs['src']
alt = attrs.get('alt', '')
self.o("![" + escape_md(alt) + "]")
if self.inline_links:
self.o("(" + escape_md(attrs['href']) + ")")
else:
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("[" + str(attrs['count']) + "]")
if tag == 'dl' and start: self.p()
if tag == 'dt' and not start: self.pbr()
        if tag == 'dd' and start: self.o('    ')
if tag == 'dd' and not start: self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
if (not self.list) and (not self.lastWasList):
self.p()
if start:
if self.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append({'name':list_style, 'num':numbering_start})
else:
if self.list: self.list.pop()
self.lastWasList = True
else:
self.lastWasList = False
if tag == 'li':
self.pbr()
if start:
if self.list: li = self.list[-1]
else: li = {'name':'ul', 'num':0}
if self.google_doc:
nest_count = self.google_nest_count(tag_style)
else:
nest_count = len(self.list)
                self.o("  " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
if li['name'] == "ul": self.o(self.ul_item_mark + " ")
elif li['name'] == "ol":
li['num'] += 1
self.o(str(li['num'])+". ")
self.start = 1
if tag in ["table", "tr"] and start: self.p()
if tag == 'td': self.pbr()
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
self.p()
def pbr(self):
if self.p_p == 0:
self.p_p = 1
def p(self):
self.p_p = 2
def soft_br(self):
self.pbr()
self.br_toggle = ' '
def o(self, data, puredata=0, force=0):
if self.abbr_data is not None:
self.abbr_data += data
if not self.quiet:
if self.google_doc:
# prevent white space immediately after 'begin emphasis' marks ('**' and '_')
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if puredata and not self.pre:
                data = re.sub(r'\s+', ' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force: return
if self.startpre:
#self.out(" :") #TODO: not output when already one there
if not data.startswith("\n"): # <pre>stuff...
data = "\n" + data
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote: bq += " "
            if self.pre:
                if not self.list:
                    bq += "    "
                #else: list content is already partially indented
                for i in range(len(self.list)):  # xrange does not exist on Python 3
                    bq += "    "
data = data.replace("\n", "\n"+bq)
if self.startpre:
self.startpre = 0
if self.list:
data = data.lstrip("\n") # use existing initial indentation
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
# It's the end.
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out((self.br_toggle+'\n'+bq)*self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL: self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
if force == "end": self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
                        self.out("   ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
if has_key(link, 'title'): self.out(" ("+link['title']+")")
self.out("\n")
else:
newa.append(link)
if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
                self.out("  *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.outcount += 1
def handle_data(self, data):
if r'\/script>' in data: self.quiet -= 1
if self.style:
self.style_def.update(dumb_css_parser(data))
        if self.maybe_automatic_link is not None:
href = self.maybe_automatic_link
if href == data and self.absolute_url_matcher.match(href):
self.o("<" + data + ">")
return
else:
self.o("[")
self.maybe_automatic_link = None
if not self.code and not self.pre:
data = escape_md_section(data, snob=self.escape_snob)
self.o(data, 1)
def unknown_decl(self, data): pass
def charref(self, name):
if name[0] in ['x','X']:
c = int(name[1:], 16)
else:
c = int(name)
if not self.unicode_snob and c in unifiable_n.keys():
return unifiable_n[c]
else:
try:
return unichr(c)
except NameError: #Python3
return chr(c)
def entityref(self, c):
if not self.unicode_snob and c in unifiable.keys():
return unifiable[c]
else:
try: name2cp(c)
except KeyError: return "&" + c + ';'
else:
try:
return unichr(name2cp(c))
except NameError: #Python3
return chr(name2cp(c))
def replaceEntities(self, s):
s = s.group(1)
if s[0] == "#":
return self.charref(s[1:])
else: return self.entityref(s)
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(self, s):
return self.r_unescape.sub(self.replaceEntities, s)
def google_nest_count(self, style):
"""calculate the nesting count of google doc lists"""
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) / self.google_list_indent
return nest_count
def optwrap(self, text):
"""Wrap all paragraphs in the provided text."""
if not self.body_width:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if not skipwrap(para):
result += "\n".join(wrap(para, self.body_width))
if para.endswith(' '):
result += " \n"
newlines = 1
else:
result += "\n\n"
newlines = 2
else:
if not onlywhite(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result
ordered_list_matcher = re.compile(r'\d+\.\s')
unordered_list_matcher = re.compile(r'[-\*\+]\s')
md_chars_matcher = re.compile(r"([\\\[\]\(\)])")
md_chars_matcher_all = re.compile(r"([`\*_{}\[\]\(\)#!])")
md_dot_matcher = re.compile(r"""
^ # start of line
(\s*\d+) # optional whitespace and a number
(\.) # dot
(?=\s) # lookahead assert whitespace
""", re.MULTILINE | re.VERBOSE)
md_plus_matcher = re.compile(r"""
^
(\s*)
(\+)
(?=\s)
""", flags=re.MULTILINE | re.VERBOSE)
md_dash_matcher = re.compile(r"""
^
(\s*)
(-)
(?=\s|\-) # followed by whitespace (bullet list, or spaced out hr)
# or another dash (header or hr)
""", flags=re.MULTILINE | re.VERBOSE)
slash_chars = r'\`*_{}[]()#+-.!'
md_backslash_matcher = re.compile(r'''
(\\) # match one slash
(?=[%s]) # followed by a char that requires escaping
''' % re.escape(slash_chars),
flags=re.VERBOSE)
def skipwrap(para):
# If the text begins with four spaces or one tab, it's a code block; don't wrap
    if para[0:4] == '    ' or para[0] == '\t':
return True
# If the text begins with only two "--", possibly preceded by whitespace, that's
# an emdash; so wrap.
stripped = para.lstrip()
if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
return False
# I'm not sure what this is for; I thought it was to detect lists, but there's
# a <br>-inside-<span> case in one of the tests that also depends upon it.
if stripped[0:1] == '-' or stripped[0:1] == '*':
return True
# If the text begins with a single -, *, or +, followed by a space, or an integer,
    # followed by a ., followed by a space (in either case optionally preceded by
# whitespace), it's a list; don't wrap.
if ordered_list_matcher.match(stripped) or unordered_list_matcher.match(stripped):
return True
return False
def wrapwrite(text):
text = text.encode('utf-8')
try: #Python3
sys.stdout.buffer.write(text)
except AttributeError:
sys.stdout.write(text)
def html2text(html, baseurl=''):
h = HTML2Text(baseurl=baseurl)
return h.handle(html)
def unescape(s, unicode_snob=False):
h = HTML2Text()
h.unicode_snob = unicode_snob
return h.unescape(s)
def escape_md(text):
"""Escapes markdown-sensitive characters within other markdown constructs."""
return md_chars_matcher.sub(r"\\\1", text)
def escape_md_section(text, snob=False):
"""Escapes markdown-sensitive characters across whole document sections."""
text = md_backslash_matcher.sub(r"\\\1", text)
if snob:
text = md_chars_matcher_all.sub(r"\\\1", text)
text = md_dot_matcher.sub(r"\1\\\2", text)
text = md_plus_matcher.sub(r"\1\\\2", text)
text = md_dash_matcher.sub(r"\1\\\2", text)
return text
def main():
baseurl = ''
p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
version='%prog ' + __version__)
p.add_option("--ignore-emphasis", dest="ignore_emphasis", action="store_true",
default=IGNORE_EMPHASIS, help="don't include any formatting for emphasis")
p.add_option("--ignore-links", dest="ignore_links", action="store_true",
default=IGNORE_ANCHORS, help="don't include any formatting for links")
p.add_option("--ignore-images", dest="ignore_images", action="store_true",
default=IGNORE_IMAGES, help="don't include any formatting for images")
p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
default=False, help="convert an html-exported Google Document")
p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
default=False, help="use a dash rather than a star for unordered list items")
p.add_option("-e", "--asterisk-emphasis", action="store_true", dest="em_style_asterisk",
default=False, help="use an asterisk rather than an underscore for emphasized text")
p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
default=BODY_WIDTH, help="number of characters per output line, 0 for no wrap")
p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
default=False, help="hide strike-through text. only relevant when -g is specified as well")
p.add_option("--escape-all", action="store_true", dest="escape_snob",
default=False, help="Escape all special characters. Output is less readable, but avoids corner case formatting issues.")
(options, args) = p.parse_args()
# process input
encoding = "utf-8"
if len(args) > 0:
file_ = args[0]
if len(args) == 2:
encoding = args[1]
if len(args) > 2:
p.error('Too many arguments')
if file_.startswith('http://') or file_.startswith('https://'):
baseurl = file_
j = urllib.urlopen(baseurl)
data = j.read()
if encoding is None:
try:
from feedparser import _getCharacterEncoding as enc
except ImportError:
enc = lambda x, y: ('utf-8', 1)
encoding = enc(j.headers, data)[0]
if encoding == 'us-ascii':
encoding = 'utf-8'
else:
data = open(file_, 'rb').read()
if encoding is None:
try:
from chardet import detect
except ImportError:
detect = lambda x: {'encoding': 'utf-8'}
encoding = detect(data)['encoding']
else:
data = sys.stdin.read()
data = data.decode(encoding)
h = HTML2Text(baseurl=baseurl)
# handle options
if options.ul_style_dash: h.ul_item_mark = '-'
if options.em_style_asterisk:
h.emphasis_mark = '*'
h.strong_mark = '__'
h.body_width = options.body_width
h.list_indent = options.list_indent
h.ignore_emphasis = options.ignore_emphasis
h.ignore_links = options.ignore_links
h.ignore_images = options.ignore_images
h.google_doc = options.google_doc
h.hide_strikethrough = options.hide_strikethrough
h.escape_snob = options.escape_snob
wrapwrite(h.handle(data))
if __name__ == "__main__":
main()
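A usage sketch of the module-level convenience function defined above (the HTML fragment is illustrative):

# Convert a small HTML fragment; with the defaults above (INLINE_LINKS = True,
# BODY_WIDTH = 78) the output is roughly "# Title" followed by
# "Some **bold** text and a [link](http://example.com/)."
sample = ("<h1>Title</h1><p>Some <b>bold</b> text and a "
          "<a href=\"http://example.com/\">link</a>.</p>")
print(html2text(sample))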
| 35.096175
| 129
| 0.53128
|
__version__ = "3.200.3"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
try:
True
except NameError:
setattr(__builtins__, 'True', 1)
setattr(__builtins__, 'False', 0)
def has_key(x, y):
if hasattr(x, 'has_key'): return x.has_key(y)
else: return y in x
try:
import htmlentitydefs
import urlparse
import HTMLParser
except ImportError:
import html.entities as htmlentitydefs
import urllib.parse as urlparse
import html.parser as HTMLParser
try:
import urllib.request as urllib
except:
import urllib
import optparse, re, sys, codecs, types
try: from textwrap import wrap
except: pass
UNICODE_SNOB = 0
ESCAPE_SNOB = 0
LINKS_EACH_PARAGRAPH = 0
BODY_WIDTH = 78
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True
INLINE_LINKS = True
GOOGLE_LIST_INDENT = 36
IGNORE_ANCHORS = False
IGNORE_IMAGES = False
IGNORE_EMPHASIS = False
def name2cp(k):
    if k == 'apos': return ord("'")
if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
else:
k = htmlentitydefs.entitydefs[k]
if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
return ord(codecs.latin_1_decode(k)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
'lrm':'', 'rlm':''}
unifiable_n = {}
for k in unifiable.keys():
unifiable_n[name2cp(k)] = unifiable[k]
### End Entity Nonsense ###
def onlywhite(line):
for c in line:
        if c != ' ' and c != '\t':
            return c == ' '
return line
def hn(tag):
if tag[0] == 'h' and len(tag) == 2:
try:
n = int(tag[1])
if n in range(1, 10): return n
except ValueError: return 0
def dumb_property_dict(style):
return dict([(x.strip(), y.strip()) for x, y in [z.split(':', 1) for z in style.split(';') if ':' in z]]);
def dumb_css_parser(data):
# remove @import sentences
data += ';'
importIndex = data.find('@import')
while importIndex != -1:
data = data[0:importIndex] + data[data.find(';', importIndex) + 1:]
importIndex = data.find('@import')
    # parse the css. reverted from dictionary comprehension in order to support older pythons
elements = [x.split('{') for x in data.split('}') if '{' in x.strip()]
try:
elements = dict([(a.strip(), dumb_property_dict(b)) for a, b in elements])
except ValueError:
elements = {} # not that important
return elements
def element_style(attrs, style_def, parent_style):
style = parent_style.copy()
if 'class' in attrs:
for css_class in attrs['class'].split():
css_style = style_def['.' + css_class]
style.update(css_style)
if 'style' in attrs:
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style
def google_list_style(style):
if 'list-style-type' in style:
list_style = style['list-style-type']
if list_style in ['disc', 'circle', 'square', 'none']:
return 'ul'
return 'ol'
def google_has_height(style):
if 'height' in style:
return True
return False
def google_text_emphasis(style):
emphasis = []
if 'text-decoration' in style:
emphasis.append(style['text-decoration'])
if 'font-style' in style:
emphasis.append(style['font-style'])
if 'font-weight' in style:
emphasis.append(style['font-weight'])
return emphasis
def google_fixed_width_font(style):
font_family = ''
if 'font-family' in style:
font_family = style['font-family']
if 'Courier New' == font_family or 'Consolas' == font_family:
return True
return False
def list_numbering_start(attrs):
if 'start' in attrs:
return int(attrs['start']) - 1
else:
return 0
class HTML2Text(HTMLParser.HTMLParser):
def __init__(self, out=None, baseurl=''):
HTMLParser.HTMLParser.__init__(self)
# Config options
self.unicode_snob = UNICODE_SNOB
self.escape_snob = ESCAPE_SNOB
self.links_each_paragraph = LINKS_EACH_PARAGRAPH
self.body_width = BODY_WIDTH
self.skip_internal_links = SKIP_INTERNAL_LINKS
self.inline_links = INLINE_LINKS
self.google_list_indent = GOOGLE_LIST_INDENT
self.ignore_links = IGNORE_ANCHORS
self.ignore_images = IGNORE_IMAGES
self.ignore_emphasis = IGNORE_EMPHASIS
self.google_doc = False
self.ul_item_mark = '*'
self.emphasis_mark = '_'
self.strong_mark = '**'
if out is None:
self.out = self.outtextf
else:
self.out = out
self.outtextlist = [] # empty list to store output characters before they are "joined"
try:
self.outtext = unicode()
except NameError: # Python3
self.outtext = str()
self.quiet = 0
self.p_p = 0 # number of newline character to print before next output
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.maybe_automatic_link = None
self.absolute_url_matcher = re.compile(r'^[a-zA-Z+]+://')
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.code = False
self.br_toggle = ''
self.lastWasNL = 0
self.lastWasList = False
self.style = 0
self.style_def = {}
self.tag_stack = []
self.emphasis = 0
self.drop_white_space = 0
self.inheader = False
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
try: del unifiable_n[name2cp('nbsp')]
except KeyError: pass
        unifiable['nbsp'] = '&nbsp_place_holder;'
def feed(self, data):
data = data.replace("</' + 'script>", "</ignore>")
HTMLParser.HTMLParser.feed(self, data)
def handle(self, data):
self.feed(data)
self.feed("")
return self.optwrap(self.close())
def outtextf(self, s):
self.outtextlist.append(s)
if s: self.lastWasNL = s[-1] == '\n'
def close(self):
HTMLParser.HTMLParser.close(self)
self.pbr()
self.o('', 0, 'end')
self.outtext = self.outtext.join(self.outtextlist)
if self.unicode_snob:
nbsp = unichr(name2cp('nbsp'))
else:
nbsp = u' '
        self.outtext = self.outtext.replace(u'&nbsp_place_holder;', nbsp)
return self.outtext
def handle_charref(self, c):
self.o(self.charref(c), 1)
def handle_entityref(self, c):
self.o(self.entityref(c), 1)
def handle_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def handle_endtag(self, tag):
self.handle_tag(tag, None, 0)
def previousIndex(self, attrs):
if not has_key(attrs, 'href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if has_key(a, 'href') and a['href'] == attrs['href']:
if has_key(a, 'title') or has_key(attrs, 'title'):
if (has_key(a, 'title') and has_key(attrs, 'title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i
def drop_last(self, nLetters):
if not self.quiet:
self.outtext = self.outtext[:-nLetters]
def handle_emphasis(self, start, tag_style, parent_style):
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1
def handle_tag(self, tag, attrs, start):
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
if self.google_doc:
parent_style = {}
if start:
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
tag_style = element_style(attrs, self.style_def, parent_style)
self.tag_stack.append((tag, attrs, tag_style))
else:
dummy, attrs, tag_style = self.tag_stack.pop()
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
if hn(tag):
self.p()
if start:
self.inheader = True
self.o(hn(tag)*"#" + ' ')
else:
self.inheader = False
return
if tag in ['p', 'div']:
if self.google_doc:
if start and google_has_height(tag_style):
self.p()
else:
self.soft_br()
else:
self.p()
        if tag == "br" and start: self.o("  \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start: self.quiet += 1
else: self.quiet -= 1
if tag == "style":
if start: self.style += 1
else: self.style -= 1
if tag in ["body"]:
self.quiet = 0
if tag == "blockquote":
if start:
self.p(); self.o('> ', 0, 1); self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u'] and not self.ignore_emphasis: self.o(self.emphasis_mark)
if tag in ['strong', 'b'] and not self.ignore_emphasis: self.o(self.strong_mark)
if tag in ['del', 'strike', 's']:
if start:
self.o("<"+tag+">")
else:
self.o("</"+tag+">")
if self.google_doc:
if not self.inheader:
self.handle_emphasis(start, tag_style, parent_style)
if tag in ["code", "tt"] and not self.pre: self.o('`')
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ''
if has_key(attrs, 'title'):
self.abbr_title = attrs['title']
else:
if self.abbr_title != None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a" and not self.ignore_links:
if start:
if has_key(attrs, 'href') and not (self.skip_internal_links and attrs['href'].startswith('#')):
self.astack.append(attrs)
self.maybe_automatic_link = attrs['href']
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link:
self.maybe_automatic_link = None
elif a:
if self.inline_links:
self.o("](" + escape_md(a['href']) + ")")
else:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + str(a['count']) + "]")
if tag == "img" and start and not self.ignore_images:
if has_key(attrs, 'src'):
attrs['href'] = attrs['src']
alt = attrs.get('alt', '')
self.o("![" + escape_md(alt) + "]")
if self.inline_links:
self.o("(" + escape_md(attrs['href']) + ")")
else:
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("[" + str(attrs['count']) + "]")
if tag == 'dl' and start: self.p()
if tag == 'dt' and not start: self.pbr()
        if tag == 'dd' and start: self.o('    ')
if tag == 'dd' and not start: self.pbr()
if tag in ["ol", "ul"]:
if (not self.list) and (not self.lastWasList):
self.p()
if start:
if self.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append({'name':list_style, 'num':numbering_start})
else:
if self.list: self.list.pop()
self.lastWasList = True
else:
self.lastWasList = False
if tag == 'li':
self.pbr()
if start:
if self.list: li = self.list[-1]
else: li = {'name':'ul', 'num':0}
if self.google_doc:
nest_count = self.google_nest_count(tag_style)
else:
nest_count = len(self.list)
                self.o("  " * nest_count)
if li['name'] == "ul": self.o(self.ul_item_mark + " ")
elif li['name'] == "ol":
li['num'] += 1
self.o(str(li['num'])+". ")
self.start = 1
if tag in ["table", "tr"] and start: self.p()
if tag == 'td': self.pbr()
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
self.p()
def pbr(self):
if self.p_p == 0:
self.p_p = 1
def p(self):
self.p_p = 2
def soft_br(self):
self.pbr()
self.br_toggle = ' '
def o(self, data, puredata=0, force=0):
if self.abbr_data is not None:
self.abbr_data += data
if not self.quiet:
if self.google_doc:
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if puredata and not self.pre:
                data = re.sub(r'\s+', ' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force: return
            if self.startpre:
                if not data.startswith("\n"):
                    data = "\n" + data
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote: bq += " "
            if self.pre:
                if not self.list:
                    bq += "    "
                for i in range(len(self.list)):
                    bq += "    "
data = data.replace("\n", "\n"+bq)
if self.startpre:
self.startpre = 0
if self.list:
data = data.lstrip("\n")
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out((self.br_toggle+'\n'+bq)*self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL: self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
if force == "end": self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
                        self.out("   ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
if has_key(link, 'title'): self.out(" ("+link['title']+")")
self.out("\n")
else:
newa.append(link)
if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
                self.out("  *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.outcount += 1
def handle_data(self, data):
if r'\/script>' in data: self.quiet -= 1
if self.style:
self.style_def.update(dumb_css_parser(data))
        if self.maybe_automatic_link is not None:
href = self.maybe_automatic_link
if href == data and self.absolute_url_matcher.match(href):
self.o("<" + data + ">")
return
else:
self.o("[")
self.maybe_automatic_link = None
if not self.code and not self.pre:
data = escape_md_section(data, snob=self.escape_snob)
self.o(data, 1)
def unknown_decl(self, data): pass
def charref(self, name):
if name[0] in ['x','X']:
c = int(name[1:], 16)
else:
c = int(name)
if not self.unicode_snob and c in unifiable_n.keys():
return unifiable_n[c]
else:
try:
return unichr(c)
except NameError:
return chr(c)
def entityref(self, c):
if not self.unicode_snob and c in unifiable.keys():
return unifiable[c]
else:
try: name2cp(c)
except KeyError: return "&" + c + ';'
else:
try:
return unichr(name2cp(c))
except NameError:
return chr(name2cp(c))
def replaceEntities(self, s):
s = s.group(1)
if s[0] == "#":
return self.charref(s[1:])
else: return self.entityref(s)
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(self, s):
return self.r_unescape.sub(self.replaceEntities, s)
def google_nest_count(self, style):
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) / self.google_list_indent
return nest_count
def optwrap(self, text):
if not self.body_width:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if not skipwrap(para):
result += "\n".join(wrap(para, self.body_width))
if para.endswith(' '):
result += " \n"
newlines = 1
else:
result += "\n\n"
newlines = 2
else:
if not onlywhite(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result
ordered_list_matcher = re.compile(r'\d+\.\s')
unordered_list_matcher = re.compile(r'[-\*\+]\s')
md_chars_matcher = re.compile(r"([\\\[\]\(\)])")
md_chars_matcher_all = re.compile(r"([`\*_{}\[\]\(\)#!])")
md_dot_matcher = re.compile(r"""
^ # start of line
(\s*\d+) # optional whitespace and a number
(\.) # dot
(?=\s) # lookahead assert whitespace
""", re.MULTILINE | re.VERBOSE)
md_plus_matcher = re.compile(r"""
^
(\s*)
(\+)
(?=\s)
""", flags=re.MULTILINE | re.VERBOSE)
md_dash_matcher = re.compile(r"""
^
(\s*)
(-)
(?=\s|\-) # followed by whitespace (bullet list, or spaced out hr)
# or another dash (header or hr)
""", flags=re.MULTILINE | re.VERBOSE)
slash_chars = r'\`*_{}[]()#+-.!'
md_backslash_matcher = re.compile(r'''
(\\) # match one slash
(?=[%s]) # followed by a char that requires escaping
''' % re.escape(slash_chars),
flags=re.VERBOSE)
def skipwrap(para):
    if para[0:4] == '    ' or para[0] == '\t':
return True
# an emdash; so wrap.
stripped = para.lstrip()
if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
return False
# I'm not sure what this is for; I thought it was to detect lists, but there's
# a <br>-inside-<span> case in one of the tests that also depends upon it.
if stripped[0:1] == '-' or stripped[0:1] == '*':
return True
# If the text begins with a single -, *, or +, followed by a space, or an integer,
    # followed by a ., followed by a space (in either case optionally preceded by
# whitespace), it's a list; don't wrap.
if ordered_list_matcher.match(stripped) or unordered_list_matcher.match(stripped):
return True
return False
def wrapwrite(text):
text = text.encode('utf-8')
try: #Python3
sys.stdout.buffer.write(text)
except AttributeError:
sys.stdout.write(text)
def html2text(html, baseurl=''):
h = HTML2Text(baseurl=baseurl)
return h.handle(html)
def unescape(s, unicode_snob=False):
h = HTML2Text()
h.unicode_snob = unicode_snob
return h.unescape(s)
def escape_md(text):
return md_chars_matcher.sub(r"\\\1", text)
def escape_md_section(text, snob=False):
text = md_backslash_matcher.sub(r"\\\1", text)
if snob:
text = md_chars_matcher_all.sub(r"\\\1", text)
text = md_dot_matcher.sub(r"\1\\\2", text)
text = md_plus_matcher.sub(r"\1\\\2", text)
text = md_dash_matcher.sub(r"\1\\\2", text)
return text
def main():
baseurl = ''
p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
version='%prog ' + __version__)
p.add_option("--ignore-emphasis", dest="ignore_emphasis", action="store_true",
default=IGNORE_EMPHASIS, help="don't include any formatting for emphasis")
p.add_option("--ignore-links", dest="ignore_links", action="store_true",
default=IGNORE_ANCHORS, help="don't include any formatting for links")
p.add_option("--ignore-images", dest="ignore_images", action="store_true",
default=IGNORE_IMAGES, help="don't include any formatting for images")
p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
default=False, help="convert an html-exported Google Document")
p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
default=False, help="use a dash rather than a star for unordered list items")
p.add_option("-e", "--asterisk-emphasis", action="store_true", dest="em_style_asterisk",
default=False, help="use an asterisk rather than an underscore for emphasized text")
p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
default=BODY_WIDTH, help="number of characters per output line, 0 for no wrap")
p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
default=False, help="hide strike-through text. only relevant when -g is specified as well")
p.add_option("--escape-all", action="store_true", dest="escape_snob",
default=False, help="Escape all special characters. Output is less readable, but avoids corner case formatting issues.")
(options, args) = p.parse_args()
encoding = "utf-8"
if len(args) > 0:
file_ = args[0]
if len(args) == 2:
encoding = args[1]
if len(args) > 2:
p.error('Too many arguments')
if file_.startswith('http://') or file_.startswith('https://'):
baseurl = file_
j = urllib.urlopen(baseurl)
data = j.read()
if encoding is None:
try:
from feedparser import _getCharacterEncoding as enc
except ImportError:
enc = lambda x, y: ('utf-8', 1)
encoding = enc(j.headers, data)[0]
if encoding == 'us-ascii':
encoding = 'utf-8'
else:
data = open(file_, 'rb').read()
if encoding is None:
try:
from chardet import detect
except ImportError:
detect = lambda x: {'encoding': 'utf-8'}
encoding = detect(data)['encoding']
else:
data = sys.stdin.read()
data = data.decode(encoding)
h = HTML2Text(baseurl=baseurl)
if options.ul_style_dash: h.ul_item_mark = '-'
if options.em_style_asterisk:
h.emphasis_mark = '*'
h.strong_mark = '__'
h.body_width = options.body_width
h.list_indent = options.list_indent
h.ignore_emphasis = options.ignore_emphasis
h.ignore_links = options.ignore_links
h.ignore_images = options.ignore_images
h.google_doc = options.google_doc
h.hide_strikethrough = options.hide_strikethrough
h.escape_snob = options.escape_snob
wrapwrite(h.handle(data))
if __name__ == "__main__":
main()
| true
| true
|
790888c89dd43e43cdc2dafb433b71dd44131ea8
| 4,405
|
py
|
Python
|
setup.py
|
upstream-janitor/dulwich
|
a92ab5e826872ddac1cbd72fa1b6a41fe08c6834
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
upstream-janitor/dulwich
|
a92ab5e826872ddac1cbd72fa1b6a41fe08c6834
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
upstream-janitor/dulwich
|
a92ab5e826872ddac1cbd72fa1b6a41fe08c6834
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# encoding: utf-8
# Setup file for dulwich
# Copyright (C) 2008-2016 Jelmer Vernooij <jelmer@jelmer.uk>
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
has_setuptools = False
else:
has_setuptools = True
from distutils.core import Distribution
import io
import os
import sys
from typing import Dict, Any
if sys.version_info < (3, 6):
raise Exception(
'Dulwich only supports Python 3.6 and later. '
'For 2.7 support, please install a version prior to 0.20')
dulwich_version_string = '0.20.32'
class DulwichDistribution(Distribution):
def is_pure(self):
if self.pure:
return True
def has_ext_modules(self):
return not self.pure
global_options = Distribution.global_options + [
('pure', None, "use pure Python code instead of C "
"extensions (slower on CPython)")]
pure = False
if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
# XCode 4.0 dropped support for ppc architecture, which is hardcoded in
# distutils.sysconfig
import subprocess
p = subprocess.Popen(
['/usr/bin/xcodebuild', '-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env={})
out, err = p.communicate()
for line in out.splitlines():
line = line.decode("utf8")
# Also parse only first digit, because 3.2.1 can't be parsed nicely
if (line.startswith('Xcode') and
int(line.split()[1].split('.')[0]) >= 4):
os.environ['ARCHFLAGS'] = ''
tests_require = ['fastimport']
if '__pypy__' not in sys.modules and sys.platform != 'win32':
tests_require.extend([
'gevent', 'geventhttpclient', 'setuptools>=17.1'])
ext_modules = [
Extension('dulwich._objects', ['dulwich/_objects.c']),
Extension('dulwich._pack', ['dulwich/_pack.c']),
Extension('dulwich._diff_tree', ['dulwich/_diff_tree.c']),
]
setup_kwargs = {} # type: Dict[str, Any]
scripts = ['bin/dul-receive-pack', 'bin/dul-upload-pack']
if has_setuptools:
setup_kwargs['extras_require'] = {
'fastimport': ['fastimport'],
'https': ['urllib3[secure]>=1.24.1'],
'pgp': ['gpg'],
'watch': ['pyinotify'],
}
setup_kwargs['install_requires'] = ['urllib3>=1.24.1', 'certifi']
setup_kwargs['include_package_data'] = True
setup_kwargs['test_suite'] = 'dulwich.tests.test_suite'
setup_kwargs['tests_require'] = tests_require
setup_kwargs['entry_points'] = {
"console_scripts": [
"dulwich=dulwich.cli:main",
]}
setup_kwargs['python_requires'] = '>=3.6'
else:
scripts.append('bin/dulwich')
with io.open(os.path.join(os.path.dirname(__file__), "README.rst"),
encoding="utf-8") as f:
description = f.read()
setup(name='dulwich',
author="Jelmer Vernooij",
author_email="jelmer@jelmer.uk",
url="https://www.dulwich.io/",
long_description=description,
description="Python Git Library",
version=dulwich_version_string,
license='Apachev2 or later or GPLv2',
project_urls={
"Bug Tracker": "https://github.com/dulwich/dulwich/issues",
"Repository": "https://www.dulwich.io/code/",
"GitHub": "https://github.com/dulwich/dulwich",
},
keywords="git vcs",
packages=['dulwich', 'dulwich.tests', 'dulwich.tests.compat',
'dulwich.contrib'],
package_data={'': ['../docs/tutorial/*.txt', 'py.typed']},
scripts=scripts,
ext_modules=ext_modules,
zip_safe=False,
distclass=DulwichDistribution,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Topic :: Software Development :: Version Control',
],
**setup_kwargs
)
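The `--pure` switch above works by overriding two Distribution hooks: `is_pure()` and `has_ext_modules()`. A minimal, self-contained sketch of the same pattern (the class name is hypothetical, and distutils is deprecated on recent Pythons):

from distutils.core import Distribution

class PureToggleDistribution(Distribution):  # hypothetical name
    pure = False
    global_options = Distribution.global_options + [
        ('pure', None, "use pure Python code instead of C extensions")]

    def is_pure(self):
        return bool(self.pure)

    def has_ext_modules(self):
        # the build machinery consults this to decide whether the
        # distribution is platform-specific
        return not self.pure

dist = PureToggleDistribution()
dist.pure = True
assert dist.is_pure() and not dist.has_ext_modules()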
| 32.153285
| 75
| 0.620204
|
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
has_setuptools = False
else:
has_setuptools = True
from distutils.core import Distribution
import io
import os
import sys
from typing import Dict, Any
if sys.version_info < (3, 6):
raise Exception(
'Dulwich only supports Python 3.6 and later. '
'For 2.7 support, please install a version prior to 0.20')
dulwich_version_string = '0.20.32'
class DulwichDistribution(Distribution):
def is_pure(self):
if self.pure:
return True
def has_ext_modules(self):
return not self.pure
global_options = Distribution.global_options + [
('pure', None, "use pure Python code instead of C "
"extensions (slower on CPython)")]
pure = False
if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
import subprocess
p = subprocess.Popen(
['/usr/bin/xcodebuild', '-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env={})
out, err = p.communicate()
for line in out.splitlines():
line = line.decode("utf8")
if (line.startswith('Xcode') and
int(line.split()[1].split('.')[0]) >= 4):
os.environ['ARCHFLAGS'] = ''
tests_require = ['fastimport']
if '__pypy__' not in sys.modules and sys.platform != 'win32':
tests_require.extend([
'gevent', 'geventhttpclient', 'setuptools>=17.1'])
ext_modules = [
Extension('dulwich._objects', ['dulwich/_objects.c']),
Extension('dulwich._pack', ['dulwich/_pack.c']),
Extension('dulwich._diff_tree', ['dulwich/_diff_tree.c']),
]
setup_kwargs = {} # type: Dict[str, Any]
scripts = ['bin/dul-receive-pack', 'bin/dul-upload-pack']
if has_setuptools:
setup_kwargs['extras_require'] = {
'fastimport': ['fastimport'],
'https': ['urllib3[secure]>=1.24.1'],
'pgp': ['gpg'],
'watch': ['pyinotify'],
}
setup_kwargs['install_requires'] = ['urllib3>=1.24.1', 'certifi']
setup_kwargs['include_package_data'] = True
setup_kwargs['test_suite'] = 'dulwich.tests.test_suite'
setup_kwargs['tests_require'] = tests_require
setup_kwargs['entry_points'] = {
"console_scripts": [
"dulwich=dulwich.cli:main",
]}
setup_kwargs['python_requires'] = '>=3.6'
else:
scripts.append('bin/dulwich')
with io.open(os.path.join(os.path.dirname(__file__), "README.rst"),
encoding="utf-8") as f:
description = f.read()
setup(name='dulwich',
author="Jelmer Vernooij",
author_email="jelmer@jelmer.uk",
url="https://www.dulwich.io/",
long_description=description,
description="Python Git Library",
version=dulwich_version_string,
license='Apachev2 or later or GPLv2',
project_urls={
"Bug Tracker": "https://github.com/dulwich/dulwich/issues",
"Repository": "https://www.dulwich.io/code/",
"GitHub": "https://github.com/dulwich/dulwich",
},
keywords="git vcs",
packages=['dulwich', 'dulwich.tests', 'dulwich.tests.compat',
'dulwich.contrib'],
package_data={'': ['../docs/tutorial/*.txt', 'py.typed']},
scripts=scripts,
ext_modules=ext_modules,
zip_safe=False,
distclass=DulwichDistribution,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Topic :: Software Development :: Version Control',
],
**setup_kwargs
)
| true
| true
|