| body<br>stringlengths 26 – 98.2k | body_hash<br>int64 -9,222,864,604,528,158,000 – 9,221,803,474B | docstring<br>stringlengths 1 – 16.8k | path<br>stringlengths 5 – 230 | name<br>stringlengths 1 – 96 | repository_name<br>stringlengths 7 – 89 | lang<br>stringclasses 1 value | body_without_docstring<br>stringlengths 20 – 98.2k |
|---|---|---|---|---|---|---|---|
async def test_form_ssdp(hass):
'Test we get the form with ssdp source.'
(await setup.async_setup_component(hass, 'persistent_notification', {}))
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': config_entries.SOURCE_SSDP}, data={'friendlyName': 'UniFi Dream Machine', 'modelDescription': 'UniFi Dream Machine Pro', 'ssdp_location': 'http://192.168.208.1:41417/rootDesc.xml', 'serialNumber': 'e0:63:da:20:14:a9'}))
assert (result['type'] == 'form')
assert (result['step_id'] == 'user')
assert (result['errors'] == {})
context = next((flow['context'] for flow in hass.config_entries.flow.async_progress() if (flow['flow_id'] == result['flow_id'])))
assert (context['title_placeholders'] == {'host': '192.168.208.1', 'site': 'default'})
| 368,373,610,883,121,660
|
Test we get the form with ssdp source.
|
tests/components/unifi/test_config_flow.py
|
test_form_ssdp
|
Nixon506E/home-assistant
|
python
|
async def test_form_ssdp(hass):
(await setup.async_setup_component(hass, 'persistent_notification', {}))
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': config_entries.SOURCE_SSDP}, data={'friendlyName': 'UniFi Dream Machine', 'modelDescription': 'UniFi Dream Machine Pro', 'ssdp_location': 'http://192.168.208.1:41417/rootDesc.xml', 'serialNumber': 'e0:63:da:20:14:a9'}))
assert (result['type'] == 'form')
assert (result['step_id'] == 'user')
assert (result['errors'] == {})
context = next((flow['context'] for flow in hass.config_entries.flow.async_progress() if (flow['flow_id'] == result['flow_id'])))
assert (context['title_placeholders'] == {'host': '192.168.208.1', 'site': 'default'})
|
async def test_form_ssdp_aborts_if_host_already_exists(hass):
'Test we abort if the host is already configured.'
(await setup.async_setup_component(hass, 'persistent_notification', {}))
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'controller': {'host': '192.168.208.1', 'site': 'site_id'}})
entry.add_to_hass(hass)
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': config_entries.SOURCE_SSDP}, data={'friendlyName': 'UniFi Dream Machine', 'modelDescription': 'UniFi Dream Machine Pro', 'ssdp_location': 'http://192.168.208.1:41417/rootDesc.xml', 'serialNumber': 'e0:63:da:20:14:a9'}))
assert (result['type'] == 'abort')
assert (result['reason'] == 'already_configured')
| -2,059,152,354,034,528,300
|
Test we abort if the host is already configured.
|
tests/components/unifi/test_config_flow.py
|
test_form_ssdp_aborts_if_host_already_exists
|
Nixon506E/home-assistant
|
python
|
async def test_form_ssdp_aborts_if_host_already_exists(hass):
(await setup.async_setup_component(hass, 'persistent_notification', {}))
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'controller': {'host': '192.168.208.1', 'site': 'site_id'}})
entry.add_to_hass(hass)
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': config_entries.SOURCE_SSDP}, data={'friendlyName': 'UniFi Dream Machine', 'modelDescription': 'UniFi Dream Machine Pro', 'ssdp_location': 'http://192.168.208.1:41417/rootDesc.xml', 'serialNumber': 'e0:63:da:20:14:a9'}))
assert (result['type'] == 'abort')
assert (result['reason'] == 'already_configured')
|
async def test_form_ssdp_aborts_if_serial_already_exists(hass):
'Test we abort if the serial is already configured.'
(await setup.async_setup_component(hass, 'persistent_notification', {}))
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'controller': {'host': '1.2.3.4', 'site': 'site_id'}}, unique_id='e0:63:da:20:14:a9')
entry.add_to_hass(hass)
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': config_entries.SOURCE_SSDP}, data={'friendlyName': 'UniFi Dream Machine', 'modelDescription': 'UniFi Dream Machine Pro', 'ssdp_location': 'http://192.168.208.1:41417/rootDesc.xml', 'serialNumber': 'e0:63:da:20:14:a9'}))
assert (result['type'] == 'abort')
assert (result['reason'] == 'already_configured')
| -1,591,089,730,494,826,500
|
Test we abort if the serial is already configured.
|
tests/components/unifi/test_config_flow.py
|
test_form_ssdp_aborts_if_serial_already_exists
|
Nixon506E/home-assistant
|
python
|
async def test_form_ssdp_aborts_if_serial_already_exists(hass):
(await setup.async_setup_component(hass, 'persistent_notification', {}))
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'controller': {'host': '1.2.3.4', 'site': 'site_id'}}, unique_id='e0:63:da:20:14:a9')
entry.add_to_hass(hass)
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': config_entries.SOURCE_SSDP}, data={'friendlyName': 'UniFi Dream Machine', 'modelDescription': 'UniFi Dream Machine Pro', 'ssdp_location': 'http://192.168.208.1:41417/rootDesc.xml', 'serialNumber': 'e0:63:da:20:14:a9'}))
assert (result['type'] == 'abort')
assert (result['reason'] == 'already_configured')
|
async def test_form_ssdp_gets_form_with_ignored_entry(hass):
'Test we can still setup if there is an ignored entry.'
(await setup.async_setup_component(hass, 'persistent_notification', {}))
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'not_controller_key': None}, source=config_entries.SOURCE_IGNORE)
entry.add_to_hass(hass)
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': config_entries.SOURCE_SSDP}, data={'friendlyName': 'UniFi Dream Machine New', 'modelDescription': 'UniFi Dream Machine Pro', 'ssdp_location': 'http://1.2.3.4:41417/rootDesc.xml', 'serialNumber': 'e0:63:da:20:14:a9'}))
assert (result['type'] == 'form')
assert (result['step_id'] == 'user')
assert (result['errors'] == {})
context = next((flow['context'] for flow in hass.config_entries.flow.async_progress() if (flow['flow_id'] == result['flow_id'])))
assert (context['title_placeholders'] == {'host': '1.2.3.4', 'site': 'default'})
| 7,864,262,452,009,473,000
|
Test we can still setup if there is an ignored entry.
|
tests/components/unifi/test_config_flow.py
|
test_form_ssdp_gets_form_with_ignored_entry
|
Nixon506E/home-assistant
|
python
|
async def test_form_ssdp_gets_form_with_ignored_entry(hass):
(await setup.async_setup_component(hass, 'persistent_notification', {}))
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'not_controller_key': None}, source=config_entries.SOURCE_IGNORE)
entry.add_to_hass(hass)
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': config_entries.SOURCE_SSDP}, data={'friendlyName': 'UniFi Dream Machine New', 'modelDescription': 'UniFi Dream Machine Pro', 'ssdp_location': 'http://1.2.3.4:41417/rootDesc.xml', 'serialNumber': 'e0:63:da:20:14:a9'}))
assert (result['type'] == 'form')
assert (result['step_id'] == 'user')
assert (result['errors'] == {})
context = next((flow['context'] for flow in hass.config_entries.flow.async_progress() if (flow['flow_id'] == result['flow_id'])))
assert (context['title_placeholders'] == {'host': '1.2.3.4', 'site': 'default'})
|
def setup_platform(hass, config, add_devices, discovery_info=None):
    'Set up the available BloomSky weather sensors.'
logger = logging.getLogger(__name__)
bloomsky = get_component('bloomsky')
sensors = config.get('monitored_conditions', SENSOR_TYPES)
for device in bloomsky.BLOOMSKY.devices.values():
for variable in sensors:
if (variable in SENSOR_TYPES):
add_devices([BloomSkySensor(bloomsky.BLOOMSKY, device, variable)])
else:
logger.error('Cannot find definition for device: %s', variable)
| 4,047,506,490,008,436,700
|
Set up the available BloomSky weather sensors.
|
homeassistant/components/sensor/bloomsky.py
|
setup_platform
|
1lann/home-assistant
|
python
|
def setup_platform(hass, config, add_devices, discovery_info=None):
logger = logging.getLogger(__name__)
bloomsky = get_component('bloomsky')
sensors = config.get('monitored_conditions', SENSOR_TYPES)
for device in bloomsky.BLOOMSKY.devices.values():
for variable in sensors:
if (variable in SENSOR_TYPES):
add_devices([BloomSkySensor(bloomsky.BLOOMSKY, device, variable)])
else:
logger.error('Cannot find definition for device: %s', variable)
|
def __init__(self, bs, device, sensor_name):
'Initialize a bloomsky sensor.'
self._bloomsky = bs
self._device_id = device['DeviceID']
self._sensor_name = sensor_name
self._name = '{} {}'.format(device['DeviceName'], sensor_name)
self._unique_id = 'bloomsky_sensor {}'.format(self._name)
self.update()
| -5,146,920,514,635,128,000
|
Initialize a bloomsky sensor.
|
homeassistant/components/sensor/bloomsky.py
|
__init__
|
1lann/home-assistant
|
python
|
def __init__(self, bs, device, sensor_name):
self._bloomsky = bs
self._device_id = device['DeviceID']
self._sensor_name = sensor_name
self._name = '{} {}'.format(device['DeviceName'], sensor_name)
self._unique_id = 'bloomsky_sensor {}'.format(self._name)
self.update()
|
@property
def name(self):
'The name of the BloomSky device and this sensor.'
return self._name
| 9,038,137,262,409,299,000
|
The name of the BloomSky device and this sensor.
|
homeassistant/components/sensor/bloomsky.py
|
name
|
1lann/home-assistant
|
python
|
@property
def name(self):
return self._name
|
@property
def unique_id(self):
'Return the unique ID for this sensor.'
return self._unique_id
| 1,956,464,878,067,841,800
|
Return the unique ID for this sensor.
|
homeassistant/components/sensor/bloomsky.py
|
unique_id
|
1lann/home-assistant
|
python
|
@property
def unique_id(self):
return self._unique_id
|
@property
def state(self):
    'The current state, e.g. value, of this sensor.'
return self._state
| -8,008,055,729,367,274,000
|
The current state, e.g. value, of this sensor.
|
homeassistant/components/sensor/bloomsky.py
|
state
|
1lann/home-assistant
|
python
|
@property
def state(self):
return self._state
|
@property
def unit_of_measurement(self):
'Return the sensor units.'
return SENSOR_UNITS.get(self._sensor_name, None)
| -20,660,918,308,585,132
|
Return the sensor units.
|
homeassistant/components/sensor/bloomsky.py
|
unit_of_measurement
|
1lann/home-assistant
|
python
|
@property
def unit_of_measurement(self):
return SENSOR_UNITS.get(self._sensor_name, None)
|
def update(self):
'Request an update from the BloomSky API.'
self._bloomsky.refresh_devices()
state = self._bloomsky.devices[self._device_id]['Data'][self._sensor_name]
if (self._sensor_name in FORMAT_NUMBERS):
self._state = '{0:.2f}'.format(state)
else:
self._state = state
| 100,917,558,859,747,420
|
Request an update from the BloomSky API.
|
homeassistant/components/sensor/bloomsky.py
|
update
|
1lann/home-assistant
|
python
|
def update(self):
self._bloomsky.refresh_devices()
state = self._bloomsky.devices[self._device_id]['Data'][self._sensor_name]
if (self._sensor_name in FORMAT_NUMBERS):
self._state = '{0:.2f}'.format(state)
else:
self._state = state
|
@cluster(num_nodes=12)
@parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH))
@parametrize(producer_version=str(LATEST_0_10), consumer_version=str(LATEST_0_10))
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
def test_compatibility(self, producer_version, consumer_version):
" This tests performs the following checks:\n The workload is a mix of 0.9.x, 0.10.x and 0.11.x producers and consumers\n that produce to and consume from a DEV_BRANCH cluster\n 1. initially the topic is using message format 0.9.0\n 2. change the message format version for topic to 0.10.0 on the fly.\n 3. change the message format version for topic to 0.11.0 on the fly.\n 4. change the message format version for topic back to 0.10.0 on the fly (only if the client version is 0.11.0 or newer)\n - The producers and consumers should not have any issue.\n\n Note regarding step number 4. Downgrading the message format version is generally unsupported as it breaks\n older clients. More concretely, if we downgrade a topic from 0.11.0 to 0.10.0 after it contains messages with\n version 0.11.0, we will return the 0.11.0 messages without down conversion due to an optimisation in the\n handling of fetch requests. This will break any consumer that doesn't support 0.11.0. So, in practice, step 4\n is similar to step 2 and it didn't seem worth it to increase the cluster size to in order to add a step 5 that\n would change the message format version for the topic back to 0.9.0.0.\n "
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {'partitions': 3, 'replication-factor': 3, 'configs': {'min.insync.replicas': 2}}})
self.kafka.start()
self.logger.info('First format change to 0.9.0')
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, 'group1')
self.logger.info('Second format change to 0.10.0')
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, 'group2')
self.logger.info('Third format change to 0.11.0')
self.kafka.alter_message_format(self.topic, str(LATEST_0_11))
self.produce_and_consume(producer_version, consumer_version, 'group3')
if ((producer_version == str(DEV_BRANCH)) and (consumer_version == str(DEV_BRANCH))):
self.logger.info('Fourth format change back to 0.10.0')
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, 'group4')
| -1,733,980,396,280,035,000
|
This test performs the following checks:
The workload is a mix of 0.9.x, 0.10.x and 0.11.x producers and consumers
that produce to and consume from a DEV_BRANCH cluster
1. initially the topic is using message format 0.9.0
2. change the message format version for topic to 0.10.0 on the fly.
3. change the message format version for topic to 0.11.0 on the fly.
4. change the message format version for topic back to 0.10.0 on the fly (only if the client version is 0.11.0 or newer)
- The producers and consumers should not have any issue.
Note regarding step number 4. Downgrading the message format version is generally unsupported as it breaks
older clients. More concretely, if we downgrade a topic from 0.11.0 to 0.10.0 after it contains messages with
version 0.11.0, we will return the 0.11.0 messages without down conversion due to an optimisation in the
handling of fetch requests. This will break any consumer that doesn't support 0.11.0. So, in practice, step 4
is similar to step 2 and it didn't seem worth it to increase the cluster size in order to add a step 5 that
would change the message format version for the topic back to 0.9.0.0.
|
tests/kafkatest/tests/client/message_format_change_test.py
|
test_compatibility
|
1810824959/kafka
|
python
|
@cluster(num_nodes=12)
@parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH))
@parametrize(producer_version=str(LATEST_0_10), consumer_version=str(LATEST_0_10))
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
def test_compatibility(self, producer_version, consumer_version):
" This tests performs the following checks:\n The workload is a mix of 0.9.x, 0.10.x and 0.11.x producers and consumers\n that produce to and consume from a DEV_BRANCH cluster\n 1. initially the topic is using message format 0.9.0\n 2. change the message format version for topic to 0.10.0 on the fly.\n 3. change the message format version for topic to 0.11.0 on the fly.\n 4. change the message format version for topic back to 0.10.0 on the fly (only if the client version is 0.11.0 or newer)\n - The producers and consumers should not have any issue.\n\n Note regarding step number 4. Downgrading the message format version is generally unsupported as it breaks\n older clients. More concretely, if we downgrade a topic from 0.11.0 to 0.10.0 after it contains messages with\n version 0.11.0, we will return the 0.11.0 messages without down conversion due to an optimisation in the\n handling of fetch requests. This will break any consumer that doesn't support 0.11.0. So, in practice, step 4\n is similar to step 2 and it didn't seem worth it to increase the cluster size to in order to add a step 5 that\n would change the message format version for the topic back to 0.9.0.0.\n "
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {'partitions': 3, 'replication-factor': 3, 'configs': {'min.insync.replicas': 2}}})
self.kafka.start()
self.logger.info('First format change to 0.9.0')
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, 'group1')
self.logger.info('Second format change to 0.10.0')
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, 'group2')
self.logger.info('Third format change to 0.11.0')
self.kafka.alter_message_format(self.topic, str(LATEST_0_11))
self.produce_and_consume(producer_version, consumer_version, 'group3')
if ((producer_version == str(DEV_BRANCH)) and (consumer_version == str(DEV_BRANCH))):
self.logger.info('Fourth format change back to 0.10.0')
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, 'group4')
|
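The step-4 gate in this test only rolls the message format back when both clients can cope. A minimal sketch of that gate, using plain version strings rather than the ducktape version objects the test actually compares (the names below are stand-ins):

```python
DEV_BRANCH = "dev"  # hypothetical stand-in for kafkatest's DEV_BRANCH object

def should_test_downgrade(producer_version, consumer_version):
    # Rolling the format back to 0.10.0 is only exercised when both
    # clients are new enough not to break on 0.11.0-format messages.
    return producer_version == DEV_BRANCH and consumer_version == DEV_BRANCH

assert should_test_downgrade("dev", "dev")
assert not should_test_downgrade("0.10.2", "dev")
```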
def make_finite(data_gen):
'An adapter for Keras data generators that makes them finite.\n\n The default behavior in Keras is to keep looping infinitely through\n the data.\n\n Args:\n data_gen: An infinite Keras data generator.\n\n Yields:\n Same values as the parameter generator.\n '
num_samples = data_gen.samples
num_processed = 0
for batch in data_gen:
batch_size = batch[0].shape[0]
if ((batch_size + num_processed) > num_samples):
batch_size = (num_samples - num_processed)
should_stop = True
else:
should_stop = False
if (batch_size == 0):
return
batch = tuple((x[:batch_size] for x in batch))
(yield batch)
num_processed += batch_size
if should_stop:
return
| 8,984,614,373,540,869,000
|
An adapter for Keras data generators that makes them finite.
The default behavior in Keras is to keep looping infinitely through
the data.
Args:
data_gen: An infinite Keras data generator.
Yields:
Same values as the parameter generator.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
make_finite
|
121Y/examples
|
python
|
def make_finite(data_gen):
'An adapter for Keras data generators that makes them finite.\n\n The default behavior in Keras is to keep looping infinitely through\n the data.\n\n Args:\n data_gen: An infinite Keras data generator.\n\n Yields:\n Same values as the parameter generator.\n '
num_samples = data_gen.samples
num_processed = 0
for batch in data_gen:
batch_size = batch[0].shape[0]
if ((batch_size + num_processed) > num_samples):
batch_size = (num_samples - num_processed)
should_stop = True
else:
should_stop = False
if (batch_size == 0):
return
batch = tuple((x[:batch_size] for x in batch))
(yield batch)
num_processed += batch_size
if should_stop:
return
|
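The adapter's behaviour can be checked without Keras. A runnable sketch, assuming make_finite from the row above is in scope; FakeImageGen is a hypothetical stand-in exposing the two things the adapter relies on, iteration and a .samples count:

```python
import itertools
import numpy as np

class FakeImageGen:
    samples = 5  # total samples in the pretend dataset

    def __iter__(self):
        batch = (np.zeros((2, 4), np.float32), np.zeros((2, 3), np.float32))
        return itertools.repeat(batch)  # loops forever, like Keras generators

# Batches come out as 2, 2, then a trimmed 1, and iteration stops at 5.
total = sum(x.shape[0] for x, _ in make_finite(FakeImageGen()))
assert total == FakeImageGen.samples
```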
def pad_batch(batch, batch_size):
'Resize batch to a given size, tiling present samples over missing.\n\n Example:\n Suppose batch_size is 5, batch is [1, 2].\n Then the return value is [1, 2, 1, 2, 1].\n\n Args:\n batch: An ndarray with first dimension size <= batch_size.\n batch_size: Desired size for first dimension.\n\n Returns:\n An ndarray of the same shape, except first dimension has\n the desired size.\n '
padded = np.zeros(((batch_size,) + batch.shape[1:]), dtype=batch.dtype)
next_idx = 0
while (next_idx < batch_size):
fill_len = min(batch.shape[0], (batch_size - next_idx))
padded[next_idx:(next_idx + fill_len)] = batch[:fill_len]
next_idx += fill_len
return padded
| -8,425,979,928,581,526,000
|
Resize batch to a given size, tiling present samples over missing.
Example:
Suppose batch_size is 5, batch is [1, 2].
Then the return value is [1, 2, 1, 2, 1].
Args:
batch: An ndarray with first dimension size <= batch_size.
batch_size: Desired size for first dimension.
Returns:
An ndarray of the same shape, except first dimension has
the desired size.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
pad_batch
|
121Y/examples
|
python
|
def pad_batch(batch, batch_size):
'Resize batch to a given size, tiling present samples over missing.\n\n Example:\n Suppose batch_size is 5, batch is [1, 2].\n Then the return value is [1, 2, 1, 2, 1].\n\n Args:\n batch: An ndarray with first dimension size <= batch_size.\n batch_size: Desired size for first dimension.\n\n Returns:\n An ndarray of the same shape, except first dimension has\n the desired size.\n '
padded = np.zeros(((batch_size,) + batch.shape[1:]), dtype=batch.dtype)
next_idx = 0
while (next_idx < batch_size):
fill_len = min(batch.shape[0], (batch_size - next_idx))
padded[next_idx:(next_idx + fill_len)] = batch[:fill_len]
next_idx += fill_len
return padded
|
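The docstring's own example doubles as a quick check, assuming pad_batch from the row above is in scope:

```python
import numpy as np

# Tiling [1, 2] up to a batch of 5 repeats the present samples in order.
padded = pad_batch(np.array([1, 2]), 5)
assert padded.tolist() == [1, 2, 1, 2, 1]
```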
def __init__(self, dataset_dir, base_model, head_model, optimizer):
'Creates a wrapper for a set of models and a data set.'
self.dataset_dir = dataset_dir
datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=(1.0 / 255), validation_split=VALIDATION_SPLIT)
self.train_img_generator = datagen.flow_from_directory(self.dataset_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, subset='training')
self.val_img_generator = datagen.flow_from_directory(self.dataset_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, subset='validation')
converter = tflite_transfer_converter.TFLiteTransferConverter(NUM_CLASSES, base_model, head_model, optimizer, BATCH_SIZE)
models = converter._convert()
self.initialize_model = models['initialize']
self.bottleneck_model = models['bottleneck']
self.train_head_model = models['train_head']
self.inference_model = models['inference']
self.optimizer_model = models['optimizer']
self.variables = self._generate_initial_variables()
optim_state_shapes = self._optimizer_state_shapes()
self.optim_state = [np.zeros(shape, dtype=np.float32) for shape in optim_state_shapes]
| -7,315,273,624,684,874,000
|
Creates a wrapper for a set of models and a data set.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
__init__
|
121Y/examples
|
python
|
def __init__(self, dataset_dir, base_model, head_model, optimizer):
self.dataset_dir = dataset_dir
datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=(1.0 / 255), validation_split=VALIDATION_SPLIT)
self.train_img_generator = datagen.flow_from_directory(self.dataset_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, subset='training')
self.val_img_generator = datagen.flow_from_directory(self.dataset_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, subset='validation')
converter = tflite_transfer_converter.TFLiteTransferConverter(NUM_CLASSES, base_model, head_model, optimizer, BATCH_SIZE)
models = converter._convert()
self.initialize_model = models['initialize']
self.bottleneck_model = models['bottleneck']
self.train_head_model = models['train_head']
self.inference_model = models['inference']
self.optimizer_model = models['optimizer']
self.variables = self._generate_initial_variables()
optim_state_shapes = self._optimizer_state_shapes()
self.optim_state = [np.zeros(shape, dtype=np.float32) for shape in optim_state_shapes]
|
def _generate_initial_variables(self):
'Generates the initial model variables.'
interpreter = tf.lite.Interpreter(model_content=self.initialize_model)
zero_in = interpreter.get_input_details()[0]
variable_outs = interpreter.get_output_details()
interpreter.allocate_tensors()
interpreter.set_tensor(zero_in['index'], np.float32(0.0))
interpreter.invoke()
return [interpreter.get_tensor(var['index']) for var in variable_outs]
| 6,259,699,830,710,801,000
|
Generates the initial model variables.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
_generate_initial_variables
|
121Y/examples
|
python
|
def _generate_initial_variables(self):
interpreter = tf.lite.Interpreter(model_content=self.initialize_model)
zero_in = interpreter.get_input_details()[0]
variable_outs = interpreter.get_output_details()
interpreter.allocate_tensors()
interpreter.set_tensor(zero_in['index'], np.float32(0.0))
interpreter.invoke()
return [interpreter.get_tensor(var['index']) for var in variable_outs]
|
def _optimizer_state_shapes(self):
'Reads the shapes of the optimizer parameters (mutable state).'
interpreter = tf.lite.Interpreter(model_content=self.optimizer_model)
num_variables = len(self.variables)
optim_state_inputs = interpreter.get_input_details()[(num_variables * 2):]
return [input_['shape'] for input_ in optim_state_inputs]
| 8,370,520,153,972,423,000
|
Reads the shapes of the optimizer parameters (mutable state).
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
_optimizer_state_shapes
|
121Y/examples
|
python
|
def _optimizer_state_shapes(self):
interpreter = tf.lite.Interpreter(model_content=self.optimizer_model)
num_variables = len(self.variables)
optim_state_inputs = interpreter.get_input_details()[(num_variables * 2):]
return [input_['shape'] for input_ in optim_state_inputs]
|
def prepare_bottlenecks(self):
'Passes all images through the base model and save the bottlenecks.\n\n This method has to be called before any training or inference.\n '
(self.train_bottlenecks, self.train_labels) = self._collect_and_generate_bottlenecks(self.train_img_generator)
(self.val_bottlenecks, self.val_labels) = self._collect_and_generate_bottlenecks(self.val_img_generator)
| 2,736,615,339,603,315,700
|
Passes all images through the base model and saves the bottlenecks.
This method has to be called before any training or inference.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
prepare_bottlenecks
|
121Y/examples
|
python
|
def prepare_bottlenecks(self):
'Passes all images through the base model and save the bottlenecks.\n\n This method has to be called before any training or inference.\n '
(self.train_bottlenecks, self.train_labels) = self._collect_and_generate_bottlenecks(self.train_img_generator)
(self.val_bottlenecks, self.val_labels) = self._collect_and_generate_bottlenecks(self.val_img_generator)
|
def _collect_and_generate_bottlenecks(self, image_gen):
'Consumes a generator and converts all images to bottlenecks.\n\n Args:\n image_gen: A Keras data generator for images to process\n\n Returns:\n Two NumPy arrays: (bottlenecks, labels).\n '
collected_bottlenecks = np.zeros(((image_gen.samples,) + BOTTLENECK_SHAPE), dtype=np.float32)
collected_labels = np.zeros((image_gen.samples, NUM_CLASSES), dtype=np.float32)
next_idx = 0
for (bottlenecks, truth) in self._generate_bottlenecks(make_finite(image_gen)):
batch_size = bottlenecks.shape[0]
collected_bottlenecks[next_idx:(next_idx + batch_size)] = bottlenecks
collected_labels[next_idx:(next_idx + batch_size)] = truth
next_idx += batch_size
return (collected_bottlenecks, collected_labels)
| 2,256,107,985,492,601,900
|
Consumes a generator and converts all images to bottlenecks.
Args:
image_gen: A Keras data generator for images to process
Returns:
Two NumPy arrays: (bottlenecks, labels).
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
_collect_and_generate_bottlenecks
|
121Y/examples
|
python
|
def _collect_and_generate_bottlenecks(self, image_gen):
'Consumes a generator and converts all images to bottlenecks.\n\n Args:\n image_gen: A Keras data generator for images to process\n\n Returns:\n Two NumPy arrays: (bottlenecks, labels).\n '
collected_bottlenecks = np.zeros(((image_gen.samples,) + BOTTLENECK_SHAPE), dtype=np.float32)
collected_labels = np.zeros((image_gen.samples, NUM_CLASSES), dtype=np.float32)
next_idx = 0
for (bottlenecks, truth) in self._generate_bottlenecks(make_finite(image_gen)):
batch_size = bottlenecks.shape[0]
collected_bottlenecks[next_idx:(next_idx + batch_size)] = bottlenecks
collected_labels[next_idx:(next_idx + batch_size)] = truth
next_idx += batch_size
return (collected_bottlenecks, collected_labels)
|
def _generate_bottlenecks(self, image_gen):
'Generator adapter that passes images through the bottleneck model.\n\n Args:\n image_gen: A generator that returns images to be processed. Images are\n paired with ground truth labels.\n\n Yields:\n Bottlenecks from input images, paired with ground truth labels.\n '
interpreter = tf.lite.Interpreter(model_content=self.bottleneck_model)
[x_in] = interpreter.get_input_details()
[bottleneck_out] = interpreter.get_output_details()
for (x, y) in image_gen:
batch_size = x.shape[0]
interpreter.resize_tensor_input(x_in['index'], (batch_size, IMAGE_SIZE, IMAGE_SIZE, 3))
interpreter.allocate_tensors()
interpreter.set_tensor(x_in['index'], x)
interpreter.invoke()
bottleneck = interpreter.get_tensor(bottleneck_out['index'])
(yield (bottleneck, y))
| -2,999,031,076,376,204,300
|
Generator adapter that passes images through the bottleneck model.
Args:
image_gen: A generator that returns images to be processed. Images are
paired with ground truth labels.
Yields:
Bottlenecks from input images, paired with ground truth labels.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
_generate_bottlenecks
|
121Y/examples
|
python
|
def _generate_bottlenecks(self, image_gen):
'Generator adapter that passes images through the bottleneck model.\n\n Args:\n image_gen: A generator that returns images to be processed. Images are\n paired with ground truth labels.\n\n Yields:\n Bottlenecks from input images, paired with ground truth labels.\n '
interpreter = tf.lite.Interpreter(model_content=self.bottleneck_model)
[x_in] = interpreter.get_input_details()
[bottleneck_out] = interpreter.get_output_details()
for (x, y) in image_gen:
batch_size = x.shape[0]
interpreter.resize_tensor_input(x_in['index'], (batch_size, IMAGE_SIZE, IMAGE_SIZE, 3))
interpreter.allocate_tensors()
interpreter.set_tensor(x_in['index'], x)
interpreter.invoke()
bottleneck = interpreter.get_tensor(bottleneck_out['index'])
(yield (bottleneck, y))
|
def train_head(self, num_epochs):
'Trains the head model for a given number of epochs.\n\n SGD is used as an optimizer.\n\n Args:\n num_epochs: how many epochs should be trained\n\n Returns:\n A list of train_loss values after every epoch trained.\n\n Raises:\n RuntimeError: when prepare_bottlenecks() has not been called.\n '
if (not hasattr(self, 'train_bottlenecks')):
raise RuntimeError('prepare_bottlenecks has not been called')
results = []
for _ in range(num_epochs):
loss = self._train_one_epoch(self._generate_batches(self.train_bottlenecks, self.train_labels))
results.append(loss)
return results
| -6,946,596,642,517,278,000
|
Trains the head model for a given number of epochs.
SGD is used as an optimizer.
Args:
num_epochs: how many epochs should be trained
Returns:
A list of train_loss values after every epoch trained.
Raises:
RuntimeError: when prepare_bottlenecks() has not been called.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
train_head
|
121Y/examples
|
python
|
def train_head(self, num_epochs):
'Trains the head model for a given number of epochs.\n\n SGD is used as an optimizer.\n\n Args:\n num_epochs: how many epochs should be trained\n\n Returns:\n A list of train_loss values after every epoch trained.\n\n Raises:\n RuntimeError: when prepare_bottlenecks() has not been called.\n '
if (not hasattr(self, 'train_bottlenecks')):
raise RuntimeError('prepare_bottlenecks has not been called')
results = []
for _ in range(num_epochs):
loss = self._train_one_epoch(self._generate_batches(self.train_bottlenecks, self.train_labels))
results.append(loss)
return results
|
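Since the docstring names SGD as the optimizer, the converted optimizer model's update amounts to a plain SGD step. A NumPy sketch of that update; the learning rate is a made-up value here (the real one is baked into the converted TFLite model):

```python
import numpy as np

LEARNING_RATE = 0.01  # hypothetical; the converted model fixes the real value

def sgd_step(variables, gradients):
    # Each variable moves against its gradient; plain SGD keeps no extra
    # mutable state, which is why its optim_state can stay empty.
    return [v - LEARNING_RATE * g for v, g in zip(variables, gradients)]

weights = [np.ones((2, 2), np.float32)]
grads = [np.full((2, 2), 0.5, np.float32)]
print(sgd_step(weights, grads)[0])  # every entry becomes 0.995
```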
def _generate_batches(self, x, y):
'Creates a generator that iterates over the data in batches.'
num_total = x.shape[0]
for begin in range(0, num_total, BATCH_SIZE):
end = min((begin + BATCH_SIZE), num_total)
(yield (x[begin:end], y[begin:end]))
| 6,442,735,420,916,530,000
|
Creates a generator that iterates over the data in batches.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
_generate_batches
|
121Y/examples
|
python
|
def _generate_batches(self, x, y):
num_total = x.shape[0]
for begin in range(0, num_total, BATCH_SIZE):
end = min((begin + BATCH_SIZE), num_total)
(yield (x[begin:end], y[begin:end]))
|
def _train_one_epoch(self, train_gen):
'Performs one training epoch.'
interpreter = tf.lite.Interpreter(model_content=self.train_head_model)
interpreter.allocate_tensors()
(x_in, y_in) = interpreter.get_input_details()[:2]
variable_ins = interpreter.get_input_details()[2:]
loss_out = interpreter.get_output_details()[0]
gradient_outs = interpreter.get_output_details()[1:]
epoch_loss = 0.0
num_processed = 0
for (bottlenecks, truth) in train_gen:
batch_size = bottlenecks.shape[0]
if (batch_size < BATCH_SIZE):
bottlenecks = pad_batch(bottlenecks, BATCH_SIZE)
truth = pad_batch(truth, BATCH_SIZE)
interpreter.set_tensor(x_in['index'], bottlenecks)
interpreter.set_tensor(y_in['index'], truth)
for (variable_in, variable_value) in zip(variable_ins, self.variables):
interpreter.set_tensor(variable_in['index'], variable_value)
interpreter.invoke()
loss = interpreter.get_tensor(loss_out['index'])
gradients = [interpreter.get_tensor(gradient_out['index']) for gradient_out in gradient_outs]
self._apply_gradients(gradients)
epoch_loss += (loss * batch_size)
num_processed += batch_size
epoch_loss /= num_processed
return epoch_loss
| -3,484,671,471,586,543,600
|
Performs one training epoch.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
_train_one_epoch
|
121Y/examples
|
python
|
def _train_one_epoch(self, train_gen):
interpreter = tf.lite.Interpreter(model_content=self.train_head_model)
interpreter.allocate_tensors()
(x_in, y_in) = interpreter.get_input_details()[:2]
variable_ins = interpreter.get_input_details()[2:]
loss_out = interpreter.get_output_details()[0]
gradient_outs = interpreter.get_output_details()[1:]
epoch_loss = 0.0
num_processed = 0
for (bottlenecks, truth) in train_gen:
batch_size = bottlenecks.shape[0]
if (batch_size < BATCH_SIZE):
bottlenecks = pad_batch(bottlenecks, BATCH_SIZE)
truth = pad_batch(truth, BATCH_SIZE)
interpreter.set_tensor(x_in['index'], bottlenecks)
interpreter.set_tensor(y_in['index'], truth)
for (variable_in, variable_value) in zip(variable_ins, self.variables):
interpreter.set_tensor(variable_in['index'], variable_value)
interpreter.invoke()
loss = interpreter.get_tensor(loss_out['index'])
gradients = [interpreter.get_tensor(gradient_out['index']) for gradient_out in gradient_outs]
self._apply_gradients(gradients)
epoch_loss += (loss * batch_size)
num_processed += batch_size
epoch_loss /= num_processed
return epoch_loss
|
def _apply_gradients(self, gradients):
'Applies the optimizer to the model parameters.'
interpreter = tf.lite.Interpreter(model_content=self.optimizer_model)
interpreter.allocate_tensors()
num_variables = len(self.variables)
variable_ins = interpreter.get_input_details()[:num_variables]
gradient_ins = interpreter.get_input_details()[num_variables:(num_variables * 2)]
state_ins = interpreter.get_input_details()[(num_variables * 2):]
variable_outs = interpreter.get_output_details()[:num_variables]
state_outs = interpreter.get_output_details()[num_variables:]
for (variable, gradient, variable_in, gradient_in) in zip(self.variables, gradients, variable_ins, gradient_ins):
interpreter.set_tensor(variable_in['index'], variable)
interpreter.set_tensor(gradient_in['index'], gradient)
for (optim_state_elem, state_in) in zip(self.optim_state, state_ins):
interpreter.set_tensor(state_in['index'], optim_state_elem)
interpreter.invoke()
self.variables = [interpreter.get_tensor(variable_out['index']) for variable_out in variable_outs]
self.optim_state = [interpreter.get_tensor(state_out['index']) for state_out in state_outs]
| 1,172,778,093,618,668,000
|
Applies the optimizer to the model parameters.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
_apply_gradients
|
121Y/examples
|
python
|
def _apply_gradients(self, gradients):
interpreter = tf.lite.Interpreter(model_content=self.optimizer_model)
interpreter.allocate_tensors()
num_variables = len(self.variables)
variable_ins = interpreter.get_input_details()[:num_variables]
gradient_ins = interpreter.get_input_details()[num_variables:(num_variables * 2)]
state_ins = interpreter.get_input_details()[(num_variables * 2):]
variable_outs = interpreter.get_output_details()[:num_variables]
state_outs = interpreter.get_output_details()[num_variables:]
for (variable, gradient, variable_in, gradient_in) in zip(self.variables, gradients, variable_ins, gradient_ins):
interpreter.set_tensor(variable_in['index'], variable)
interpreter.set_tensor(gradient_in['index'], gradient)
for (optim_state_elem, state_in) in zip(self.optim_state, state_ins):
interpreter.set_tensor(state_in['index'], optim_state_elem)
interpreter.invoke()
self.variables = [interpreter.get_tensor(variable_out['index']) for variable_out in variable_outs]
self.optim_state = [interpreter.get_tensor(state_out['index']) for state_out in state_outs]
|
def measure_inference_accuracy(self):
'Runs the inference model and measures accuracy on the validation set.'
interpreter = tf.lite.Interpreter(model_content=self.inference_model)
bottleneck_in = interpreter.get_input_details()[0]
variable_ins = interpreter.get_input_details()[1:]
[y_out] = interpreter.get_output_details()
inference_accuracy = 0.0
num_processed = 0
for (bottleneck, truth) in self._generate_batches(self.val_bottlenecks, self.val_labels):
batch_size = bottleneck.shape[0]
interpreter.resize_tensor_input(bottleneck_in['index'], ((batch_size,) + BOTTLENECK_SHAPE))
interpreter.allocate_tensors()
interpreter.set_tensor(bottleneck_in['index'], bottleneck)
for (variable_in, variable_value) in zip(variable_ins, self.variables):
interpreter.set_tensor(variable_in['index'], variable_value)
interpreter.invoke()
preds = interpreter.get_tensor(y_out['index'])
acc = ((np.argmax(preds, axis=1) == np.argmax(truth, axis=1)).sum() / batch_size)
inference_accuracy += (acc * batch_size)
num_processed += batch_size
inference_accuracy /= num_processed
return inference_accuracy
| -1,351,305,172,050,004,500
|
Runs the inference model and measures accuracy on the validation set.
|
lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py
|
measure_inference_accuracy
|
121Y/examples
|
python
|
def measure_inference_accuracy(self):
interpreter = tf.lite.Interpreter(model_content=self.inference_model)
bottleneck_in = interpreter.get_input_details()[0]
variable_ins = interpreter.get_input_details()[1:]
[y_out] = interpreter.get_output_details()
inference_accuracy = 0.0
num_processed = 0
for (bottleneck, truth) in self._generate_batches(self.val_bottlenecks, self.val_labels):
batch_size = bottleneck.shape[0]
interpreter.resize_tensor_input(bottleneck_in['index'], ((batch_size,) + BOTTLENECK_SHAPE))
interpreter.allocate_tensors()
interpreter.set_tensor(bottleneck_in['index'], bottleneck)
for (variable_in, variable_value) in zip(variable_ins, self.variables):
interpreter.set_tensor(variable_in['index'], variable_value)
interpreter.invoke()
preds = interpreter.get_tensor(y_out['index'])
acc = ((np.argmax(preds, axis=1) == np.argmax(truth, axis=1)).sum() / batch_size)
inference_accuracy += (acc * batch_size)
num_processed += batch_size
inference_accuracy /= num_processed
return inference_accuracy
|
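The accuracy rule in measure_inference_accuracy is a plain argmax match. A small illustration with hypothetical logits and one-hot labels:

```python
import numpy as np

preds = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
truth = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])

# A prediction counts as correct when its argmax matches the truth's argmax.
acc = (np.argmax(preds, axis=1) == np.argmax(truth, axis=1)).sum() / len(preds)
print(acc)  # 0.333... -- only the first sample matches
```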
def importance_matrix(sensitivities, data, print_imp=True, show_table=True, tag_to_ablate=None):
'\n Builds a matrix of tag sensitivities\n :param sensitivities: This is a matrix of [num_tags, num_neurons],\n which is [10 x 50] in our experimental configuration.\n :return:\n '
important_lists = []
important_nps = np.zeros(50, dtype=int)
sensitivities = sensitivities[1:]
for i in range(len(sensitivities)):
important_list = []
important_np = np.zeros(50, dtype=int)
tag_sensitivity_row = sensitivities[i]
for j in range(len(tag_sensitivity_row)):
most_important = np.argmax(tag_sensitivity_row)
important_list.append(most_important)
important_np[j] = most_important
index = [most_important]
tag_sensitivity_row[most_important] = np.NINF
important_lists.append(important_list)
important_nps = np.vstack((important_nps, important_np))
important_nps = np.delete(important_nps, 0, axis=0)
np.save('imps.npy', important_nps)
important_nps = np.transpose(important_nps)
if show_table:
sns.set()
sns.set(font_scale=0.5)
x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
del x_tick[0]
ax = sns.heatmap(important_nps, annot=True, xticklabels=x_tick, cmap=ListedColormap(['white']), cbar=False, yticklabels=False, linecolor='gray', linewidths=0.4)
title = 'Importance rankings of neurons per tag'
plt.title(title, fontsize=18)
ttl = ax.title
ttl.set_position([0.5, 1.05])
plt.show()
def trim_model_dir(model_dir):
model_dir = model_dir.replace('/', '-')
return model_dir
ax.figure.savefig('ImportanceRankings-{}.png'.format(trim_model_dir(data.model_dir)))
if print_imp:
imp_file = open('Importance-{}.txt'.format(trim_model_dir(data.model_dir)), 'w+')
print('Neuron importance ranking for each NER tag:')
for (i, l) in enumerate(important_lists):
tags = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
del tags[0]
print('\t{}\t{}'.format(tags[i], l))
imp_file.write('{}\t{}\n'.format(tags[i], l))
imp_file.write('\n')
np.savetxt('Importance-{}.tsv'.format(trim_model_dir(data.model_dir)), important_nps, fmt='%2.0d', delimiter='\t')
return important_nps
| 5,640,006,925,624,561,000
|
Builds a matrix of tag sensitivities
:param sensitivities: This is a matrix of [num_tags, num_neurons],
which is [10 x 50] in our experimental configuration.
:return:
|
main.py
|
importance_matrix
|
DeniseMak/ner-neuron
|
python
|
def importance_matrix(sensitivities, data, print_imp=True, show_table=True, tag_to_ablate=None):
'\n Builds a matrix of tag sensitivities\n :param sensitivities: This is a matrix of [num_tags, num_neurons],\n which is [10 x 50] in our experimental configuration.\n :return:\n '
important_lists = []
important_nps = np.zeros(50, dtype=int)
sensitivities = sensitivities[1:]
for i in range(len(sensitivities)):
important_list = []
important_np = np.zeros(50, dtype=int)
tag_sensitivity_row = sensitivities[i]
for j in range(len(tag_sensitivity_row)):
most_important = np.argmax(tag_sensitivity_row)
important_list.append(most_important)
important_np[j] = most_important
index = [most_important]
tag_sensitivity_row[most_important] = np.NINF
important_lists.append(important_list)
important_nps = np.vstack((important_nps, important_np))
important_nps = np.delete(important_nps, 0, axis=0)
np.save('imps.npy', important_nps)
important_nps = np.transpose(important_nps)
if show_table:
sns.set()
sns.set(font_scale=0.5)
x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
del x_tick[0]
ax = sns.heatmap(important_nps, annot=True, xticklabels=x_tick, cmap=ListedColormap(['white']), cbar=False, yticklabels=False, linecolor='gray', linewidths=0.4)
title = 'Importance rankings of neurons per tag'
plt.title(title, fontsize=18)
ttl = ax.title
ttl.set_position([0.5, 1.05])
plt.show()
def trim_model_dir(model_dir):
model_dir = model_dir.replace('/', '-')
return model_dir
ax.figure.savefig('ImportanceRankings-{}.png'.format(trim_model_dir(data.model_dir)))
if print_imp:
imp_file = open('Importance-{}.txt'.format(trim_model_dir(data.model_dir)), 'w+')
print('Neuron importance ranking for each NER tag:')
for (i, l) in enumerate(important_lists):
tags = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
del tags[0]
print('\t{}\t{}'.format(tags[i], l))
imp_file.write('{}\t{}\n'.format(tags[i], l))
imp_file.write('\n')
np.savetxt('Importance-{}.tsv'.format(trim_model_dir(data.model_dir)), important_nps, fmt='%2.0d', delimiter='\t')
return important_nps
|
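The inner loop of importance_matrix ranks neurons by repeatedly taking the argmax and knocking the winner out with -inf. A tiny sketch with made-up sensitivities:

```python
import numpy as np

row = np.array([0.2, 0.9, 0.5])
ranking = []
for _ in range(len(row)):
    best = int(np.argmax(row))   # current most sensitive neuron
    ranking.append(best)
    row[best] = -np.inf          # so the next argmax finds the runner-up
print(ranking)  # [1, 2, 0] -- neuron indices, most to least sensitive
```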
def heatmap_sensitivity(sensitivities, modelname=DEFAULT_TRAINED_FILE, testname='', show_pad=False, show_vals=True, disable=False):
'\n Shows a heatmap for the sensitivity values, saves the heatmap to a PNG file,\n and also saves the sensitivity matrix to an .npy file,\n which we use for calculating correlations between models later.\n :param sensitivities: This is a matrix of [num_tags, num_neurons],\n which is [10 x 50] in our experimental configuration.\n :param disable: disable is just to turn off for debugging\n :return:\n '
sensitivities = np.transpose(sensitivities)
start = 1
if show_pad:
start = 0
sensitivities = sensitivities[0:50, start:10]
sns.set()
sns.set(font_scale=0.5)
x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
if show_pad:
x_tick[0] = 'PAD'
else:
del x_tick[0]
sensitivities_temp = np.zeros((50, 9))
x_tick_output = ['B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'B-ORG', 'I-ORG', 'B-MISC', 'I-MISC', 'O']
for i in range(len(x_tick_output)):
sensitivities_temp[:, i] = sensitivities[:, x_tick.index(x_tick_output[i])]
np.save((modelname + '_sensitivities.npy'), sensitivities_temp)
if (not disable):
ax = sns.heatmap(sensitivities, xticklabels=x_tick, annot=show_vals, fmt='.2g')
title = ('({}): '.format(testname) + modelname)
plt.title(title, fontsize=18)
ttl = ax.title
ttl.set_position([0.5, 1.05])
plt.show()
ax.figure.savefig((modelname + '_heatmap.png'))
| 6,119,709,356,291,908,000
|
Shows a heatmap for the sensitivity values, saves the heatmap to a PNG file,
and also saves the sensitivity matrix to an .npy file,
which we use for calculating correlations between models later.
:param sensitivities: This is a matrix of [num_tags, num_neurons],
which is [10 x 50] in our experimental configuration.
:param disable: turns the heatmap off; only used for debugging
:return:
|
main.py
|
heatmap_sensitivity
|
DeniseMak/ner-neuron
|
python
|
def heatmap_sensitivity(sensitivities, modelname=DEFAULT_TRAINED_FILE, testname='', show_pad=False, show_vals=True, disable=False):
'\n Shows a heatmap for the sensitivity values, saves the heatmap to a PNG file,\n and also saves the sensitivity matrix to an .npy file,\n which we use for calculating correlations between models later.\n :param sensitivities: This is a matrix of [num_tags, num_neurons],\n which is [10 x 50] in our experimental configuration.\n :param disable: disable is just to turn off for debugging\n :return:\n '
sensitivities = np.transpose(sensitivities)
start = 1
if show_pad:
start = 0
sensitivities = sensitivities[0:50, start:10]
sns.set()
sns.set(font_scale=0.5)
x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]
if show_pad:
x_tick[0] = 'PAD'
else:
del x_tick[0]
sensitivities_temp = np.zeros((50, 9))
x_tick_output = ['B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'B-ORG', 'I-ORG', 'B-MISC', 'I-MISC', 'O']
for i in range(len(x_tick_output)):
sensitivities_temp[:, i] = sensitivities[:, x_tick.index(x_tick_output[i])]
np.save((modelname + '_sensitivities.npy'), sensitivities_temp)
if (not disable):
ax = sns.heatmap(sensitivities, xticklabels=x_tick, annot=show_vals, fmt='.2g')
title = ('({}): '.format(testname) + modelname)
plt.title(title, fontsize=18)
ttl = ax.title
ttl.set_position([0.5, 1.05])
plt.show()
ax.figure.savefig((modelname + '_heatmap.png'))
|
def get_sensitivity_matrix(label, debug=True):
'\n Given a tag like 4: (B-PER), return the sensitivity matrix\n :param label:\n :return:\n '
avg_for_label = (data.tag_contributions[label] / data.tag_counts[label])
sum_other_counts = 0
sum_other_contributions = np.zeros((10, 50))
for l in data.tag_counts:
if ((l != label) and (l != 0)):
sum_other_counts += data.tag_counts[l]
sum_other_contributions += data.tag_contributions[l]
avg_for_others = (sum_other_contributions / sum_other_counts)
s_ij = (avg_for_label - avg_for_others)
s_ij_label = s_ij[label]
return s_ij_label
| -964,061,719,209,157,100
|
Given a tag like 4: (B-PER), return the sensitivity matrix
:param label:
:return:
|
main.py
|
get_sensitivity_matrix
|
DeniseMak/ner-neuron
|
python
|
def get_sensitivity_matrix(label, debug=True):
'\n Given a tag like 4: (B-PER), return the sensitivity matrix\n :param label:\n :return:\n '
avg_for_label = (data.tag_contributions[label] / data.tag_counts[label])
sum_other_counts = 0
sum_other_contributions = np.zeros((10, 50))
for l in data.tag_counts:
if ((l != label) and (l != 0)):
sum_other_counts += data.tag_counts[l]
sum_other_contributions += data.tag_contributions[l]
avg_for_others = (sum_other_contributions / sum_other_counts)
s_ij = (avg_for_label - avg_for_others)
s_ij_label = s_ij[label]
return s_ij_label
|
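The sensitivity defined above is a difference of averages: a neuron's mean contribution on the tag's tokens minus its mean contribution across all other non-padding tags. Toy numbers (hypothetical, two tags, two neurons):

```python
import numpy as np

tag_contributions = {1: np.array([6.0, 2.0]), 2: np.array([2.0, 4.0])}
tag_counts = {1: 2, 2: 2}

label = 1
avg_for_label = tag_contributions[label] / tag_counts[label]   # [3., 1.]
avg_for_others = tag_contributions[2] / tag_counts[2]          # [1., 2.]
print(avg_for_label - avg_for_others)                          # [ 2. -1.]
```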
def predict_check(pred_variable, gold_variable, mask_variable, sentence_classification=False):
'\n input:\n pred_variable (batch_size, sent_len): pred tag result, in numpy format\n gold_variable (batch_size, sent_len): gold result variable\n mask_variable (batch_size, sent_len): mask variable\n '
pred = pred_variable.cpu().data.numpy()
gold = gold_variable.cpu().data.numpy()
mask = mask_variable.cpu().data.numpy()
overlaped = (pred == gold)
if sentence_classification:
right_token = np.sum(overlaped)
total_token = overlaped.shape[0]
else:
right_token = np.sum((overlaped * mask))
total_token = mask.sum()
return (right_token, total_token)
| 9,179,701,905,884,813,000
|
input:
pred_variable (batch_size, sent_len): pred tag result, in numpy format
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
|
main.py
|
predict_check
|
DeniseMak/ner-neuron
|
python
|
def predict_check(pred_variable, gold_variable, mask_variable, sentence_classification=False):
'\n input:\n pred_variable (batch_size, sent_len): pred tag result, in numpy format\n gold_variable (batch_size, sent_len): gold result variable\n mask_variable (batch_size, sent_len): mask variable\n '
pred = pred_variable.cpu().data.numpy()
gold = gold_variable.cpu().data.numpy()
mask = mask_variable.cpu().data.numpy()
overlaped = (pred == gold)
if sentence_classification:
right_token = np.sum(overlaped)
total_token = overlaped.shape[0]
else:
right_token = np.sum((overlaped * mask))
total_token = mask.sum()
return (right_token, total_token)
|
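predict_check only counts positions the mask keeps. A usage sketch with hypothetical tensors, assuming predict_check from the row above is in scope:

```python
import torch

pred = torch.tensor([[1, 2, 0], [3, 3, 0]])
gold = torch.tensor([[1, 2, 9], [3, 1, 9]])
mask = torch.tensor([[1, 1, 0], [1, 1, 0]])

right, total = predict_check(pred, gold, mask)
print(right, total)  # 3 4 -- the masked-out third column is ignored
```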
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
'\n input:\n pred_variable (batch_size, sent_len): pred tag result\n gold_variable (batch_size, sent_len): gold result variable\n mask_variable (batch_size, sent_len): mask variable\n '
pred_variable = pred_variable[word_recover]
gold_variable = gold_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = gold_variable.size(0)
if sentence_classification:
pred_tag = pred_variable.cpu().data.numpy().tolist()
gold_tag = gold_variable.cpu().data.numpy().tolist()
pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]
gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]
else:
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if (mask[idx][idy] != 0)]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if (mask[idx][idy] != 0)]
assert (len(pred) == len(gold))
pred_label.append(pred)
gold_label.append(gold)
return (pred_label, gold_label)
| -7,238,641,698,619,610,000
|
input:
pred_variable (batch_size, sent_len): pred tag result
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
|
main.py
|
recover_label
|
DeniseMak/ner-neuron
|
python
|
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
'\n input:\n pred_variable (batch_size, sent_len): pred tag result\n gold_variable (batch_size, sent_len): gold result variable\n mask_variable (batch_size, sent_len): mask variable\n '
pred_variable = pred_variable[word_recover]
gold_variable = gold_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = gold_variable.size(0)
if sentence_classification:
pred_tag = pred_variable.cpu().data.numpy().tolist()
gold_tag = gold_variable.cpu().data.numpy().tolist()
pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]
gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]
else:
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if (mask[idx][idy] != 0)]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if (mask[idx][idy] != 0)]
assert (len(pred) == len(gold))
pred_label.append(pred)
gold_label.append(gold)
return (pred_label, gold_label)
|
def recover_nbest_label(pred_variable, mask_variable, label_alphabet, word_recover):
'\n input:\n pred_variable (batch_size, sent_len, nbest): pred tag result\n mask_variable (batch_size, sent_len): mask variable\n word_recover (batch_size)\n output:\n nbest_pred_label list: [batch_size, nbest, each_seq_len]\n '
pred_variable = pred_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = pred_variable.size(0)
seq_len = pred_variable.size(1)
nbest = pred_variable.size(2)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
for idx in range(batch_size):
pred = []
for idz in range(nbest):
each_pred = [label_alphabet.get_instance(pred_tag[idx][idy][idz]) for idy in range(seq_len) if (mask[idx][idy] != 0)]
pred.append(each_pred)
pred_label.append(pred)
return pred_label
| 7,128,950,410,604,630,000
|
input:
pred_variable (batch_size, sent_len, nbest): pred tag result
mask_variable (batch_size, sent_len): mask variable
word_recover (batch_size)
output:
nbest_pred_label list: [batch_size, nbest, each_seq_len]
|
main.py
|
recover_nbest_label
|
DeniseMak/ner-neuron
|
python
|
def recover_nbest_label(pred_variable, mask_variable, label_alphabet, word_recover):
'\n input:\n pred_variable (batch_size, sent_len, nbest): pred tag result\n mask_variable (batch_size, sent_len): mask variable\n word_recover (batch_size)\n output:\n nbest_pred_label list: [batch_size, nbest, each_seq_len]\n '
pred_variable = pred_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = pred_variable.size(0)
seq_len = pred_variable.size(1)
nbest = pred_variable.size(2)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
for idx in range(batch_size):
pred = []
for idz in range(nbest):
each_pred = [label_alphabet.get_instance(pred_tag[idx][idy][idz]) for idy in range(seq_len) if (mask[idx][idy] != 0)]
pred.append(each_pred)
pred_label.append(pred)
return pred_label
|
def evaluate(data, model, name, nbest=None, print_tag_counts=False, tag_to_ablate=None):
"\n\n :param data:\n :param model:\n :param name:\n :param nbest:\n :param print_tag_counts:\n :param tag_to_ablate: if this is set to a tag name, like 'B-ORG', then in the LSTM layer's forward() we ablate the\n number of neurons specified by data.ablate_num\n :return:\n "
ablate_list_for_tag = None
if tag_to_ablate:
data.ablate_tag = tag_to_ablate
ablate_list_for_tag = data.ablate_list[tag_to_ablate]
print('\nEVALUATE file: {}, set={}, \n\t ablate_num={} tag: {} \nablate_list_for_tag={}'.format(data.model_dir, name, data.current_ablate_ind, tag_to_ablate, ablate_list_for_tag))
if (name == 'train'):
instances = data.train_Ids
elif (name == 'dev'):
instances = data.dev_Ids
elif (name == 'test'):
instances = data.test_Ids
elif (name == 'raw'):
instances = data.raw_Ids
else:
print('Error: wrong evaluate name,', name)
exit(1)
right_token = 0
whole_token = 0
nbest_pred_results = []
pred_scores = []
pred_results = []
gold_results = []
model.eval()
' Get count of model parameters '
batch_size = data.HP_batch_size
start_time = time.time()
train_num = len(instances)
total_batch = ((train_num // batch_size) + 1)
for batch_id in range(total_batch):
start = (batch_id * batch_size)
end = ((batch_id + 1) * batch_size)
if (end > train_num):
end = train_num
instance = instances[start:end]
if (not instance):
continue
(batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask) = batchify_with_label(instance, data.HP_gpu, False, data.sentence_classification)
if (nbest and (not data.sentence_classification)):
(scores, nbest_tag_seq) = model.decode_nbest(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask, nbest)
nbest_pred_result = recover_nbest_label(nbest_tag_seq, mask, data.label_alphabet, batch_wordrecover)
nbest_pred_results += nbest_pred_result
pred_scores += scores[batch_wordrecover].cpu().data.numpy().tolist()
tag_seq = nbest_tag_seq[:, :, 0]
else:
tag_seq = model(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask)
(pred_label, gold_label) = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover, data.sentence_classification)
pred_results += pred_label
gold_results += gold_label
decode_time = (time.time() - start_time)
speed = (len(instances) / decode_time)
(acc, p, r, f) = get_ner_fmeasure(gold_results, pred_results, data.tagScheme, data=data)
if (nbest and (not data.sentence_classification)):
return (speed, acc, p, r, f, nbest_pred_results, pred_scores)
' Get per-tag sensitivity '
sensitivity_matrices = []
for tag in sorted(data.tag_counts):
if print_tag_counts:
if (tag == 0):
print('Padding {}: {} instances.'.format('0', data.tag_counts[tag]))
else:
print('Tag {}: {} instances.'.format(data.label_alphabet.get_instance(tag), data.tag_counts[tag]))
sensitivity_tag = get_sensitivity_matrix(tag)
sensitivity_matrices.append(sensitivity_tag)
sensitivity_combined = np.squeeze(np.stack([sensitivity_matrices]))
return (speed, acc, p, r, f, pred_results, pred_scores, sensitivity_combined)
| 6,244,522,984,307,677,000
|
:param data:
:param model:
:param name:
:param nbest:
:param print_tag_counts:
:param tag_to_ablate: if this is set to a tag name, like 'B-ORG', then in the LSTM layer's forward() we ablate the
number of neurons specified by data.ablate_num
:return:
|
main.py
|
evaluate
|
DeniseMak/ner-neuron
|
python
|
def evaluate(data, model, name, nbest=None, print_tag_counts=False, tag_to_ablate=None):
"\n\n :param data:\n :param model:\n :param name:\n :param nbest:\n :param print_tag_counts:\n :param tag_to_ablate: if this is set to a tag name, like 'B-ORG', then in the LSTM layer's forward() we ablate the\n number of neurons specified by data.ablate_num\n :return:\n "
ablate_list_for_tag = None
if tag_to_ablate:
data.ablate_tag = tag_to_ablate
ablate_list_for_tag = data.ablate_list[tag_to_ablate]
print('\nEVALUATE file: {}, set={}, \n\t ablate_num={} tag: {} \nablate_list_for_tag={}'.format(data.model_dir, name, data.current_ablate_ind, tag_to_ablate, ablate_list_for_tag))
if (name == 'train'):
instances = data.train_Ids
elif (name == 'dev'):
instances = data.dev_Ids
elif (name == 'test'):
instances = data.test_Ids
elif (name == 'raw'):
instances = data.raw_Ids
else:
print('Error: wrong evaluate name,', name)
exit(1)
right_token = 0
whole_token = 0
nbest_pred_results = []
pred_scores = []
pred_results = []
gold_results = []
model.eval()
' Get count of model parameters '
batch_size = data.HP_batch_size
start_time = time.time()
train_num = len(instances)
total_batch = ((train_num // batch_size) + 1)
for batch_id in range(total_batch):
start = (batch_id * batch_size)
end = ((batch_id + 1) * batch_size)
if (end > train_num):
end = train_num
instance = instances[start:end]
if (not instance):
continue
(batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask) = batchify_with_label(instance, data.HP_gpu, False, data.sentence_classification)
if (nbest and (not data.sentence_classification)):
(scores, nbest_tag_seq) = model.decode_nbest(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask, nbest)
nbest_pred_result = recover_nbest_label(nbest_tag_seq, mask, data.label_alphabet, batch_wordrecover)
nbest_pred_results += nbest_pred_result
pred_scores += scores[batch_wordrecover].cpu().data.numpy().tolist()
tag_seq = nbest_tag_seq[:, :, 0]
else:
tag_seq = model(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask)
(pred_label, gold_label) = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover, data.sentence_classification)
pred_results += pred_label
gold_results += gold_label
decode_time = (time.time() - start_time)
speed = (len(instances) / decode_time)
(acc, p, r, f) = get_ner_fmeasure(gold_results, pred_results, data.tagScheme, data=data)
if (nbest and (not data.sentence_classification)):
return (speed, acc, p, r, f, nbest_pred_results, pred_scores)
' Get per-tag sensitivity '
sensitivity_matrices = []
for tag in sorted(data.tag_counts):
if print_tag_counts:
if (tag == 0):
print('Padding {}: {} instances.'.format('0', data.tag_counts[tag]))
else:
print('Tag {}: {} instances.'.format(data.label_alphabet.get_instance(tag), data.tag_counts[tag]))
sensitivity_tag = get_sensitivity_matrix(tag)
sensitivity_matrices.append(sensitivity_tag)
sensitivity_combined = np.squeeze(np.stack([sensitivity_matrices]))
return (speed, acc, p, r, f, pred_results, pred_scores, sensitivity_combined)
|
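One detail worth noting in evaluate() is the batch loop: total_batch = train_num // batch_size + 1 over-counts by one whenever train_num is an exact multiple of batch_size, and the resulting empty final slice is skipped by the `if not instance` guard. A standalone sketch of that pattern:

instances = list(range(10))
batch_size = 5
total_batch = len(instances) // batch_size + 1   # 3, one more than needed here
for batch_id in range(total_batch):
    start = batch_id * batch_size
    end = min((batch_id + 1) * batch_size, len(instances))
    instance = instances[start:end]
    if not instance:           # the empty third slice is skipped
        continue
    print(batch_id, instance)  # prints 0 [0..4], then 1 [5..9]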
def batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train=True):
'\n input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]\n words: word ids for one sentence. (batch_size, sent_len)\n features: features ids for one sentence. (batch_size, sent_len, feature_num)\n chars: char ids for on sentences, various length. (batch_size, sent_len, each_word_length)\n labels: label ids for one sentence. (batch_size, sent_len)\n\n output:\n zero padding for word and char, with their batch length\n word_seq_tensor: (batch_size, max_sent_len) Variable\n feature_seq_tensors: [(batch_size, max_sent_len),...] list of Variable\n word_seq_lengths: (batch_size,1) Tensor\n char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable\n char_seq_lengths: (batch_size*max_sent_len,1) Tensor\n char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order\n label_seq_tensor: (batch_size, max_sent_len)\n mask: (batch_size, max_sent_len)\n '
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0][0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
label_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long())
' 517 '
mask = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).bool()
for (idx, (seq, label, seqlen)) in enumerate(zip(words, labels, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)
mask[idx, :seqlen] = torch.Tensor(([1] * seqlen))
for idy in range(feature_num):
feature_seq_tensors[idy][idx, :seqlen] = torch.LongTensor(features[idx][:, idy])
(word_seq_lengths, word_perm_idx) = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
pad_chars = [(chars[idx] + ([[0]] * (max_seq_len - len(chars[idx])))) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad=if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for (idx, (seq, seqlen)) in enumerate(zip(pad_chars, char_seq_lengths)):
for (idy, (word, wordlen)) in enumerate(zip(seq, seqlen)):
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view((batch_size * max_seq_len), (- 1))
char_seq_lengths = char_seq_lengths[word_perm_idx].view((batch_size * max_seq_len))
(char_seq_lengths, char_perm_idx) = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx]
(_, char_seq_recover) = char_perm_idx.sort(0, descending=False)
(_, word_seq_recover) = word_perm_idx.sort(0, descending=False)
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
char_seq_tensor = char_seq_tensor.cuda()
char_seq_recover = char_seq_recover.cuda()
mask = mask.cuda()
return (word_seq_tensor, feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask)
| 1,337,854,411,642,345,500
|
input: list of words, chars and labels, of various lengths. [[words, features, chars, labels],[words, features, chars,labels],...]
words: word ids for one sentence. (batch_size, sent_len)
features: feature ids for one sentence. (batch_size, sent_len, feature_num)
chars: char ids for one sentence, of various lengths. (batch_size, sent_len, each_word_length)
labels: label ids for one sentence. (batch_size, sent_len)
output:
zero padding for word and char, with their batch length
word_seq_tensor: (batch_size, max_sent_len) Variable
feature_seq_tensors: [(batch_size, max_sent_len),...] list of Variable
word_seq_lengths: (batch_size,1) Tensor
char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable
char_seq_lengths: (batch_size*max_sent_len,1) Tensor
char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order
label_seq_tensor: (batch_size, max_sent_len)
mask: (batch_size, max_sent_len)
|
main.py
|
batchify_sequence_labeling_with_label
|
DeniseMak/ner-neuron
|
python
|
def batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train=True):
'\n input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]\n words: word ids for one sentence. (batch_size, sent_len)\n features: features ids for one sentence. (batch_size, sent_len, feature_num)\n chars: char ids for on sentences, various length. (batch_size, sent_len, each_word_length)\n labels: label ids for one sentence. (batch_size, sent_len)\n\n output:\n zero padding for word and char, with their batch length\n word_seq_tensor: (batch_size, max_sent_len) Variable\n feature_seq_tensors: [(batch_size, max_sent_len),...] list of Variable\n word_seq_lengths: (batch_size,1) Tensor\n char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable\n char_seq_lengths: (batch_size*max_sent_len,1) Tensor\n char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order\n label_seq_tensor: (batch_size, max_sent_len)\n mask: (batch_size, max_sent_len)\n '
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0][0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
label_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long())
' 517 '
mask = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).bool()
for (idx, (seq, label, seqlen)) in enumerate(zip(words, labels, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)
mask[idx, :seqlen] = torch.Tensor(([1] * seqlen))
for idy in range(feature_num):
feature_seq_tensors[idy][idx, :seqlen] = torch.LongTensor(features[idx][:, idy])
(word_seq_lengths, word_perm_idx) = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
pad_chars = [(chars[idx] + ([[0]] * (max_seq_len - len(chars[idx])))) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad=if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for (idx, (seq, seqlen)) in enumerate(zip(pad_chars, char_seq_lengths)):
for (idy, (word, wordlen)) in enumerate(zip(seq, seqlen)):
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view((batch_size * max_seq_len), (- 1))
char_seq_lengths = char_seq_lengths[word_perm_idx].view((batch_size * max_seq_len))
(char_seq_lengths, char_perm_idx) = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx]
(_, char_seq_recover) = char_perm_idx.sort(0, descending=False)
(_, word_seq_recover) = word_perm_idx.sort(0, descending=False)
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
char_seq_tensor = char_seq_tensor.cuda()
char_seq_recover = char_seq_recover.cuda()
mask = mask.cuda()
return (word_seq_tensor, feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask)
|
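The sorting dance in the middle of this function is worth isolating: sequences are sorted by descending length (the layout pack_padded_sequence historically required), and word_seq_recover is the inverse permutation used later to restore the original sentence order. A minimal torch sketch:

import torch

word_seq_lengths = torch.LongTensor([3, 5, 2])
word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
print(word_seq_lengths)   # tensor([5, 3, 2])
print(word_perm_idx)      # tensor([1, 0, 2])

_, word_seq_recover = word_perm_idx.sort(0, descending=False)
batch = torch.arange(3)[word_perm_idx]   # stand-in for any per-sentence tensor
print(batch[word_seq_recover])           # tensor([0, 1, 2]) -- original order restored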
def batchify_sentence_classification_with_label(input_batch_list, gpu, if_train=True):
'\n input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]\n words: word ids for one sentence. (batch_size, sent_len)\n features: features ids for one sentence. (batch_size, feature_num), each sentence has one set of feature\n chars: char ids for on sentences, various length. (batch_size, sent_len, each_word_length)\n labels: label ids for one sentence. (batch_size,), each sentence has one set of feature\n\n output:\n zero padding for word and char, with their batch length\n word_seq_tensor: (batch_size, max_sent_len) Variable\n feature_seq_tensors: [(batch_size,), ... ] list of Variable\n word_seq_lengths: (batch_size,1) Tensor\n char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable\n char_seq_lengths: (batch_size*max_sent_len,1) Tensor\n char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order\n label_seq_tensor: (batch_size, )\n mask: (batch_size, max_sent_len)\n '
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
label_seq_tensor = torch.zeros((batch_size,), requires_grad=if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long())
' 517 '
mask = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).bool()
label_seq_tensor = torch.LongTensor(labels)
for (idx, (seq, seqlen)) in enumerate(zip(words, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
mask[idx, :seqlen] = torch.Tensor(([1] * seqlen))
for idy in range(feature_num):
feature_seq_tensors[idy][idx, :seqlen] = torch.LongTensor(features[idx][:, idy])
(word_seq_lengths, word_perm_idx) = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
pad_chars = [(chars[idx] + ([[0]] * (max_seq_len - len(chars[idx])))) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad=if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for (idx, (seq, seqlen)) in enumerate(zip(pad_chars, char_seq_lengths)):
for (idy, (word, wordlen)) in enumerate(zip(seq, seqlen)):
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view((batch_size * max_seq_len), (- 1))
char_seq_lengths = char_seq_lengths[word_perm_idx].view((batch_size * max_seq_len))
(char_seq_lengths, char_perm_idx) = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx]
(_, char_seq_recover) = char_perm_idx.sort(0, descending=False)
(_, word_seq_recover) = word_perm_idx.sort(0, descending=False)
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
char_seq_tensor = char_seq_tensor.cuda()
char_seq_recover = char_seq_recover.cuda()
mask = mask.cuda()
return (word_seq_tensor, feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask)
| -4,112,489,582,308,551,700
|
input: list of words, chars and labels, of various lengths. [[words, features, chars, labels],[words, features, chars,labels],...]
words: word ids for one sentence. (batch_size, sent_len)
features: feature ids for one sentence. (batch_size, feature_num), each sentence has one set of features
chars: char ids for one sentence, of various lengths. (batch_size, sent_len, each_word_length)
labels: label ids, one per sentence. (batch_size,)
output:
zero padding for word and char, with their batch length
word_seq_tensor: (batch_size, max_sent_len) Variable
feature_seq_tensors: [(batch_size,), ... ] list of Variable
word_seq_lengths: (batch_size,1) Tensor
char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable
char_seq_lengths: (batch_size*max_sent_len,1) Tensor
char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order
label_seq_tensor: (batch_size, )
mask: (batch_size, max_sent_len)
|
main.py
|
batchify_sentence_classification_with_label
|
DeniseMak/ner-neuron
|
python
|
def batchify_sentence_classification_with_label(input_batch_list, gpu, if_train=True):
'\n input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]\n words: word ids for one sentence. (batch_size, sent_len)\n features: features ids for one sentence. (batch_size, feature_num), each sentence has one set of feature\n chars: char ids for on sentences, various length. (batch_size, sent_len, each_word_length)\n labels: label ids for one sentence. (batch_size,), each sentence has one set of feature\n\n output:\n zero padding for word and char, with their batch length\n word_seq_tensor: (batch_size, max_sent_len) Variable\n feature_seq_tensors: [(batch_size,), ... ] list of Variable\n word_seq_lengths: (batch_size,1) Tensor\n char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable\n char_seq_lengths: (batch_size*max_sent_len,1) Tensor\n char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order\n label_seq_tensor: (batch_size, )\n mask: (batch_size, max_sent_len)\n '
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
label_seq_tensor = torch.zeros((batch_size,), requires_grad=if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long())
' 517 '
mask = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).bool()
label_seq_tensor = torch.LongTensor(labels)
for (idx, (seq, seqlen)) in enumerate(zip(words, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
mask[idx, :seqlen] = torch.Tensor(([1] * seqlen))
for idy in range(feature_num):
feature_seq_tensors[idy][idx, :seqlen] = torch.LongTensor(features[idx][:, idy])
(word_seq_lengths, word_perm_idx) = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
pad_chars = [(chars[idx] + ([[0]] * (max_seq_len - len(chars[idx])))) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad=if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for (idx, (seq, seqlen)) in enumerate(zip(pad_chars, char_seq_lengths)):
for (idy, (word, wordlen)) in enumerate(zip(seq, seqlen)):
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view((batch_size * max_seq_len), (- 1))
char_seq_lengths = char_seq_lengths[word_perm_idx].view((batch_size * max_seq_len))
(char_seq_lengths, char_perm_idx) = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx]
(_, char_seq_recover) = char_perm_idx.sort(0, descending=False)
(_, word_seq_recover) = word_perm_idx.sort(0, descending=False)
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
char_seq_tensor = char_seq_tensor.cuda()
char_seq_recover = char_seq_recover.cuda()
mask = mask.cuda()
return (word_seq_tensor, feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask)
|
def load_model_to_test(data, train=False, dev=True, test=False, tag=None):
'\n Set any ONE of train, dev, test to true, in order to evaluate on that set.\n :param data:\n :param train:\n :param dev: Default set to test, because that was what the original experiment did\n :param test:\n :return:\n '
print('Load pretrained model...')
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
model.load_state_dict(torch.load(data.pretrained_model_path))
'----------------TESTING----------------'
if train:
(speed, acc, p, r, f, _, _, train_sensitivities) = evaluate(data, model, 'train')
heatmap_sensitivity(train_sensitivities, data.pretrained_model_path, testname='train')
if data.seg:
current_score = f
print(('Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f' % (speed, acc, p, r, f)))
else:
current_score = acc
print(('Speed: %.2fst/s; acc: %.4f' % (speed, acc)))
if dev:
(speed, acc, p, r, f, _, _, sensitivities) = evaluate(data, model, 'dev', tag_to_ablate=tag)
if data.seg:
current_score = f
print(('Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f' % (speed, acc, p, r, f)))
else:
current_score = acc
print(('Speed: %.2fst/s; acc: %.4f' % (speed, acc)))
if (data.ablate_num == 0):
heatmap_sensitivity(sensitivities, data.pretrained_model_path, testname='dev')
importance_matrix(sensitivities, data)
if test:
(speed, acc, p, r, f, _, _) = evaluate(data, model, 'test')
if data.seg:
print(('Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f' % (speed, acc, p, r, f)))
else:
print(('Speed: %.2fst/s; acc: %.4f' % (speed, acc)))
return
| -2,669,934,518,169,888,000
|
Set any ONE of train, dev, test to true, in order to evaluate on that set.
:param data:
:param train:
:param dev: Defaults to True; dev is the default set to evaluate because that was what the original experiment used
:param test:
:return:
|
main.py
|
load_model_to_test
|
DeniseMak/ner-neuron
|
python
|
def load_model_to_test(data, train=False, dev=True, test=False, tag=None):
'\n Set any ONE of train, dev, test to true, in order to evaluate on that set.\n :param data:\n :param train:\n :param dev: Default set to test, because that was what the original experiment did\n :param test:\n :return:\n '
print('Load pretrained model...')
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
model.load_state_dict(torch.load(data.pretrained_model_path))
'----------------TESTING----------------'
if train:
(speed, acc, p, r, f, _, _, train_sensitivities) = evaluate(data, model, 'train')
heatmap_sensitivity(train_sensitivities, data.pretrained_model_path, testname='train')
if data.seg:
current_score = f
print(('Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f' % (speed, acc, p, r, f)))
else:
current_score = acc
print(('Speed: %.2fst/s; acc: %.4f' % (speed, acc)))
if dev:
(speed, acc, p, r, f, _, _, sensitivities) = evaluate(data, model, 'dev', tag_to_ablate=tag)
if data.seg:
current_score = f
print(('Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f' % (speed, acc, p, r, f)))
else:
current_score = acc
print(('Speed: %.2fst/s; acc: %.4f' % (speed, acc)))
if (data.ablate_num == 0):
heatmap_sensitivity(sensitivities, data.pretrained_model_path, testname='dev')
importance_matrix(sensitivities, data)
if test:
(speed, acc, p, r, f, _, _) = evaluate(data, model, 'test')
if data.seg:
print(('Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f' % (speed, acc, p, r, f)))
else:
print(('Speed: %.2fst/s; acc: %.4f' % (speed, acc)))
return
|
@pytest.fixture
def preprocessor():
'Return an instance of FixLatexPreprocessor.'
return FixLatexPreprocessor()
| 3,498,699,546,808,116,700
|
Return an instance of FixLatexPreprocessor.
|
tests/preprocessors/test_fixlatex.py
|
preprocessor
|
IMTorgDemo/hugo-nb2hugo
|
python
|
@pytest.fixture
def preprocessor():
return FixLatexPreprocessor()
|
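For context, a minimal usage sketch: pytest injects the fixture into any test that names it as a parameter. FixLatexPreprocessor's own API isn't shown in this record, so the assertion below is deliberately generic.

def test_preprocessor_is_created(preprocessor):
    assert preprocessor is not None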
def __init__(self, n_dims_before, dims_neighbourhoods, strides=None, ignore_border=False, inverse=False):
'\n This extracts neighbourhoods from "images", but in a\n dimension-generic manner.\n\n In the 2D case, this is similar to downsampling, but instead of reducing\n a group of 2x2 pixels (for example) to a single new pixel in the output,\n you place those 4 pixels in a row.\n\n For example, say you have this 2x4 image::\n\n [ [ 0.5, 0.6, 0.7, 0.8 ],\n [ 0.1, 0.2, 0.3, 0.4 ] ]\n\n and you want to extract 2x2 neighbourhoods. This op would then produce::\n\n [ [ [ 0.5, 0.6, 0.1, 0.2 ] ], # the first 2x2 group of pixels\n [ [ 0.7, 0.8, 0.3, 0.4 ] ] ] # the second one\n\n so think of a 2D downsampling where each pixel of the resulting array\n is replaced by an array containing the (flattened) pixels of the\n corresponding neighbourhood.\n\n If you provide a stack of 2D image, or multiple stacks, each image\n will be treated independently, and the first dimensions of the array\n will be preserved as such.\n\n This also makes sense in the 1D or 3D case. Below I\'ll still be calling\n those "images", by analogy.\n\n In the 1D case, you\'re\n extracting subsequences from the original sequence. In the 3D case,\n you\'re extracting cuboids. If you ever find a 4D use, tell me! It\n should be possible, anyhow.\n\n Parameters\n ----------\n n_dims_before : int\n Number of dimensions preceding the "images".\n dims_neighbourhoods : tuple of ints\n Exact shape of windows to be extracted (e.g. (2,2) in the case above).\n n_dims_before + len(dims_neighbourhoods) should be equal to the\n number of dimensions in the input given to the op.\n strides : tuple of int\n Number of elements to skip when moving to the next neighbourhood,\n for each dimension of dims_neighbourhoods. There can be overlap\n between neighbourhoods, or gaps.\n ignore_border : bool\n If the dimensions of the neighbourhoods don\'t exactly divide the\n dimensions of the "images", you can either fill the last\n neighbourhood with zeros (False) or drop it entirely (True).\n inverse : bool\n You shouldn\'t have to use this. Only used by child class\n ImagesFromNeighbourhoods which simply reverses the assignment.\n '
self.n_dims_before = n_dims_before
self.dims_neighbourhoods = dims_neighbourhoods
if (strides is not None):
self.strides = strides
else:
self.strides = dims_neighbourhoods
self.ignore_border = ignore_border
self.inverse = inverse
(self.code_string, self.code) = self.make_py_code()
| -7,338,087,771,251,095,000
|
This extracts neighbourhoods from "images", but in a
dimension-generic manner.
In the 2D case, this is similar to downsampling, but instead of reducing
a group of 2x2 pixels (for example) to a single new pixel in the output,
you place those 4 pixels in a row.
For example, say you have this 2x4 image::
[ [ 0.5, 0.6, 0.7, 0.8 ],
[ 0.1, 0.2, 0.3, 0.4 ] ]
and you want to extract 2x2 neighbourhoods. This op would then produce::
[ [ [ 0.5, 0.6, 0.1, 0.2 ] ], # the first 2x2 group of pixels
[ [ 0.7, 0.8, 0.3, 0.4 ] ] ] # the second one
so think of a 2D downsampling where each pixel of the resulting array
is replaced by an array containing the (flattened) pixels of the
corresponding neighbourhood.
If you provide a stack of 2D image, or multiple stacks, each image
will be treated independently, and the first dimensions of the array
will be preserved as such.
This also makes sense in the 1D or 3D case. Below I'll still be calling
those "images", by analogy.
In the 1D case, you're
extracting subsequences from the original sequence. In the 3D case,
you're extracting cuboids. If you ever find a 4D use, tell me! It
should be possible, anyhow.
Parameters
----------
n_dims_before : int
Number of dimensions preceding the "images".
dims_neighbourhoods : tuple of ints
Exact shape of windows to be extracted (e.g. (2,2) in the case above).
n_dims_before + len(dims_neighbourhoods) should be equal to the
number of dimensions in the input given to the op.
strides : tuple of int
Number of elements to skip when moving to the next neighbourhood,
for each dimension of dims_neighbourhoods. There can be overlap
between neighbourhoods, or gaps.
ignore_border : bool
If the dimensions of the neighbourhoods don't exactly divide the
dimensions of the "images", you can either fill the last
neighbourhood with zeros (False) or drop it entirely (True).
inverse : bool
You shouldn't have to use this. Only used by child class
ImagesFromNeighbourhoods which simply reverses the assignment.
|
theano/sandbox/neighbourhoods.py
|
__init__
|
jych/Theano
|
python
|
def __init__(self, n_dims_before, dims_neighbourhoods, strides=None, ignore_border=False, inverse=False):
'\n This extracts neighbourhoods from "images", but in a\n dimension-generic manner.\n\n In the 2D case, this is similar to downsampling, but instead of reducing\n a group of 2x2 pixels (for example) to a single new pixel in the output,\n you place those 4 pixels in a row.\n\n For example, say you have this 2x4 image::\n\n [ [ 0.5, 0.6, 0.7, 0.8 ],\n [ 0.1, 0.2, 0.3, 0.4 ] ]\n\n and you want to extract 2x2 neighbourhoods. This op would then produce::\n\n [ [ [ 0.5, 0.6, 0.1, 0.2 ] ], # the first 2x2 group of pixels\n [ [ 0.7, 0.8, 0.3, 0.4 ] ] ] # the second one\n\n so think of a 2D downsampling where each pixel of the resulting array\n is replaced by an array containing the (flattened) pixels of the\n corresponding neighbourhood.\n\n If you provide a stack of 2D image, or multiple stacks, each image\n will be treated independently, and the first dimensions of the array\n will be preserved as such.\n\n This also makes sense in the 1D or 3D case. Below I\'ll still be calling\n those "images", by analogy.\n\n In the 1D case, you\'re\n extracting subsequences from the original sequence. In the 3D case,\n you\'re extracting cuboids. If you ever find a 4D use, tell me! It\n should be possible, anyhow.\n\n Parameters\n ----------\n n_dims_before : int\n Number of dimensions preceding the "images".\n dims_neighbourhoods : tuple of ints\n Exact shape of windows to be extracted (e.g. (2,2) in the case above).\n n_dims_before + len(dims_neighbourhoods) should be equal to the\n number of dimensions in the input given to the op.\n strides : tuple of int\n Number of elements to skip when moving to the next neighbourhood,\n for each dimension of dims_neighbourhoods. There can be overlap\n between neighbourhoods, or gaps.\n ignore_border : bool\n If the dimensions of the neighbourhoods don\'t exactly divide the\n dimensions of the "images", you can either fill the last\n neighbourhood with zeros (False) or drop it entirely (True).\n inverse : bool\n You shouldn\'t have to use this. Only used by child class\n ImagesFromNeighbourhoods which simply reverses the assignment.\n '
self.n_dims_before = n_dims_before
self.dims_neighbourhoods = dims_neighbourhoods
if (strides is not None):
self.strides = strides
else:
self.strides = dims_neighbourhoods
self.ignore_border = ignore_border
self.inverse = inverse
(self.code_string, self.code) = self.make_py_code()
|
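A minimal numpy sketch (not the Theano op itself) reproducing the 2x2 example from the docstring, with the stride equal to the window shape and no border handling; the real op additionally preserves n_dims_before leading dimensions and arranges the output grid in its own axis order.

import numpy as np

image = np.array([[0.5, 0.6, 0.7, 0.8],
                  [0.1, 0.2, 0.3, 0.4]])
win = (2, 2)
neighbourhoods = []
for i in range(0, image.shape[0], win[0]):
    for j in range(0, image.shape[1], win[1]):
        neighbourhoods.append(image[i:i + win[0], j:j + win[1]].ravel())
print(neighbourhoods)
# [array([0.5, 0.6, 0.1, 0.2]), array([0.7, 0.8, 0.3, 0.4])]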
def get(path):
"\n Define decorator @get('/path')\n "
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'GET'
wrapper.__route__ = path
return wrapper
return decorator
| -1,081,527,196,878,977,900
|
Define decorator @get('/path')
|
qiushaoyi/programs/qsy_program_codes/python3-webapp/www/coroweb.py
|
get
|
qsyPython/Python_play_now
|
python
|
def get(path):
"\n \n "
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'GET'
wrapper.__route__ = path
return wrapper
return decorator
|
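A minimal usage sketch of the decorator above (functools must already be imported where get() is defined): the wrapper only tags the handler with the HTTP method and route string, for a router to read later.

@get('/blog')
def index(request):
    return 'blog index'

print(index.__method__, index.__route__)   # GET /blog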
def post(path):
"\n Define decorator @post('/path')\n "
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'POST'
wrapper.__route__ = path
return wrapper
return decorator
| -1,957,891,444,018,988,300
|
Define decorator @post('/path')
|
qiushaoyi/programs/qsy_program_codes/python3-webapp/www/coroweb.py
|
post
|
qsyPython/Python_play_now
|
python
|
def post(path):
"\n \n "
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
return func(*args, **kw)
wrapper.__method__ = 'POST'
wrapper.__route__ = path
return wrapper
return decorator
|
def _clear_edit_handler_cache(self):
'\n These tests generate new EditHandlers with different settings. The\n cached edit handlers should be cleared before and after each test run\n to ensure that no changes leak through to other tests.\n '
from wagtail.tests.testapp.models import DefaultRichBlockFieldPage
block_page_edit_handler = DefaultRichBlockFieldPage.get_edit_handler()
if block_page_edit_handler._form_class:
rich_text_block = block_page_edit_handler._form_class.base_fields['body'].block.child_blocks['rich_text']
if hasattr(rich_text_block, 'field'):
del rich_text_block.field
for page_class in get_page_models():
page_class.get_edit_handler.cache_clear()
| 6,077,374,927,507,667,000
|
These tests generate new EditHandlers with different settings. The
cached edit handlers should be cleared before and after each test run
to ensure that no changes leak through to other tests.
|
wagtail/wagtailadmin/tests/test_rich_text.py
|
_clear_edit_handler_cache
|
Girbons/wagtail
|
python
|
def _clear_edit_handler_cache(self):
'\n These tests generate new EditHandlers with different settings. The\n cached edit handlers should be cleared before and after each test run\n to ensure that no changes leak through to other tests.\n '
from wagtail.tests.testapp.models import DefaultRichBlockFieldPage
block_page_edit_handler = DefaultRichBlockFieldPage.get_edit_handler()
if block_page_edit_handler._form_class:
rich_text_block = block_page_edit_handler._form_class.base_fields['body'].block.child_blocks['rich_text']
if hasattr(rich_text_block, 'field'):
del rich_text_block.field
for page_class in get_page_models():
page_class.get_edit_handler.cache_clear()
|
def _match_vcs_scheme(url):
"Look for VCS schemes in the URL.\n\n Returns the matched VCS scheme, or None if there's no match.\n "
for scheme in vcs.schemes:
if (url.lower().startswith(scheme) and (url[len(scheme)] in '+:')):
return scheme
return None
| -2,767,249,340,109,071,000
|
Look for VCS schemes in the URL.
Returns the matched VCS scheme, or None if there's no match.
|
src/pip/_internal/index/collector.py
|
_match_vcs_scheme
|
FFY00/pip
|
python
|
def _match_vcs_scheme(url):
"Look for VCS schemes in the URL.\n\n Returns the matched VCS scheme, or None if there's no match.\n "
for scheme in vcs.schemes:
if (url.lower().startswith(scheme) and (url[len(scheme)] in '+:')):
return scheme
return None
|
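A self-contained sketch of the same check, with a hypothetical schemes list standing in for vcs.schemes; the `url[len(scheme)] in '+:'` test is what distinguishes `git+https://...` from, say, `github.com/...`.

schemes = ['git', 'hg', 'svn']   # hypothetical; pip's real list also has combined forms

def match_vcs_scheme(url):
    for scheme in schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            return scheme
    return None

print(match_vcs_scheme('git+https://example.com/repo.git'))  # git
print(match_vcs_scheme('github.com/pypa/pip'))               # None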
def _is_url_like_archive(url):
'Return whether the URL looks like an archive.\n '
filename = Link(url).filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
return True
return False
| -4,260,495,002,805,551,600
|
Return whether the URL looks like an archive.
|
src/pip/_internal/index/collector.py
|
_is_url_like_archive
|
FFY00/pip
|
python
|
def _is_url_like_archive(url):
'\n '
filename = Link(url).filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
return True
return False
|
def _ensure_html_header(response):
'Check the Content-Type header to ensure the response contains HTML.\n\n Raises `_NotHTML` if the content type is not text/html.\n '
content_type = response.headers.get('Content-Type', '')
if (not content_type.lower().startswith('text/html')):
raise _NotHTML(content_type, response.request.method)
| 7,393,163,883,968,440,000
|
Check the Content-Type header to ensure the response contains HTML.
Raises `_NotHTML` if the content type is not text/html.
|
src/pip/_internal/index/collector.py
|
_ensure_html_header
|
FFY00/pip
|
python
|
def _ensure_html_header(response):
'Check the Content-Type header to ensure the response contains HTML.\n\n Raises `_NotHTML` if the content type is not text/html.\n '
    content_type = response.headers.get('Content-Type', '')
if (not content_type.lower().startswith('text/html')):
raise _NotHTML(content_type, response.request.method)
|
def _ensure_html_response(url, session):
'Send a HEAD request to the URL, and ensure the response contains HTML.\n\n Raises `_NotHTTP` if the URL is not available for a HEAD request, or\n `_NotHTML` if the content type is not text/html.\n '
(scheme, netloc, path, query, fragment) = urllib_parse.urlsplit(url)
if (scheme not in {'http', 'https'}):
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
_ensure_html_header(resp)
| 9,111,036,939,874,658,000
|
Send a HEAD request to the URL, and ensure the response contains HTML.
Raises `_NotHTTP` if the URL is not available for a HEAD request, or
`_NotHTML` if the content type is not text/html.
|
src/pip/_internal/index/collector.py
|
_ensure_html_response
|
FFY00/pip
|
python
|
def _ensure_html_response(url, session):
'Send a HEAD request to the URL, and ensure the response contains HTML.\n\n Raises `_NotHTTP` if the URL is not available for a HEAD request, or\n `_NotHTML` if the content type is not text/html.\n '
(scheme, netloc, path, query, fragment) = urllib_parse.urlsplit(url)
if (scheme not in {'http', 'https'}):
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
_ensure_html_header(resp)
|
def _get_html_response(url, session):
'Access an HTML page with GET, and return the response.\n\n This consists of three parts:\n\n 1. If the URL looks suspiciously like an archive, send a HEAD first to\n check the Content-Type is HTML, to avoid downloading a large file.\n Raise `_NotHTTP` if the content type cannot be determined, or\n `_NotHTML` if it is not HTML.\n 2. Actually perform the request. Raise HTTP exceptions on network failures.\n 3. Check the Content-Type header to make sure we got HTML, and raise\n `_NotHTML` otherwise.\n '
if _is_url_like_archive(url):
_ensure_html_response(url, session=session)
logger.debug('Getting page %s', redact_auth_from_url(url))
resp = session.get(url, headers={'Accept': 'text/html', 'Cache-Control': 'max-age=0'})
resp.raise_for_status()
_ensure_html_header(resp)
return resp
| -2,997,599,138,486,661,600
|
Access an HTML page with GET, and return the response.
This consists of three parts:
1. If the URL looks suspiciously like an archive, send a HEAD first to
check the Content-Type is HTML, to avoid downloading a large file.
Raise `_NotHTTP` if the content type cannot be determined, or
`_NotHTML` if it is not HTML.
2. Actually perform the request. Raise HTTP exceptions on network failures.
3. Check the Content-Type header to make sure we got HTML, and raise
`_NotHTML` otherwise.
|
src/pip/_internal/index/collector.py
|
_get_html_response
|
FFY00/pip
|
python
|
def _get_html_response(url, session):
'Access an HTML page with GET, and return the response.\n\n This consists of three parts:\n\n 1. If the URL looks suspiciously like an archive, send a HEAD first to\n check the Content-Type is HTML, to avoid downloading a large file.\n Raise `_NotHTTP` if the content type cannot be determined, or\n `_NotHTML` if it is not HTML.\n 2. Actually perform the request. Raise HTTP exceptions on network failures.\n 3. Check the Content-Type header to make sure we got HTML, and raise\n `_NotHTML` otherwise.\n '
if _is_url_like_archive(url):
_ensure_html_response(url, session=session)
logger.debug('Getting page %s', redact_auth_from_url(url))
resp = session.get(url, headers={'Accept': 'text/html', 'Cache-Control': 'max-age=0'})
resp.raise_for_status()
_ensure_html_header(resp)
return resp
|
def _get_encoding_from_headers(headers):
'Determine if we have any encoding information in our headers.\n '
if (headers and ('Content-Type' in headers)):
(content_type, params) = cgi.parse_header(headers['Content-Type'])
if ('charset' in params):
return params['charset']
return None
| 4,644,743,172,171,733,000
|
Determine if we have any encoding information in our headers.
|
src/pip/_internal/index/collector.py
|
_get_encoding_from_headers
|
FFY00/pip
|
python
|
def _get_encoding_from_headers(headers):
'\n '
if (headers and ('Content-Type' in headers)):
(content_type, params) = cgi.parse_header(headers['Content-Type'])
if ('charset' in params):
return params['charset']
return None
|
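A quick demonstration of the cgi.parse_header() call this relies on (the cgi module is deprecated since Python 3.11 but still available in 3.x):

import cgi

content_type, params = cgi.parse_header('text/html; charset=UTF-8')
print(content_type)           # text/html
print(params.get('charset'))  # UTF-8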
def _determine_base_url(document, page_url):
"Determine the HTML document's base URL.\n\n This looks for a ``<base>`` tag in the HTML document. If present, its href\n attribute denotes the base URL of anchor tags in the document. If there is\n no such tag (or if it does not have a valid href attribute), the HTML\n file's URL is used as the base URL.\n\n :param document: An HTML document representation. The current\n implementation expects the result of ``html5lib.parse()``.\n :param page_url: The URL of the HTML document.\n "
for base in document.findall('.//base'):
href = base.get('href')
if (href is not None):
return href
return page_url
| -3,132,334,396,255,823,400
|
Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
|
src/pip/_internal/index/collector.py
|
_determine_base_url
|
FFY00/pip
|
python
|
def _determine_base_url(document, page_url):
"Determine the HTML document's base URL.\n\n This looks for a ``<base>`` tag in the HTML document. If present, its href\n attribute denotes the base URL of anchor tags in the document. If there is\n no such tag (or if it does not have a valid href attribute), the HTML\n file's URL is used as the base URL.\n\n :param document: An HTML document representation. The current\n implementation expects the result of ``html5lib.parse()``.\n :param page_url: The URL of the HTML document.\n "
for base in document.findall('.//base'):
href = base.get('href')
if (href is not None):
return href
return page_url
|
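A minimal sketch of the <base> lookup, assuming html5lib is installed; with namespaceHTMLElements=False, html5lib.parse() returns a plain ElementTree element that supports findall():

import html5lib

doc = html5lib.parse(
    '<html><head><base href="https://mirror.example/simple/"></head>'
    '<body><a href="pkg-1.0.tar.gz">pkg</a></body></html>',
    namespaceHTMLElements=False)
for base in doc.findall('.//base'):
    print(base.get('href'))   # https://mirror.example/simple/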
def _clean_url_path_part(part):
'\n Clean a "part" of a URL path (i.e. after splitting on "@" characters).\n '
return urllib_parse.quote(urllib_parse.unquote(part))
| -1,455,366,948,776,871,400
|
Clean a "part" of a URL path (i.e. after splitting on "@" characters).
|
src/pip/_internal/index/collector.py
|
_clean_url_path_part
|
FFY00/pip
|
python
|
def _clean_url_path_part(part):
'\n \n '
return urllib_parse.quote(urllib_parse.unquote(part))
|
def _clean_file_url_path(part):
'\n Clean the first part of a URL path that corresponds to a local\n filesystem path (i.e. the first part after splitting on "@" characters).\n '
return urllib_request.pathname2url(urllib_request.url2pathname(part))
| 1,475,497,318,402,513,400
|
Clean the first part of a URL path that corresponds to a local
filesystem path (i.e. the first part after splitting on "@" characters).
|
src/pip/_internal/index/collector.py
|
_clean_file_url_path
|
FFY00/pip
|
python
|
def _clean_file_url_path(part):
'\n Clean the first part of a URL path that corresponds to a local\n filesystem path (i.e. the first part after splitting on "@" characters).\n '
return urllib_request.pathname2url(urllib_request.url2pathname(part))
|
def _clean_url_path(path, is_local_path):
'\n Clean the path portion of a URL.\n '
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
parts = _reserved_chars_re.split(path)
cleaned_parts = []
for (to_clean, reserved) in pairwise(itertools.chain(parts, [''])):
cleaned_parts.append(clean_func(to_clean))
cleaned_parts.append(reserved.upper())
return ''.join(cleaned_parts)
| 8,829,866,879,231,687,000
|
Clean the path portion of a URL.
|
src/pip/_internal/index/collector.py
|
_clean_url_path
|
FFY00/pip
|
python
|
def _clean_url_path(path, is_local_path):
'\n \n '
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
parts = _reserved_chars_re.split(path)
cleaned_parts = []
    for (to_clean, reserved) in pairwise(itertools.chain(parts, [''])):
        cleaned_parts.append(clean_func(to_clean))
        cleaned_parts.append(reserved.upper())
    return ''.join(cleaned_parts)
|
def _clean_link(url):
'\n Make sure a link is fully quoted.\n For example, if \' \' occurs in the URL, it will be replaced with "%20",\n and without double-quoting other characters.\n '
result = urllib_parse.urlparse(url)
is_local_path = (not result.netloc)
path = _clean_url_path(result.path, is_local_path=is_local_path)
return urllib_parse.urlunparse(result._replace(path=path))
| -1,719,449,041,498,693,400
|
Make sure a link is fully quoted.
For example, if ' ' occurs in the URL, it will be replaced with "%20",
and without double-quoting other characters.
|
src/pip/_internal/index/collector.py
|
_clean_link
|
FFY00/pip
|
python
|
def _clean_link(url):
'\n Make sure a link is fully quoted.\n For example, if \' \' occurs in the URL, it will be replaced with "%20",\n and without double-quoting other characters.\n '
result = urllib_parse.urlparse(url)
is_local_path = (not result.netloc)
path = _clean_url_path(result.path, is_local_path=is_local_path)
return urllib_parse.urlunparse(result._replace(path=path))
|
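Two details here are worth seeing in isolation. First, the unquote-then-quote round-trip in the helpers above encodes raw spaces without double-quoting characters that are already percent-encoded. Second, the pairwise() walk in _clean_url_path pairs each path fragment with the reserved separator that followed it; the trailing '' sentinel makes the odd-length split even. Both the pairwise definition and the regex below are assumptions about pip internals not shown in this file (a capturing pattern over '@' and '%2F' is what the splitting logic implies).

import re
from itertools import zip_longest
from urllib.parse import quote, unquote

print(quote(unquote('some path/already%20quoted')))
# some%20path/already%20quoted -- '%20' survives, the raw space gets encoded

_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)   # assumed pattern

def pairwise(iterable):            # assumed: non-overlapping pairs
    it = iter(iterable)
    return zip_longest(it, it)

parts = _reserved_chars_re.split('user@host/a%2fb')
print(parts)                       # ['user', '@', 'host/a', '%2f', 'b']
print(list(pairwise(parts + [''])))
# [('user', '@'), ('host/a', '%2f'), ('b', '')]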
def _create_link_from_element(anchor, page_url, base_url):
'\n Convert an anchor element in a simple repository page to a Link.\n '
href = anchor.get('href')
if (not href):
return None
url = _clean_link(urllib_parse.urljoin(base_url, href))
pyrequire = anchor.get('data-requires-python')
pyrequire = (unescape(pyrequire) if pyrequire else None)
yanked_reason = anchor.get('data-yanked')
if yanked_reason:
yanked_reason = unescape(yanked_reason)
link = Link(url, comes_from=page_url, requires_python=pyrequire, yanked_reason=yanked_reason)
return link
| 3,313,022,715,014,799,000
|
Convert an anchor element in a simple repository page to a Link.
|
src/pip/_internal/index/collector.py
|
_create_link_from_element
|
FFY00/pip
|
python
|
def _create_link_from_element(anchor, page_url, base_url):
'\n \n '
href = anchor.get('href')
if (not href):
return None
url = _clean_link(urllib_parse.urljoin(base_url, href))
pyrequire = anchor.get('data-requires-python')
pyrequire = (unescape(pyrequire) if pyrequire else None)
yanked_reason = anchor.get('data-yanked')
if yanked_reason:
yanked_reason = unescape(yanked_reason)
link = Link(url, comes_from=page_url, requires_python=pyrequire, yanked_reason=yanked_reason)
return link
|
def with_cached_html_pages(fn):
"\n Given a function that parses an Iterable[Link] from an HTMLPage, cache the\n function's result (keyed by CacheablePageContent), unless the HTMLPage\n `page` has `page.cache_link_parsing == False`.\n "
@_lru_cache(maxsize=None)
def wrapper(cacheable_page):
return list(fn(cacheable_page.page))
@functools.wraps(fn)
def wrapper_wrapper(page):
if page.cache_link_parsing:
return wrapper(CacheablePageContent(page))
return list(fn(page))
return wrapper_wrapper
| 7,444,462,802,212,503,000
|
Given a function that parses an Iterable[Link] from an HTMLPage, cache the
function's result (keyed by CacheablePageContent), unless the HTMLPage
`page` has `page.cache_link_parsing == False`.
|
src/pip/_internal/index/collector.py
|
with_cached_html_pages
|
FFY00/pip
|
python
|
def with_cached_html_pages(fn):
"\n Given a function that parses an Iterable[Link] from an HTMLPage, cache the\n function's result (keyed by CacheablePageContent), unless the HTMLPage\n `page` has `page.cache_link_parsing == False`.\n "
@_lru_cache(maxsize=None)
def wrapper(cacheable_page):
return list(fn(cacheable_page.page))
@functools.wraps(fn)
def wrapper_wrapper(page):
if page.cache_link_parsing:
return wrapper(CacheablePageContent(page))
return list(fn(page))
return wrapper_wrapper
|
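A minimal sketch of the caching behavior, reusing the decorator above with assumed stand-ins for the pieces this file doesn't show: _lru_cache is taken to be functools.lru_cache, and CacheablePageContent a hashable wrapper keyed on the page's url.

import functools

_lru_cache = functools.lru_cache               # assumption

class CacheablePageContent:                    # assumed wrapper
    def __init__(self, page):
        self.page = page
    def __eq__(self, other):
        return isinstance(other, type(self)) and self.page.url == other.page.url
    def __hash__(self):
        return hash(self.page.url)

class Page:
    def __init__(self, url, cache_link_parsing=True):
        self.url = url
        self.cache_link_parsing = cache_link_parsing

calls = []

@with_cached_html_pages
def parse(page):
    calls.append(page.url)
    yield page.url

p = Page('https://pypi.org/simple/pip/')
list(parse(p)); list(parse(p))
print(calls)   # parsed once; the second call is served from the cache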
@with_cached_html_pages
def parse_links(page):
'\n Parse an HTML document, and yield its anchor elements as Link objects.\n '
document = html5lib.parse(page.content, transport_encoding=page.encoding, namespaceHTMLElements=False)
url = page.url
base_url = _determine_base_url(document, url)
for anchor in document.findall('.//a'):
link = _create_link_from_element(anchor, page_url=url, base_url=base_url)
if (link is None):
continue
(yield link)
| -2,005,043,465,176,568,300
|
Parse an HTML document, and yield its anchor elements as Link objects.
|
src/pip/_internal/index/collector.py
|
parse_links
|
FFY00/pip
|
python
|
@with_cached_html_pages
def parse_links(page):
'\n \n '
document = html5lib.parse(page.content, transport_encoding=page.encoding, namespaceHTMLElements=False)
url = page.url
base_url = _determine_base_url(document, url)
for anchor in document.findall('.//a'):
link = _create_link_from_element(anchor, page_url=url, base_url=base_url)
if (link is None):
continue
(yield link)
|
def _remove_duplicate_links(links):
'\n Return a list of links, with duplicates removed and ordering preserved.\n '
return list(OrderedDict.fromkeys(links))
| -1,224,858,665,223,807,500
|
Return a list of links, with duplicates removed and ordering preserved.
|
src/pip/_internal/index/collector.py
|
_remove_duplicate_links
|
FFY00/pip
|
python
|
def _remove_duplicate_links(links):
'\n \n '
return list(OrderedDict.fromkeys(links))
|
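The one-liner relies on dict key ordering: OrderedDict.fromkeys() keeps the first occurrence of each key, so duplicates vanish while order is preserved.

from collections import OrderedDict
print(list(OrderedDict.fromkeys(['a', 'b', 'a', 'c', 'b'])))   # ['a', 'b', 'c']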
def group_locations(locations, expand_dir=False):
'\n Divide a list of locations into two groups: "files" (archives) and "urls."\n\n :return: A pair of lists (files, urls).\n '
files = []
urls = []
def sort_path(path):
url = path_to_url(path)
if (mimetypes.guess_type(url, strict=False)[0] == 'text/html'):
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if (is_local_path or is_file_url):
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
else:
logger.warning("Path '{0}' is ignored: it is a directory.".format(path))
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning("Url '%s' is ignored: it is neither a file nor a directory.", url)
elif is_url(url):
urls.append(url)
else:
logger.warning("Url '%s' is ignored. It is either a non-existing path or lacks a specific scheme.", url)
return (files, urls)
| 4,359,161,462,114,489,300
|
Divide a list of locations into two groups: "files" (archives) and "urls."
:return: A pair of lists (files, urls).
|
src/pip/_internal/index/collector.py
|
group_locations
|
FFY00/pip
|
python
|
def group_locations(locations, expand_dir=False):
'\n Divide a list of locations into two groups: "files" (archives) and "urls."\n\n :return: A pair of lists (files, urls).\n '
files = []
urls = []
def sort_path(path):
url = path_to_url(path)
if (mimetypes.guess_type(url, strict=False)[0] == 'text/html'):
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if (is_local_path or is_file_url):
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
else:
logger.warning("Path '{0}' is ignored: it is a directory.".format(path))
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning("Url '%s' is ignored: it is neither a file nor a directory.", url)
elif is_url(url):
urls.append(url)
else:
logger.warning("Url '%s' is ignored. It is either a non-existing path or lacks a specific scheme.", url)
return (files, urls)
|
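The routing decision inside sort_path() comes down to mimetypes.guess_type(): anything that looks like text/html is treated as an index page, everything else as an archive file.

import mimetypes

for url in ('https://example.com/simple/index.html',
            'https://example.com/pkg-1.0.tar.gz'):
    print(url, '->', mimetypes.guess_type(url, strict=False)[0])
# .../index.html     -> text/html
# .../pkg-1.0.tar.gz -> application/x-tar (not text/html, so it lands in files)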
def __init__(self, content, encoding, url, cache_link_parsing=True):
"\n :param encoding: the encoding to decode the given content.\n :param url: the URL from which the HTML was downloaded.\n :param cache_link_parsing: whether links parsed from this page's url\n should be cached. PyPI index urls should\n have this set to False, for example.\n "
self.content = content
self.encoding = encoding
self.url = url
self.cache_link_parsing = cache_link_parsing
| 2,947,765,301,962,677,000
|
:param encoding: the encoding to decode the given content.
:param url: the URL from which the HTML was downloaded.
:param cache_link_parsing: whether links parsed from this page's url
should be cached. PyPI index urls should
have this set to False, for example.
|
src/pip/_internal/index/collector.py
|
__init__
|
FFY00/pip
|
python
|
def __init__(self, content, encoding, url, cache_link_parsing=True):
"\n :param encoding: the encoding to decode the given content.\n :param url: the URL from which the HTML was downloaded.\n :param cache_link_parsing: whether links parsed from this page's url\n should be cached. PyPI index urls should\n have this set to False, for example.\n "
self.content = content
self.encoding = encoding
self.url = url
self.cache_link_parsing = cache_link_parsing
|
def __init__(self, files, find_links, project_urls):
'\n :param files: Links from file locations.\n :param find_links: Links from find_links.\n :param project_urls: URLs to HTML project pages, as described by\n the PEP 503 simple repository API.\n '
self.files = files
self.find_links = find_links
self.project_urls = project_urls
| -7,833,018,662,680,880,000
|
:param files: Links from file locations.
:param find_links: Links from find_links.
:param project_urls: URLs to HTML project pages, as described by
the PEP 503 simple repository API.
|
src/pip/_internal/index/collector.py
|
__init__
|
FFY00/pip
|
python
|
def __init__(self, files, find_links, project_urls):
'\n :param files: Links from file locations.\n :param find_links: Links from find_links.\n :param project_urls: URLs to HTML project pages, as described by\n the PEP 503 simple repository API.\n '
self.files = files
self.find_links = find_links
self.project_urls = project_urls
|
def fetch_page(self, location):
'\n Fetch an HTML page containing package links.\n '
return _get_html_page(location, session=self.session)
| 4,139,731,530,769,525,000
|
Fetch an HTML page containing package links.
|
src/pip/_internal/index/collector.py
|
fetch_page
|
FFY00/pip
|
python
|
def fetch_page(self, location):
'\n \n '
return _get_html_page(location, session=self.session)
|
def collect_links(self, project_name):
'Find all available links for the given project name.\n\n :return: All the Link objects (unfiltered), as a CollectedLinks object.\n '
search_scope = self.search_scope
index_locations = search_scope.get_index_urls_locations(project_name)
(index_file_loc, index_url_loc) = group_locations(index_locations)
(fl_file_loc, fl_url_loc) = group_locations(self.find_links, expand_dir=True)
file_links = [Link(url) for url in itertools.chain(index_file_loc, fl_file_loc)]
find_link_links = [Link(url, '-f') for url in self.find_links]
url_locations = [link for link in itertools.chain((Link(url, cache_link_parsing=False) for url in index_url_loc), (Link(url) for url in fl_url_loc)) if self.session.is_secure_origin(link)]
url_locations = _remove_duplicate_links(url_locations)
lines = ['{} location(s) to search for versions of {}:'.format(len(url_locations), project_name)]
for link in url_locations:
lines.append('* {}'.format(link))
logger.debug('\n'.join(lines))
return CollectedLinks(files=file_links, find_links=find_link_links, project_urls=url_locations)
| 8,680,116,040,653,082,000
|
Find all available links for the given project name.
:return: All the Link objects (unfiltered), as a CollectedLinks object.
|
src/pip/_internal/index/collector.py
|
collect_links
|
FFY00/pip
|
python
|
def collect_links(self, project_name):
'Find all available links for the given project name.\n\n :return: All the Link objects (unfiltered), as a CollectedLinks object.\n '
search_scope = self.search_scope
index_locations = search_scope.get_index_urls_locations(project_name)
(index_file_loc, index_url_loc) = group_locations(index_locations)
(fl_file_loc, fl_url_loc) = group_locations(self.find_links, expand_dir=True)
file_links = [Link(url) for url in itertools.chain(index_file_loc, fl_file_loc)]
find_link_links = [Link(url, '-f') for url in self.find_links]
url_locations = [link for link in itertools.chain((Link(url, cache_link_parsing=False) for url in index_url_loc), (Link(url) for url in fl_url_loc)) if self.session.is_secure_origin(link)]
url_locations = _remove_duplicate_links(url_locations)
lines = ['{} location(s) to search for versions of {}:'.format(len(url_locations), project_name)]
for link in url_locations:
lines.append('* {}'.format(link))
logger.debug('\n'.join(lines))
return CollectedLinks(files=file_links, find_links=find_link_links, project_urls=url_locations)
|
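A note on _remove_duplicate_links, which is not shown in this record: the surrounding code implies an order-preserving de-duplication over Link objects (which hash by URL). A minimal sketch of that behaviour, with plain strings standing in for Links, might look like this; it is a guess at the helper's contract, not pip's actual implementation.

def remove_duplicate_links(links):
    seen = set()
    unique = []
    for link in links:
        if link not in seen:   # keep only the first occurrence
            seen.add(link)
            unique.append(link)
    return unique

assert remove_duplicate_links(['a', 'b', 'a']) == ['a', 'b']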
def fetch(self, is_dl_forced=False):
'\n :return: None\n\n '
self.get_files(is_dl_forced)
return
| 8,409,556,957,349,451,000
|
:return: None
|
dipper/sources/Panther.py
|
fetch
|
putmantime/dipper
|
python
|
def fetch(self, is_dl_forced=False):
'\n \n\n '
self.get_files(is_dl_forced)
return
|
def parse(self, limit=None):
'\n :return: None\n '
if self.testOnly:
self.testMode = True
if (self.tax_ids is None):
logger.info('No taxon filter set; Dumping all orthologous associations.')
else:
logger.info('Only the following taxa will be dumped: %s', str(self.tax_ids))
self._get_orthologs(limit)
return
| -2,486,379,677,687,401,000
|
:return: None
|
dipper/sources/Panther.py
|
parse
|
putmantime/dipper
|
python
|
def parse(self, limit=None):
'\n \n '
if self.testOnly:
self.testMode = True
if (self.tax_ids is None):
logger.info('No taxon filter set; Dumping all orthologous associations.')
else:
logger.info('Only the following taxa will be dumped: %s', str(self.tax_ids))
self._get_orthologs(limit)
return
|
def _get_orthologs(self, limit):
"\n This will process each of the specified pairwise orthology files,\n creating orthology associations based on the specified orthology code.\n this currently assumes that each of the orthology files is identically\n formatted. Relationships are made between genes here.\n\n There is also a nominal amount of identifier re-formatting:\n MGI:MGI --> MGI\n Ensembl --> ENSEMBL\n\n we skip any genes where we don't know how to map the gene identifiers.\n For example, Gene:Huwe1 for RAT is not an identifier, so we skip any\n mappings to this identifier. Often, the there are two entries for the\n same gene (base on equivalent Uniprot id), and so we are not actually\n losing any information.\n\n We presently have a hard-coded filter to select only orthology\n relationships where one of the pair is in our species of interest\n (Mouse and Human, for the moment).\n This will be added as a configurable parameter in the future.\n\n Genes are also added to a grouping class defined with a PANTHER id.\n\n Triples:\n <gene1_id> RO:othologous <gene2_id>\n <assoc_id> :hasSubject <gene1_id>\n <assoc_id> :hasObject <gene2_id>\n <assoc_id> :hasPredicate <RO:orthologous>\n <assoc_id> dc:evidence ECO:phylogenetic_evidence\n\n <panther_id> a DATA:gene_family\n <panther_id> RO:has_member <gene1_id>\n <panther_id> RO:has_member <gene2_id>\n\n :param limit:\n :return:\n\n "
logger.info('getting orthologs')
if self.testMode:
g = self.testgraph
else:
g = self.graph
model = Model(g)
unprocessed_gene_ids = set()
for k in self.files.keys():
f = '/'.join((self.rawdir, self.files[k]['file']))
matchcounter = 0
mytar = tarfile.open(f, 'r:gz')
fname = mytar.getmembers()[0]
logger.info('Parsing %s', fname.name)
line_counter = 0
with mytar.extractfile(fname) as csvfile:
for line in csvfile:
if re.match('^#', line.decode()):
logger.info('Skipping header line')
continue
line_counter += 1
if ((line_counter % 1000000) == 0):
logger.info('Processed %d lines from %s', line_counter, fname.name)
line = line.decode().strip()
(a, b, orthology_class, ancestor_taxon, panther_id) = line.split('\t')
(species_a, gene_a, protein_a) = a.split('|')
(species_b, gene_b, protein_b) = b.split('|')
if (self.testMode and (not ((re.sub('UniProtKB=', '', protein_a) in self.test_ids) or (re.sub('UniProtKB=', '', protein_b) in self.test_ids)))):
continue
taxon_a = self._map_taxon_abbr_to_id(species_a)
taxon_b = self._map_taxon_abbr_to_id(species_b)
if ((self.tax_ids is not None) and (int(re.sub('NCBITaxon:', '', taxon_a.rstrip())) not in self.tax_ids) and (int(re.sub('NCBITaxon:', '', taxon_b.rstrip())) not in self.tax_ids)):
continue
else:
matchcounter += 1
if ((limit is not None) and (matchcounter > limit)):
break
gene_a = re.sub('=', ':', gene_a)
gene_b = re.sub('=', ':', gene_b)
clean_gene = self._clean_up_gene_id(gene_a, species_a)
if (clean_gene is None):
unprocessed_gene_ids.add(gene_a)
gene_a = clean_gene
clean_gene = self._clean_up_gene_id(gene_b, species_b)
if (clean_gene is None):
unprocessed_gene_ids.add(gene_b)
gene_b = clean_gene
if ((gene_a is None) or (gene_b is None)):
continue
rel = self._map_orthology_code_to_RO(orthology_class)
evidence_id = 'ECO:0000080'
assoc = OrthologyAssoc(g, self.name, gene_a, gene_b, rel)
assoc.add_evidence(evidence_id)
model.addClassToGraph(gene_a, None)
model.addClassToGraph(gene_b, None)
g.addTriple(gene_a, model.object_properties['in_taxon'], taxon_a)
g.addTriple(gene_b, model.object_properties['in_taxon'], taxon_b)
assoc.add_association_to_graph()
assoc.add_gene_family_to_graph(':'.join(('PANTHER', panther_id)))
if ((not self.testMode) and (limit is not None) and (line_counter > limit)):
break
logger.info('finished processing %s', f)
logger.warning('The following gene ids were unable to be processed: %s', str(unprocessed_gene_ids))
return
| 7,377,265,323,652,030,000
|
This will process each of the specified pairwise orthology files,
creating orthology associations based on the specified orthology code.
This currently assumes that each of the orthology files is identically
formatted. Relationships are made between genes here.
There is also a nominal amount of identifier re-formatting:
MGI:MGI --> MGI
Ensembl --> ENSEMBL
We skip any genes where we don't know how to map the gene identifiers.
For example, Gene:Huwe1 for RAT is not an identifier, so we skip any
mappings to this identifier. Often, there are two entries for the
same gene (based on an equivalent Uniprot id), and so we are not actually
losing any information.
We presently have a hard-coded filter to select only orthology
relationships where one of the pair is in our species of interest
(Mouse and Human, for the moment).
This will be added as a configurable parameter in the future.
Genes are also added to a grouping class defined with a PANTHER id.
Triples:
<gene1_id> RO:othologous <gene2_id>
<assoc_id> :hasSubject <gene1_id>
<assoc_id> :hasObject <gene2_id>
<assoc_id> :hasPredicate <RO:orthologous>
<assoc_id> dc:evidence ECO:phylogenetic_evidence
<panther_id> a DATA:gene_family
<panther_id> RO:has_member <gene1_id>
<panther_id> RO:has_member <gene2_id>
:param limit:
:return:
|
dipper/sources/Panther.py
|
_get_orthologs
|
putmantime/dipper
|
python
|
def _get_orthologs(self, limit):
"\n This will process each of the specified pairwise orthology files,\n creating orthology associations based on the specified orthology code.\n this currently assumes that each of the orthology files is identically\n formatted. Relationships are made between genes here.\n\n There is also a nominal amount of identifier re-formatting:\n MGI:MGI --> MGI\n Ensembl --> ENSEMBL\n\n we skip any genes where we don't know how to map the gene identifiers.\n For example, Gene:Huwe1 for RAT is not an identifier, so we skip any\n mappings to this identifier. Often, the there are two entries for the\n same gene (base on equivalent Uniprot id), and so we are not actually\n losing any information.\n\n We presently have a hard-coded filter to select only orthology\n relationships where one of the pair is in our species of interest\n (Mouse and Human, for the moment).\n This will be added as a configurable parameter in the future.\n\n Genes are also added to a grouping class defined with a PANTHER id.\n\n Triples:\n <gene1_id> RO:othologous <gene2_id>\n <assoc_id> :hasSubject <gene1_id>\n <assoc_id> :hasObject <gene2_id>\n <assoc_id> :hasPredicate <RO:orthologous>\n <assoc_id> dc:evidence ECO:phylogenetic_evidence\n\n <panther_id> a DATA:gene_family\n <panther_id> RO:has_member <gene1_id>\n <panther_id> RO:has_member <gene2_id>\n\n :param limit:\n :return:\n\n "
logger.info('getting orthologs')
if self.testMode:
g = self.testgraph
else:
g = self.graph
model = Model(g)
unprocessed_gene_ids = set()
for k in self.files.keys():
f = '/'.join((self.rawdir, self.files[k]['file']))
matchcounter = 0
mytar = tarfile.open(f, 'r:gz')
fname = mytar.getmembers()[0]
logger.info('Parsing %s', fname.name)
line_counter = 0
with mytar.extractfile(fname) as csvfile:
for line in csvfile:
if re.match('^#', line.decode()):
logger.info('Skipping header line')
continue
line_counter += 1
if ((line_counter % 1000000) == 0):
logger.info('Processed %d lines from %s', line_counter, fname.name)
line = line.decode().strip()
(a, b, orthology_class, ancestor_taxon, panther_id) = line.split('\t')
(species_a, gene_a, protein_a) = a.split('|')
(species_b, gene_b, protein_b) = b.split('|')
if (self.testMode and (not ((re.sub('UniProtKB=', '', protein_a) in self.test_ids) or (re.sub('UniProtKB=', '', protein_b) in self.test_ids)))):
continue
taxon_a = self._map_taxon_abbr_to_id(species_a)
taxon_b = self._map_taxon_abbr_to_id(species_b)
if ((self.tax_ids is not None) and (int(re.sub('NCBITaxon:', '', taxon_a.rstrip())) not in self.tax_ids) and (int(re.sub('NCBITaxon:', '', taxon_b.rstrip())) not in self.tax_ids)):
continue
else:
matchcounter += 1
if ((limit is not None) and (matchcounter > limit)):
break
gene_a = re.sub('=', ':', gene_a)
gene_b = re.sub('=', ':', gene_b)
clean_gene = self._clean_up_gene_id(gene_a, species_a)
if (clean_gene is None):
unprocessed_gene_ids.add(gene_a)
gene_a = clean_gene
clean_gene = self._clean_up_gene_id(gene_b, species_b)
if (clean_gene is None):
unprocessed_gene_ids.add(gene_b)
gene_b = clean_gene
if ((gene_a is None) or (gene_b is None)):
continue
rel = self._map_orthology_code_to_RO(orthology_class)
evidence_id = 'ECO:0000080'
assoc = OrthologyAssoc(g, self.name, gene_a, gene_b, rel)
assoc.add_evidence(evidence_id)
model.addClassToGraph(gene_a, None)
model.addClassToGraph(gene_b, None)
g.addTriple(gene_a, model.object_properties['in_taxon'], taxon_a)
g.addTriple(gene_b, model.object_properties['in_taxon'], taxon_b)
assoc.add_association_to_graph()
assoc.add_gene_family_to_graph(':'.join(('PANTHER', panther_id)))
if ((not self.testMode) and (limit is not None) and (line_counter > limit)):
break
logger.info('finished processing %s', f)
logger.warning('The following gene ids were unable to be processed: %s', str(unprocessed_gene_ids))
return
|
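Illustration of the per-line parsing inside _get_orthologs, run on a made-up PANTHER-style row (tab-separated columns, pipe-delimited gene endpoints); all identifiers below are invented for the example.

import re

line = ('HUMAN|Ensembl=ENSG00000000001|UniProtKB=P00001\t'
        'MOUSE|Ensembl=ENSMUSG00000000001|UniProtKB=P00002\t'
        'LDO\tEuarchontoglires\tPTHR10000')
(a, b, orthology_class, ancestor_taxon, panther_id) = line.split('\t')
(species_a, gene_a, protein_a) = a.split('|')
gene_a = re.sub('=', ':', gene_a)   # 'Ensembl=...' becomes 'Ensembl:...'
print(species_a, gene_a, orthology_class, panther_id)
# HUMAN Ensembl:ENSG00000000001 LDO PTHR10000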
@staticmethod
def _map_taxon_abbr_to_id(ptax):
'\n Will map the panther-specific taxon abbreviations to NCBI taxon numbers\n :param ptax:\n :return: NCBITaxon id\n '
taxid = None
ptax_to_taxid_map = {'ANOCA': 28377, 'ARATH': 3702, 'BOVIN': 9913, 'CAEEL': 6239, 'CANFA': 9615, 'CHICK': 9031, 'DANRE': 7955, 'DICDI': 44689, 'DROME': 7227, 'ECOLI': 562, 'HORSE': 9796, 'HUMAN': 9606, 'MACMU': 9544, 'MONDO': 13616, 'MOUSE': 10090, 'ORNAN': 9258, 'PANTR': 9598, 'PIG': 9823, 'RAT': 10116, 'SCHPO': 4896, 'TAKRU': 31033, 'XENTR': 8364, 'YEAST': 4932}
if (ptax in ptax_to_taxid_map):
taxid = ':'.join(('NCBITaxon', str(ptax_to_taxid_map.get(ptax))))
else:
logger.error('unmapped taxon code %s', ptax)
return taxid
| 6,544,314,553,461,517,000
|
Will map the panther-specific taxon abbreviations to NCBI taxon numbers
:param ptax:
:return: NCBITaxon id
|
dipper/sources/Panther.py
|
_map_taxon_abbr_to_id
|
putmantime/dipper
|
python
|
@staticmethod
def _map_taxon_abbr_to_id(ptax):
'\n Will map the panther-specific taxon abbreviations to NCBI taxon numbers\n :param ptax:\n :return: NCBITaxon id\n '
taxid = None
ptax_to_taxid_map = {'ANOCA': 28377, 'ARATH': 3702, 'BOVIN': 9913, 'CAEEL': 6239, 'CANFA': 9615, 'CHICK': 9031, 'DANRE': 7955, 'DICDI': 44689, 'DROME': 7227, 'ECOLI': 562, 'HORSE': 9796, 'HUMAN': 9606, 'MACMU': 9544, 'MONDO': 13616, 'MOUSE': 10090, 'ORNAN': 9258, 'PANTR': 9598, 'PIG': 9823, 'RAT': 10116, 'SCHPO': 4896, 'TAKRU': 31033, 'XENTR': 8364, 'YEAST': 4932}
if (ptax in ptax_to_taxid_map):
taxid = ':'.join(('NCBITaxon', str(ptax_to_taxid_map.get(ptax))))
else:
logger.error('unmapped taxon code %s', ptax)
return taxid
|
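The mapping itself is a plain dict lookup followed by CURIE assembly; for example, the PANTHER code 'HUMAN' becomes 'NCBITaxon:9606'. An excerpted sketch, not the full table:

ptax_to_taxid_map = {'HUMAN': 9606, 'MOUSE': 10090}
taxid = ':'.join(('NCBITaxon', str(ptax_to_taxid_map['HUMAN'])))
assert taxid == 'NCBITaxon:9606'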
@staticmethod
def _map_orthology_code_to_RO(ortho):
'\n Map the panther-specific orthology code (P,O,LDO,X,LDX)\n to relationship-ontology\n identifiers.\n :param ortho: orthology code\n :return: RO identifier\n '
ortho_rel = OrthologyAssoc.ortho_rel
ro_id = ortho_rel['orthologous']
ortho_to_ro_map = {'P': ortho_rel['paralogous'], 'O': ortho_rel['orthologous'], 'LDO': ortho_rel['least_diverged_orthologous'], 'X': ortho_rel['xenologous'], 'LDX': ortho_rel['xenologous']}
if (ortho in ortho_to_ro_map):
ro_id = ortho_to_ro_map.get(ortho)
else:
logger.warning("unmapped orthology code %s. Defaulting to 'orthology'", ortho)
return ro_id
| 612,825,542,876,158,200
|
Map the panther-specific orthology code (P,O,LDO,X,LDX)
to relationship-ontology
identifiers.
:param ortho: orthology code
:return: RO identifier
|
dipper/sources/Panther.py
|
_map_orthology_code_to_RO
|
putmantime/dipper
|
python
|
@staticmethod
def _map_orthology_code_to_RO(ortho):
'\n Map the panther-specific orthology code (P,O,LDO,X,LDX)\n to relationship-ontology\n identifiers.\n :param ortho: orthology code\n :return: RO identifier\n '
ortho_rel = OrthologyAssoc.ortho_rel
ro_id = ortho_rel['orthologous']
ortho_to_ro_map = {'P': ortho_rel['paralogous'], 'O': ortho_rel['orthologous'], 'LDO': ortho_rel['least_diverged_orthologous'], 'X': ortho_rel['xenologous'], 'LDX': ortho_rel['xenologous']}
if (ortho in ortho_to_ro_map):
ro_id = ortho_to_ro_map.get(ortho)
else:
logger.warning("unmapped orthology code %s. Defaulting to 'orthology'", ortho)
return ro_id
|
@staticmethod
def _clean_up_gene_id(geneid, sp):
'\n A series of identifier rewriting to conform with\n standard gene identifiers.\n :param geneid:\n :param sp:\n :return:\n '
geneid = re.sub('MGI:MGI:', 'MGI:', geneid)
geneid = re.sub('Ensembl', 'ENSEMBL', geneid)
geneid = re.sub('Gene:CELE', 'WormBase:', geneid)
if (sp == 'CAEEL'):
if re.match('(Gene|ENSEMBLGenome):\\w+\\.\\d+', geneid):
geneid = re.sub('(?:Gene|ENSEMBLGenome):(\\w+\\.\\d+)', 'WormBase:\\1', geneid)
if (sp == 'DROME'):
if re.match('(ENSEMBLGenome):\\w+\\.\\d+', geneid):
geneid = re.sub('(?:ENSEMBLGenome):(\\w+\\.\\d+)', 'FlyBase:\\1', geneid)
geneid = re.sub('GeneID', 'NCBIGene', geneid)
geneid = re.sub('Gene:Dmel_', 'FlyBase:', geneid)
geneid = re.sub('Gene:CG', 'FlyBase:CG', geneid)
geneid = re.sub('ENSEMBLGenome:FBgn', 'FlyBase:FBgn', geneid)
geneid = re.sub('Gene:ENS', 'ENSEMBL:ENS', geneid)
geneid = re.sub('Gene:Xenbase:', 'Xenbase:', geneid)
pfxlcl = re.split(':', geneid)
pfx = pfxlcl[0]
if ((pfx is None) or (pfx not in curie_map.get())):
logger.warning('No curie prefix for (species %s): %s', sp, geneid)
geneid = None
return geneid
| 3,802,867,241,168,685,000
|
A series of identifier rewriting to conform with
standard gene identifiers.
:param geneid:
:param sp:
:return:
|
dipper/sources/Panther.py
|
_clean_up_gene_id
|
putmantime/dipper
|
python
|
@staticmethod
def _clean_up_gene_id(geneid, sp):
'\n A series of identifier rewriting to conform with\n standard gene identifiers.\n :param geneid:\n :param sp:\n :return:\n '
geneid = re.sub('MGI:MGI:', 'MGI:', geneid)
geneid = re.sub('Ensembl', 'ENSEMBL', geneid)
geneid = re.sub('Gene:CELE', 'WormBase:', geneid)
if (sp == 'CAEEL'):
if re.match('(Gene|ENSEMBLGenome):\\w+\\.\\d+', geneid):
geneid = re.sub('(?:Gene|ENSEMBLGenome):(\\w+\\.\\d+)', 'WormBase:\\1', geneid)
if (sp == 'DROME'):
if re.match('(ENSEMBLGenome):\\w+\\.\\d+', geneid):
geneid = re.sub('(?:ENSEMBLGenome):(\\w+\\.\\d+)', 'FlyBase:\\1', geneid)
geneid = re.sub('GeneID', 'NCBIGene', geneid)
geneid = re.sub('Gene:Dmel_', 'FlyBase:', geneid)
geneid = re.sub('Gene:CG', 'FlyBase:CG', geneid)
geneid = re.sub('ENSEMBLGenome:FBgn', 'FlyBase:FBgn', geneid)
geneid = re.sub('Gene:ENS', 'ENSEMBL:ENS', geneid)
geneid = re.sub('Gene:Xenbase:', 'Xenbase:', geneid)
pfxlcl = re.split(':', geneid)
pfx = pfxlcl[0]
if ((pfx is None) or (pfx not in curie_map.get())):
logger.warning('No curie prefix for (species %s): %s', sp, geneid)
geneid = None
return geneid
|
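Two of the rewrites from _clean_up_gene_id, demonstrated on invented identifiers; the second shows the CAEEL branch's capture-group substitution.

import re

assert re.sub('MGI:MGI:', 'MGI:', 'MGI:MGI:12345') == 'MGI:12345'
assert re.sub('(?:Gene|ENSEMBLGenome):(\\w+\\.\\d+)',
              'WormBase:\\1', 'Gene:F26E4.9') == 'WormBase:F26E4.9'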
@deprecated_args(family=None)
def translate(page=None, hints=None, auto=True, removebrackets=False, site=None):
'\n Return a list of links to pages on other sites based on hints.\n\n Entries for single page titles list those pages. Page titles for entries\n such as "all:" or "xyz:" or "20:" are first built from the page title of\n \'page\' and then listed. When \'removebrackets\' is True, a trailing pair of\n brackets and the text between them is removed from the page title.\n If \'auto\' is true, known year and date page titles are autotranslated\n to all known target languages and inserted into the list.\n\n '
result = set()
assert (page or site)
if ((site is None) and page):
site = page.site
if hints:
for h in hints:
if (':' not in h):
codes = h
newname = ''
else:
(codes, newname) = h.split(':', 1)
if (newname == ''):
if (page is None):
continue
newname = page.title(withNamespace=False)
if removebrackets:
newname = re.sub(re.compile('\\W*?\\(.*?\\)\\W*?', re.UNICODE), u' ', newname)
try:
number = int(codes)
codes = site.family.languages_by_size[:number]
except ValueError:
if (codes == 'all'):
codes = site.family.languages_by_size
elif (codes in site.family.language_groups):
codes = site.family.language_groups[codes]
else:
codes = codes.split(',')
for newcode in codes:
if (newcode in site.languages()):
if (newcode != site.code):
ns = (page.namespace() if page else 0)
x = pywikibot.Link(newname, site.getSite(code=newcode), defaultNamespace=ns)
result.add(x)
elif config.verbose_output:
pywikibot.output((u'Ignoring unknown language code %s' % newcode))
if (auto and page):
sitelang = page.site.code
(dictName, value) = date.getAutoFormat(sitelang, page.title())
if dictName:
if True:
pywikibot.output((u'TitleTranslate: %s was recognized as %s with value %d' % (page.title(), dictName, value)))
for (entryLang, entry) in date.formats[dictName].items():
if (entryLang not in site.languages()):
continue
if (entryLang != sitelang):
if True:
newname = entry(value)
x = pywikibot.Link(newname, pywikibot.Site(code=entryLang, fam=site.family))
result.add(x)
return list(result)
| -3,098,732,767,313,872,000
|
Return a list of links to pages on other sites based on hints.
Entries for single page titles list those pages. Page titles for entries
such as "all:" or "xyz:" or "20:" are first built from the page title of
'page' and then listed. When 'removebrackets' is True, a trailing pair of
brackets and the text between them is removed from the page title.
If 'auto' is true, known year and date page titles are autotranslated
to all known target languages and inserted into the list.
|
pywikibot/titletranslate.py
|
translate
|
h4ck3rm1k3/pywikibot-core
|
python
|
@deprecated_args(family=None)
def translate(page=None, hints=None, auto=True, removebrackets=False, site=None):
'\n Return a list of links to pages on other sites based on hints.\n\n Entries for single page titles list those pages. Page titles for entries\n such as "all:" or "xyz:" or "20:" are first built from the page title of\n \'page\' and then listed. When \'removebrackets\' is True, a trailing pair of\n brackets and the text between them is removed from the page title.\n If \'auto\' is true, known year and date page titles are autotranslated\n to all known target languages and inserted into the list.\n\n '
result = set()
assert (page or site)
if ((site is None) and page):
site = page.site
if hints:
for h in hints:
if (':' not in h):
codes = h
newname = ''
else:
(codes, newname) = h.split(':', 1)
if (newname == ''):
if (page is None):
continue
newname = page.title(withNamespace=False)
if removebrackets:
newname = re.sub(re.compile('\\W*?\\(.*?\\)\\W*?', re.UNICODE), u' ', newname)
try:
number = int(codes)
codes = site.family.languages_by_size[:number]
except ValueError:
if (codes == 'all'):
codes = site.family.languages_by_size
elif (codes in site.family.language_groups):
codes = site.family.language_groups[codes]
else:
codes = codes.split(',')
for newcode in codes:
if (newcode in site.languages()):
if (newcode != site.code):
ns = (page.namespace() if page else 0)
x = pywikibot.Link(newname, site.getSite(code=newcode), defaultNamespace=ns)
result.add(x)
elif config.verbose_output:
pywikibot.output((u'Ignoring unknown language code %s' % newcode))
if (auto and page):
sitelang = page.site.code
(dictName, value) = date.getAutoFormat(sitelang, page.title())
if dictName:
if True:
pywikibot.output((u'TitleTranslate: %s was recognized as %s with value %d' % (page.title(), dictName, value)))
for (entryLang, entry) in date.formats[dictName].items():
if (entryLang not in site.languages()):
continue
if (entryLang != sitelang):
if True:
newname = entry(value)
x = pywikibot.Link(newname, pywikibot.Site(code=entryLang, fam=site.family))
result.add(x)
return list(result)
|
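A stripped-down sketch of the hint grammar that translate() accepts: each hint is 'codes:title', where codes may be a comma-separated language list, a count of the largest wikis, or 'all'. languages_by_size below is a stand-in for site.family.languages_by_size, and the pywikibot Link construction is omitted.

languages_by_size = ['en', 'de', 'fr', 'es', 'it']

def expand_hint(h, default_title='Example'):
    if ':' not in h:
        codes, newname = h, default_title
    else:
        codes, newname = h.split(':', 1)
        newname = newname or default_title   # empty title falls back
    try:
        codes = languages_by_size[:int(codes)]   # '3:Foo' -> top 3 wikis
    except ValueError:
        codes = languages_by_size if codes == 'all' else codes.split(',')
    return [(code, newname) for code in codes]

assert expand_hint('2:Python') == [('en', 'Python'), ('de', 'Python')]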
def get_command_line(only_print_help=False):
'\n Parse command line arguments when GoogleScraper is used as a CLI application.\n\n Returns:\n The configuration as a dictionary that determines the behaviour of the app.\n '
parser = argparse.ArgumentParser(prog='GoogleScraper', description='Scrapes the Google, Yandex, Bing and many other search engines by forging http requests that imitate browser searches or by using real browsers controlled by the selenium framework. Multithreading support.', epilog='GoogleScraper {version}. This program might infringe the TOS of the search engines. Please use it at your own risk. (c) by Nikolai Tschacher, 2012-2018. incolumitas.com'.format(version=__version__))
parser.add_argument('-m', '--scrape-method', type=str, default='http', help='The scraping type. There are currently three types: "http", "selenium" and "http-async". "Http" scrapes with raw http requests, whereas "selenium" uses the selenium framework to remotely control browsers. "http-async" makes use of gevent and is well suited for extremely fast and explosive scraping jobs. You may search more than 1000 requests per second if you have the necessary number of proxies available. ', choices=('http', 'selenium', 'http-async'))
parser.add_argument('--sel-browser', choices=['firefox', 'chrome'], default='chrome', help='The browser frontend for selenium scraping mode. Takes only effect if --scrape-method is set to "selenium"')
parser.add_argument('--browser-mode', choices=['normal', 'headless'], default='normal', help='In which mode the browser is started. Valid values = (normal, headless)')
keyword_group = parser.add_mutually_exclusive_group()
keyword_group.add_argument('-q', '--keyword', type=str, action='store', dest='keyword', help='The search keyword to scrape for. If you need to scrape multiple keywords, use the --keyword-file flag')
keyword_group.add_argument('--keyword-file', type=str, action='store', default='', help='Keywords to search for. One keyword per line. Empty lines are ignored. Alternatively, you may specify the path to a python module (must end with the .py suffix) where the keywords must be held in a dictionary with the name "scrape_jobs".')
parser.add_argument('-o-', '--output-filename', type=str, action='store', default='', help='The name of the output file. If the file ending is "json", write a json file, if the ending is "csv", write a csv file.')
parser.add_argument('--shell', action='store_true', default=False, help='Fire up a shell with a loaded sqlalchemy session.')
parser.add_argument('-n', '--num-results-per-page', type=int, action='store', default=10, help='The number of results per page. Must be smaller than 100, by default 50 for raw mode and 10 for selenium mode. Some search engines ignore this setting.')
parser.add_argument('-p', '--num-pages-for-keyword', type=int, action='store', default=1, help='The number of pages to request for each keyword. Each page is requested by a unique connection and if possible by a unique IP (at least in "http" mode).')
parser.add_argument('-z', '--num-workers', type=int, default=1, action='store', help='This argument sets the number of browser instances for selenium mode or the number of worker threads in http mode.')
parser.add_argument('-t', '--search-type', type=str, action='store', default='normal', help='The searchtype to launch. May be normal web search, image search, news search or video search.')
parser.add_argument('--proxy-file', type=str, dest='proxy_file', action='store', required=False, help='A filename for a list of proxies (supported are HTTP PROXIES, SOCKS4/5) with the following format: "Proxyprotocol (proxy_ip|proxy_host):Port\n"Example file: socks4 127.0.0.1:99\nsocks5 0.0.0.0:1080\n')
parser.add_argument('--config-file', type=str, dest='config_file', action='store', help='The path to the configuration file for GoogleScraper. Normally you won\'t need this, because GoogleScrape comes shipped with a thoroughly commented configuration file named "scrape_config.py"')
parser.add_argument('--check-detection', type=str, dest='check_detection', action='store', help='Check if the given search engine blocked you from scraping. Often detection can be determined if you have to solve a captcha.')
parser.add_argument('--simulate', action='store_true', default=False, required=False, help='If this flag is set, the scrape job and its estimated length will be printed.')
loglevel_help = '\nSet the debug level of the application. Use the string representation\ninstead of the numbers. High numbers will output less, low numbers more.\nCRITICAL = 50,\nFATAL = CRITICAL,\nERROR = 40,\nWARNING = 30,\nWARN = WARNING,\nINFO = 20,\nDEBUG = 10,\nNOTSET = 0\n '
parser.add_argument('-v', '--verbosity', '--loglevel', dest='log_level', default='INFO', type=str.lower, choices=['debug', 'info', 'warning', 'warn', 'error', 'critical', 'fatal'], help=loglevel_help)
parser.add_argument('--print-results', choices=['all', 'summarize'], default='all', help='Whether to print all results ("all"), or only print a summary ("summarize")')
parser.add_argument('--view-config', action='store_true', default=False, help='Print the current configuration to stdout. You may use it to create and tweak your own config file from it.')
parser.add_argument('-V', '--v', '--version', action='store_true', default=False, dest='version', help='Prints the version of GoogleScraper')
parser.add_argument('--clean', action='store_true', default=False, help='Cleans all stored data. Please be very careful when you use this flag.')
parser.add_argument('--mysql-proxy-db', action='store', help='A mysql connection string for proxies to use. Format: mysql://<username>:<password>@<host>/<dbname>. Has precedence over proxy files.')
parser.add_argument('-s', '--search-engines', action='store', default=['google'], help='What search engines to use (See GoogleScraper --config for all supported). If you want to use more than one at the same time, just separate with commas: "google, bing, yandex". If you want to use all search engines that are available, give \'*\' as argument.')
parser.add_argument('--proxy_chain_ips', type=str, action='store', default='local', help='proxy_chain_ips to forward requests')
parser.add_argument('--strict', action='store_true', default=False, help='Defines strict google / bing search')
parser.add_argument('--no-cache', action='store_true', default=False, help='Disable caching')
if only_print_help:
parser.print_help()
else:
args = parser.parse_args()
return vars(args)
| 5,549,819,006,311,449,000
|
Parse command line arguments when GoogleScraper is used as a CLI application.
Returns:
The configuration as a dictionary that determines the behaviour of the app.
|
GoogleScraper/commandline.py
|
get_command_line
|
hnhnarek/GoogleScraper
|
python
|
def get_command_line(only_print_help=False):
'\n Parse command line arguments when GoogleScraper is used as a CLI application.\n\n Returns:\n The configuration as a dictionary that determines the behaviour of the app.\n '
parser = argparse.ArgumentParser(prog='GoogleScraper', description='Scrapes the Google, Yandex, Bing and many other search engines by forging http requests that imitate browser searches or by using real browsers controlled by the selenium framework. Multithreading support.', epilog='GoogleScraper {version}. This program might infringe the TOS of the search engines. Please use it at your own risk. (c) by Nikolai Tschacher, 2012-2018. incolumitas.com'.format(version=__version__))
parser.add_argument('-m', '--scrape-method', type=str, default='http', help='The scraping type. There are currently three types: "http", "selenium" and "http-async". "Http" scrapes with raw http requests, whereas "selenium" uses the selenium framework to remotely control browsers. "http-async" makes use of gevent and is well suited for extremely fast and explosive scraping jobs. You may search more than 1000 requests per second if you have the necessary number of proxies available. ', choices=('http', 'selenium', 'http-async'))
parser.add_argument('--sel-browser', choices=['firefox', 'chrome'], default='chrome', help='The browser frontend for selenium scraping mode. Takes only effect if --scrape-method is set to "selenium"')
parser.add_argument('--browser-mode', choices=['normal', 'headless'], default='normal', help='In which mode the browser is started. Valid values = (normal, headless)')
keyword_group = parser.add_mutually_exclusive_group()
keyword_group.add_argument('-q', '--keyword', type=str, action='store', dest='keyword', help='The search keyword to scrape for. If you need to scrape multiple keywords, use the --keyword-file flag')
keyword_group.add_argument('--keyword-file', type=str, action='store', default='', help='Keywords to search for. One keyword per line. Empty lines are ignored. Alternatively, you may specify the path to a python module (must end with the .py suffix) where the keywords must be held in a dictionary with the name "scrape_jobs".')
parser.add_argument('-o-', '--output-filename', type=str, action='store', default='', help='The name of the output file. If the file ending is "json", write a json file, if the ending is "csv", write a csv file.')
parser.add_argument('--shell', action='store_true', default=False, help='Fire up a shell with a loaded sqlalchemy session.')
parser.add_argument('-n', '--num-results-per-page', type=int, action='store', default=10, help='The number of results per page. Must be smaller than 100, by default 50 for raw mode and 10 for selenium mode. Some search engines ignore this setting.')
parser.add_argument('-p', '--num-pages-for-keyword', type=int, action='store', default=1, help='The number of pages to request for each keyword. Each page is requested by a unique connection and if possible by a unique IP (at least in "http" mode).')
parser.add_argument('-z', '--num-workers', type=int, default=1, action='store', help='This argument sets the number of browser instances for selenium mode or the number of worker threads in http mode.')
parser.add_argument('-t', '--search-type', type=str, action='store', default='normal', help='The searchtype to launch. May be normal web search, image search, news search or video search.')
parser.add_argument('--proxy-file', type=str, dest='proxy_file', action='store', required=False, help='A filename for a list of proxies (supported are HTTP PROXIES, SOCKS4/5) with the following format: "Proxyprotocol (proxy_ip|proxy_host):Port\n"Example file: socks4 127.0.0.1:99\nsocks5 0.0.0.0:1080\n')
parser.add_argument('--config-file', type=str, dest='config_file', action='store', help='The path to the configuration file for GoogleScraper. Normally you won\'t need this, because GoogleScrape comes shipped with a thoroughly commented configuration file named "scrape_config.py"')
parser.add_argument('--check-detection', type=str, dest='check_detection', action='store', help='Check if the given search engine blocked you from scraping. Often detection can be determined if you have to solve a captcha.')
parser.add_argument('--simulate', action='store_true', default=False, required=False, help='If this flag is set, the scrape job and its estimated length will be printed.')
loglevel_help = '\nSet the debug level of the application. Use the string representation\ninstead of the numbers. High numbers will output less, low numbers more.\nCRITICAL = 50,\nFATAL = CRITICAL,\nERROR = 40,\nWARNING = 30,\nWARN = WARNING,\nINFO = 20,\nDEBUG = 10,\nNOTSET = 0\n '
parser.add_argument('-v', '--verbosity', '--loglevel', dest='log_level', default='INFO', type=str.lower, choices=['debug', 'info', 'warning', 'warn', 'error', 'critical', 'fatal'], help=loglevel_help)
parser.add_argument('--print-results', choices=['all', 'summarize'], default='all', help='Whether to print all results ("all"), or only print a summary ("summarize")')
parser.add_argument('--view-config', action='store_true', default=False, help='Print the current configuration to stdout. You may use it to create and tweak your own config file from it.')
parser.add_argument('-V', '--v', '--version', action='store_true', default=False, dest='version', help='Prints the version of GoogleScraper')
parser.add_argument('--clean', action='store_true', default=False, help='Cleans all stored data. Please be very careful when you use this flag.')
parser.add_argument('--mysql-proxy-db', action='store', help='A mysql connection string for proxies to use. Format: mysql://<username>:<password>@<host>/<dbname>. Has precedence over proxy files.')
parser.add_argument('-s', '--search-engines', action='store', default=['google'], help='What search engines to use (See GoogleScraper --config for all supported). If you want to use more than one at the same time, just separate with commas: "google, bing, yandex". If you want to use all search engines that are available, give \'*\' as argument.')
parser.add_argument('--proxy_chain_ips', type=str, action='store', default='local', help='proxy_chain_ips to forward requests')
parser.add_argument('--strict', action='store_true', default=False, help='Defines strict google / bing search')
parser.add_argument('--no-cache', action='store_true', default=False, help='Disable caching')
if only_print_help:
parser.print_help()
else:
args = parser.parse_args()
return vars(args)
|
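A hypothetical invocation and a two-flag sketch of the parse (the full parser defines many more options): get_command_line() returns vars(args), i.e. a plain dict keyed by the argparse destinations.

# Shell: GoogleScraper -m selenium -q "some keyword" -p 2

import argparse

parser = argparse.ArgumentParser(prog='GoogleScraper')
parser.add_argument('-q', '--keyword')
parser.add_argument('-p', '--num-pages-for-keyword', type=int, default=1)
config = vars(parser.parse_args(['-q', 'some keyword', '-p', '2']))
assert config == {'keyword': 'some keyword', 'num_pages_for_keyword': 2}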
def main(self, regex_string):
'\n regex string input\n :regex_string: regex match string\n :return:\n '
pass
| -2,005,913,430,576,091,400
|
regex string input
:regex_string: regex match string
:return:
|
rules/javascript/CVI_3003.py
|
main
|
Afant1/Kunlun-M
|
python
|
def main(self, regex_string):
'\n regex string input\n :regex_string: regex match string\n :return:\n '
pass
|
@classmethod
def new_game(cls, user, attempts, deck, disp_deck, attempts_made, match_list, match_list_int, matches_found, guess1_or_guess2, guess_history):
'Create and return a new game'
if ((attempts < 30) or (attempts > 60)):
raise ValueError('Number of attempts must be more than 29 and less than 61')
game = Game(user=user, deck=deck, attempts_allowed=attempts, attempts_remaining=attempts, disp_deck=disp_deck, attempts_made=attempts_made, match_list=match_list, match_list_int=match_list_int, matches_found=matches_found, guess1_or_guess2=guess1_or_guess2, game_over=False, cancelled=False, guess_history=guess_history, time_created=str(datetime.now()))
game.put()
return game
| 1,032,905,783,144,670,100
|
Create and return a new game
|
models/game.py
|
new_game
|
bencam/pelmanism
|
python
|
@classmethod
def new_game(cls, user, attempts, deck, disp_deck, attempts_made, match_list, match_list_int, matches_found, guess1_or_guess2, guess_history):
if ((attempts < 30) or (attempts > 60)):
raise ValueError('Number of attempts must be more than 29 and less than 61')
game = Game(user=user, deck=deck, attempts_allowed=attempts, attempts_remaining=attempts, disp_deck=disp_deck, attempts_made=attempts_made, match_list=match_list, match_list_int=match_list_int, matches_found=matches_found, guess1_or_guess2=guess1_or_guess2, game_over=False, cancelled=False, guess_history=guess_history, time_created=str(datetime.now()))
game.put()
return game
|
def to_form(self, message):
'Return a GameForm representation of the game'
form = GameForm()
form.urlsafe_key = self.key.urlsafe()
form.user_name = self.user.get().name
form.attempts_remaining = self.attempts_remaining
form.game_over = self.game_over
form.cancelled = self.cancelled
form.disp_deck = self.disp_deck
form.attempts_made = self.attempts_made
form.match_list = self.match_list
form.matches_found = self.matches_found
form.time_created = self.time_created
form.message = message
return form
| -4,697,406,924,980,220,000
|
Return a GameForm representation of the game
|
models/game.py
|
to_form
|
bencam/pelmanism
|
python
|
def to_form(self, message):
form = GameForm()
form.urlsafe_key = self.key.urlsafe()
form.user_name = self.user.get().name
form.attempts_remaining = self.attempts_remaining
form.game_over = self.game_over
form.cancelled = self.cancelled
form.disp_deck = self.disp_deck
form.attempts_made = self.attempts_made
form.match_list = self.match_list
form.matches_found = self.matches_found
form.time_created = self.time_created
form.message = message
return form
|
def to_form_user_games(self):
'Return a GameFormUserGame representation of the game;\n this form displays a custom list of the game entities and is\n used in the get_user_games endpoint'
return GameFormUserGame(urlsafe_key=self.key.urlsafe(), user_name=self.user.get().name, attempts_remaining=self.attempts_remaining, game_over=self.game_over, disp_deck=self.disp_deck, attempts_made=self.attempts_made, match_list=self.match_list, matches_found=self.matches_found, time_created=self.time_created)
| 5,650,300,017,706,337,000
|
Return a GameFormUserGame representation of the game;
this form displays a custom list of the game entities and is
used in the get_user_games endpoint
|
models/game.py
|
to_form_user_games
|
bencam/pelmanism
|
python
|
def to_form_user_games(self):
'Return a GameFormUserGame representation of the game;\n this form displays a custom list of the game entities and is\n used in the get_user_games endpoint'
return GameFormUserGame(urlsafe_key=self.key.urlsafe(), user_name=self.user.get().name, attempts_remaining=self.attempts_remaining, game_over=self.game_over, disp_deck=self.disp_deck, attempts_made=self.attempts_made, match_list=self.match_list, matches_found=self.matches_found, time_created=self.time_created)
|
def to_form_game_history(self, message):
'Return a GameHistory representation of the game;\n this form displays a custom list of the game entities and is\n used in the get_game_history endpoint'
return GameHistory(user_name=self.user.get().name, guess_history=self.guess_history, attempts_made=self.attempts_made, match_list=self.match_list, matches_found=self.matches_found, deck=self.deck, time_created=self.time_created, message=message)
| 2,492,024,020,348,699,000
|
Return a GameHistory representation of the game;
this form displays a custom list of the game entities and is
used in the get_game_history endpoint
|
models/game.py
|
to_form_game_history
|
bencam/pelmanism
|
python
|
def to_form_game_history(self, message):
'Return a GameHistory representation of the game;\n this form displays a custom list of the game entities and is\n used in the get_game_history endpoint'
return GameHistory(user_name=self.user.get().name, guess_history=self.guess_history, attempts_made=self.attempts_made, match_list=self.match_list, matches_found=self.matches_found, deck=self.deck, time_created=self.time_created, message=message)
|
def end_game(self, won=False):
'End the game; if won is True, the player won;\n if won is False, the player lost'
self.game_over = True
self.put()
points = self.points = (500 - ((self.attempts_made - self.matches_found) * 10))
score = Score(user=self.user, time_completed=str(datetime.now()), won=won, attempts_made=self.attempts_made, game_deck=self.deck, matches_found=self.matches_found, points=points)
score.put()
| -163,030,221,212,685,150
|
End the game; if won is True, the player won;
if won is False, the player lost
|
models/game.py
|
end_game
|
bencam/pelmanism
|
python
|
def end_game(self, won=False):
'End the game; if won is True, the player won;\n if won is False, the player lost'
self.game_over = True
self.put()
points = self.points = (500 - ((self.attempts_made - self.matches_found) * 10))
score = Score(user=self.user, time_completed=str(datetime.now()), won=won, attempts_made=self.attempts_made, game_deck=self.deck, matches_found=self.matches_found, points=points)
score.put()
|
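Worked example of the scoring formula in end_game, with illustrative numbers (the Game and Score entities are App Engine ndb models, so only the arithmetic is reproduced): 500 base points minus 10 per non-matching attempt.

attempts_made, matches_found = 40, 26
points = 500 - (attempts_made - matches_found) * 10
assert points == 360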
def test_open_with_plus(self):
'Opening with r+ is not allowed.'
with scratch_file('example.gz') as path:
with open(path, 'w+') as fout:
pass
with self.assertRaises(ValueError):
with gzippy.open(path, 'r+') as fin:
pass
| 1,762,328,780,264,199,700
|
Opening with r+ is not allowed.
|
test/test_gzippy.py
|
test_open_with_plus
|
seomoz/gzippy
|
python
|
def test_open_with_plus(self):
with scratch_file('example.gz') as path:
with open(path, 'w+') as fout:
pass
with self.assertRaises(ValueError):
with gzippy.open(path, 'r+') as fin:
pass
|
def test_open_with_append(self):
'Opening in append mode is not allowed.'
with scratch_file('example.gz') as path:
with open(path, 'w+') as fout:
pass
with self.assertRaises(ValueError):
with gzippy.open(path, 'ab') as fout:
pass
| 2,778,444,649,322,058,000
|
Opening in append mode is not allowed.
|
test/test_gzippy.py
|
test_open_with_append
|
seomoz/gzippy
|
python
|
def test_open_with_append(self):
with scratch_file('example.gz') as path:
with open(path, 'w+') as fout:
pass
with self.assertRaises(ValueError):
with gzippy.open(path, 'ab') as fout:
pass
|
@classmethod
def init_and_run(cls, *args, **kwargs):
'Instantiate and run `self.main()` using `curses.wrapper`.\n\n Parameters\n ----------\n *args : tuple\n Positional arguments to be passed to the CursesInterface constructor.\n **kwargs : dict, optional\n Keyword arguments to be passed to the CursesInterface constructor.\n\n Returns\n -------\n CursesInterface object\n An instance of the CursesInterface class.\n '
ui = cls(*args, **kwargs)
curses.wrapper(ui.main)
return ui
| 210,966,248,905,593,100
|
Instantiate and run `self.main()` using `curses.wrapper`.
Parameters
----------
*args : tuple
Positional arguments to be passed to the CursesInterface constructor.
**kwargs : dict, optional
Keyword arguments to be passed to the CursesInterface constructor.
Returns
-------
CursesInterface object
An instance of the CursesInterface class.
|
src/wordle_cheater/interface.py
|
init_and_run
|
edsq/wordle-cheater
|
python
|
@classmethod
def init_and_run(cls, *args, **kwargs):
'Instantiate and run `self.main()` using `curses.wrapper`.\n\n Parameters\n ----------\n *args : tuple\n Positional arguments to be passed to the CursesInterface constructor.\n **kwargs : dict, optional\n Keyword arguments to be passed to the CursesInterface constructor.\n\n Returns\n -------\n CursesInterface object\n An instance of the CursesInterface class.\n '
ui = cls(*args, **kwargs)
curses.wrapper(ui.main)
return ui
|
def main(self, stdscr):
'Run the interface.\n\n Should typically be called using `curses.wrapper`.\n\n Parameters\n ----------\n stdscr : curses.Window object\n The curses screen which the user interacts with.\n '
self.stdscr = stdscr
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_YELLOW)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_RED)
(height, width) = stdscr.getmaxyx()
self.results_window = curses.newwin((height - 12), width, 12, 0)
x0 = ((width // 2) - 3)
y0 = 5
self.print_title()
self.enter_letters(x0=x0, y0=y0)
self.print_results()
self.set_cursor_visibility(False)
self.get_key()
| -9,170,470,526,508,910,000
|
Run the interface.
Should typically be called using `curses.wrapper`.
Parameters
----------
stdscr : curses.Window object
The curses screen which the user interacts with.
|
src/wordle_cheater/interface.py
|
main
|
edsq/wordle-cheater
|
python
|
def main(self, stdscr):
'Run the interface.\n\n Should typically be called using `curses.wrapper`.\n\n Parameters\n ----------\n stdscr : curses.Window object\n The curses screen which the user interacts with.\n '
self.stdscr = stdscr
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_YELLOW)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_RED)
(height, width) = stdscr.getmaxyx()
self.results_window = curses.newwin((height - 12), width, 12, 0)
x0 = ((width // 2) - 3)
y0 = 5
self.print_title()
self.enter_letters(x0=x0, y0=y0)
self.print_results()
self.set_cursor_visibility(False)
self.get_key()
|
def center_print(self, y, string, *args, **kwargs):
'Print in the center of the screen.\n\n Parameters\n ----------\n y : int\n The vertical location at which to print.\n string : str\n The string to print.\n *args : tuple\n Additional arguments to be passed to `stdscr.addstr`.\n **kwargs : dict, optional\n Keyword arguments to be passed to `stdscr.addstr`.\n '
(height, width) = self.stdscr.getmaxyx()
str_length = len(string)
x_mid = (width // 2)
self.stdscr.addstr(y, (x_mid - (str_length // 2)), string, *args, **kwargs)
| -5,137,827,943,173,549,000
|
Print in the center of the screen.
Parameters
----------
y : int
The vertical location at which to print.
string : str
The string to print.
*args : tuple
Additional arguments to be passed to `stdscr.addstr`.
**kwargs : dict, optional
Keyword arguments to be passed to `stdscr.addstr`.
|
src/wordle_cheater/interface.py
|
center_print
|
edsq/wordle-cheater
|
python
|
def center_print(self, y, string, *args, **kwargs):
'Print in the center of the screen.\n\n Parameters\n ----------\n y : int\n The vertical location at which to print.\n string : str\n The string to print.\n *args : tuple\n Additional arguments to be passed to `stdscr.addstr`.\n **kwargs : dict, optional\n Keyword arguments to be passed to `stdscr.addstr`.\n '
(height, width) = self.stdscr.getmaxyx()
str_length = len(string)
x_mid = (width // 2)
self.stdscr.addstr(y, (x_mid - (str_length // 2)), string, *args, **kwargs)
|
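The centering arithmetic in center_print, worked through on assumed dimensions: on an 80-column screen the 17-character title starts at column 40 - 8 = 32.

width, string = 80, 'Wordle Cheater :('
x = width // 2 - len(string) // 2
assert (len(string), x) == (17, 32)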
def print_title(self):
'Print title and instructions.'
self.center_print(1, 'Wordle Cheater :(', curses.A_BOLD)
self.center_print(2, 'Enter guesses below.')
self.center_print(3, 'spacebar: change color', curses.A_DIM)
| -4,790,512,370,770,830,000
|
Print title and instructions.
|
src/wordle_cheater/interface.py
|
print_title
|
edsq/wordle-cheater
|
python
|
def print_title(self):
self.center_print(1, 'Wordle Cheater :(', curses.A_BOLD)
self.center_print(2, 'Enter guesses below.')
self.center_print(3, 'spacebar: change color', curses.A_DIM)
|
def print_results(self, sep=' '):
'Print possible solutions given guesses.\n\n Parameters\n ----------\n sep : str, optional\n The string to display between each possible solution.\n '
(height, width) = self.results_window.getmaxyx()
max_rows = (height - 1)
cols = (width // (5 + len(sep)))
out_str = self.get_results_string(max_rows=max_rows, max_cols=cols, sep=sep)
self.results_window.clear()
self.results_window.addstr(0, 0, 'Possible solutions:', curses.A_UNDERLINE)
self.results_window.addstr(1, 0, out_str)
self.results_window.refresh()
| 5,907,706,909,988,606,000
|
Print possible solutions given guesses.
Parameters
----------
sep : str, optional
The string to display between each possible solution.
|
src/wordle_cheater/interface.py
|
print_results
|
edsq/wordle-cheater
|
python
|
def print_results(self, sep=' '):
'Print possible solutions given guesses.\n\n Parameters\n ----------\n sep : str, optional\n The string to display between each possible solution.\n '
(height, width) = self.results_window.getmaxyx()
max_rows = (height - 1)
cols = (width // (5 + len(sep)))
out_str = self.get_results_string(max_rows=max_rows, max_cols=cols, sep=sep)
self.results_window.clear()
self.results_window.addstr(0, 0, 'Possible solutions:', curses.A_UNDERLINE)
self.results_window.addstr(1, 0, out_str)
self.results_window.refresh()
|
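Slot math from print_results, with an assumed 80-column window and the default single-space separator: every Wordle word is 5 letters, so each slot costs 5 + len(sep) columns.

width, sep = 80, ' '
cols = width // (5 + len(sep))
assert cols == 13   # 13 five-letter words per row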
def print(self, x, y, string, c=None):
"Print `string` at coordinates `x`, `y`.\n\n Parameters\n ----------\n x : int\n Horizontal position at which to print the string.\n y : int\n Height at which to print the string.\n string : str\n The string to print.\n c : str, {None, 'black', 'yellow', 'green', 'red'}\n The color in which to print. Must be one of\n ['black', 'yellow', 'green', 'red'] or None. If `c` is None, it should\n print in the default color pair.\n "
if (c is None):
self.stdscr.addstr(y, x, string)
elif (c == 'black'):
self.stdscr.addstr(y, x, string, curses.color_pair(1))
elif (c == 'yellow'):
self.stdscr.addstr(y, x, string, curses.color_pair(2))
elif (c == 'green'):
self.stdscr.addstr(y, x, string, curses.color_pair(3))
elif (c == 'red'):
self.stdscr.addstr(y, x, string, curses.color_pair(4))
else:
raise ValueError("`c` must be one of ['black', 'yellow', 'green', 'red'] or none.")
| -7,147,534,154,132,215,000
|
Print `string` at coordinates `x`, `y`.
Parameters
----------
x : int
Horizontal position at which to print the string.
y : int
Height at which to print the string.
string : str
The string to print.
c : str, {None, 'black', 'yellow', 'green', 'red'}
The color in which to print. Must be one of
['black', 'yellow', 'green', 'red'] or None. If `c` is None, it should
print in the default color pair.
|
src/wordle_cheater/interface.py
|
print
|
edsq/wordle-cheater
|
python
|
def print(self, x, y, string, c=None):
"Print `string` at coordinates `x`, `y`.\n\n Parameters\n ----------\n x : int\n Horizontal position at which to print the string.\n y : int\n Height at which to print the string.\n string : str\n The string to print.\n c : str, {None, 'black', 'yellow', 'green', 'red'}\n The color in which to print. Must be one of\n ['black', 'yellow', 'green', 'red'] or None. If `c` is None, it should\n print in the default color pair.\n "
if (c is None):
self.stdscr.addstr(y, x, string)
elif (c == 'black'):
self.stdscr.addstr(y, x, string, curses.color_pair(1))
elif (c == 'yellow'):
self.stdscr.addstr(y, x, string, curses.color_pair(2))
elif (c == 'green'):
self.stdscr.addstr(y, x, string, curses.color_pair(3))
elif (c == 'red'):
self.stdscr.addstr(y, x, string, curses.color_pair(4))
else:
raise ValueError("`c` must be one of ['black', 'yellow', 'green', 'red'] or none.")
|
def sleep(self, ms):
'Temporarily suspend execution.\n\n Parameters\n ----------\n ms : int\n Number of milliseconds before execution resumes.\n '
curses.napms(ms)
self.stdscr.refresh()
| -479,470,963,160,505,860
|
Temporarily suspend execution.
Parameters
----------
ms : int
Number of milliseconds before execution resumes.
|
src/wordle_cheater/interface.py
|
sleep
|
edsq/wordle-cheater
|
python
|
def sleep(self, ms):
'Temporarily suspend execution.\n\n Parameters\n ----------\n ms : int\n Number of milliseconds before execution resumes.\n '
curses.napms(ms)
self.stdscr.refresh()
|
def move_cursor(self, x, y):
'Move cursor to position `x`, `y`.\n\n Parameters\n ----------\n x : int\n Desired horizontal position of cursor.\n y : int\n Desired vertical position of cursor.\n '
self.stdscr.move(y, x)
| -8,032,974,080,929,446,000
|
Move cursor to position `x`, `y`.
Parameters
----------
x : int
Desired horizontal position of cursor.
y : int
Desired vertical position of cursor.
|
src/wordle_cheater/interface.py
|
move_cursor
|
edsq/wordle-cheater
|
python
|
def move_cursor(self, x, y):
'Move cursor to position `x`, `y`.\n\n Parameters\n ----------\n x : int\n Desired horizontal position of cursor.\n y : int\n Desired vertical position of cursor.\n '
self.stdscr.move(y, x)
|
def set_cursor_visibility(self, visible):
'Set cursor visibility.\n\n Parameters\n ----------\n visible : bool\n Whether or not the cursor is visible.\n '
curses.curs_set(visible)
| -411,079,154,944,363,970
|
Set cursor visibility.
Parameters
----------
visible : bool
Whether or not the cursor is visible.
|
src/wordle_cheater/interface.py
|
set_cursor_visibility
|
edsq/wordle-cheater
|
python
|
def set_cursor_visibility(self, visible):
'Set cursor visibility.\n\n Parameters\n ----------\n visible : bool\n Whether or not the cursor is visible.\n '
curses.curs_set(visible)
|
def get_key(self):
'Get a key press.\n\n Returns\n -------\n key : str\n The key that was pressed.\n '
return self.stdscr.getkey()
| -7,643,117,547,012,769,000
|
Get a key press.
Returns
-------
key : str
The key that was pressed.
|
src/wordle_cheater/interface.py
|
get_key
|
edsq/wordle-cheater
|
python
|
def get_key(self):
'Get a key press.\n\n Returns\n -------\n key : str\n The key that was pressed.\n '
return self.stdscr.getkey()
|
def is_enter(self, key):
'Check if `key` is the enter/return key.\n\n Parameters\n ----------\n key : str\n The key to check.\n\n Returns\n -------\n is_enter : bool\n True if `key` is the enter or return key, False otherwise.\n '
if ((key == curses.KEY_ENTER) or (key == '\n') or (key == '\r')):
return True
else:
return False
| -7,837,240,422,818,573,000
|
Check if `key` is the enter/return key.
Parameters
----------
key : str
The key to check.
Returns
-------
is_enter : bool
True if `key` is the enter or return key, False otherwise.
|
src/wordle_cheater/interface.py
|
is_enter
|
edsq/wordle-cheater
|
python
|
def is_enter(self, key):
'Check if `key` is the enter/return key.\n\n Parameters\n ----------\n key : str\n The key to check.\n\n Returns\n -------\n is_enter : bool\n True if `key` is the enter or return key, False otherwise.\n '
if ((key == curses.KEY_ENTER) or (key == '\n') or (key == '\r')):
return True
else:
return False
|
def is_backspace(self, key):
'Check if `key` is the backspace/delete key.\n\n Parameters\n ----------\n key : str\n The key to check.\n\n Returns\n -------\n is_backspace : bool\n True if `key` is the backspace or delete key, False otherwise.\n '
if ((key == curses.KEY_BACKSPACE) or (key == '\x08') or (key == '\x7f')):
return True
else:
return False
| -1,635,305,886,461,331,500
|
Check if `key` is the backspace/delete key.
Parameters
----------
key : str
The key to check.
Returns
-------
is_backspace : bool
True if `key` is the backspace or delete key, False otherwise.
|
src/wordle_cheater/interface.py
|
is_backspace
|
edsq/wordle-cheater
|
python
|
def is_backspace(self, key):
if ((key == curses.KEY_BACKSPACE) or (key == '\x08') or (key == '\x7f')):
return True
else:
return False
|
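The three curses key helpers compose into a simple input loop. A minimal sketch, assuming a screen managed by `curses.wrapper` (the `read_word` helper is hypothetical). Note that `stdscr.getkey()` returns strings, with special keys spelled as names like 'KEY_BACKSPACE', so the string comparisons in `is_enter` and `is_backspace` do the real work:

import curses

def read_word(stdscr):
    # Collect printable characters until enter; backspace deletes.
    chars = []
    while True:
        key = stdscr.getkey()
        if key in ('\n', '\r'):                          # enter/return
            return ''.join(chars)
        elif key in ('\x08', '\x7f', 'KEY_BACKSPACE'):   # backspace variants
            if chars:
                chars.pop()
        elif len(key) == 1:                              # skip other special keys
            chars.append(key)

# print(curses.wrapper(read_word))
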
@property
def curs_xy(self):
'Location of cursor.'
return self._curs_xy
| -8,168,196,711,324,319,000
|
Location of cursor.
|
src/wordle_cheater/interface.py
|
curs_xy
|
edsq/wordle-cheater
|
python
|
@property
def curs_xy(self):
return self._curs_xy
|
@curs_xy.setter
def curs_xy(self, xy):
'Update max line lengths when we update cursor position.'
(x, y) = xy
if (y > (len(self.line_lengths) - 1)):
self.line_lengths += [0 for i in range(((y - len(self.line_lengths)) + 1))]
if (x > self.line_lengths[y]):
self.line_lengths[y] = x
self._curs_xy = xy
| 1,690,258,665,582,618,400
|
Update max line lengths when we update cursor position.
|
src/wordle_cheater/interface.py
|
curs_xy
|
edsq/wordle-cheater
|
python
|
@curs_xy.setter
def curs_xy(self, xy):
(x, y) = xy
if (y > (len(self.line_lengths) - 1)):
self.line_lengths += [0 for i in range(((y - len(self.line_lengths)) + 1))]
if (x > self.line_lengths[y]):
self.line_lengths[y] = x
self._curs_xy = xy
|
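The setter is pure bookkeeping: `line_lengths[y]` records the rightmost column ever written on row `y`, and the list grows lazily as new rows are visited. The same logic in isolation (standalone sketch with hypothetical names):

line_lengths = [0]

def record_cursor(x, y):
    # Grow the per-row length table on demand, then record the
    # rightmost column reached on row y.
    if y > len(line_lengths) - 1:
        line_lengths.extend(0 for _ in range(y - len(line_lengths) + 1))
    if x > line_lengths[y]:
        line_lengths[y] = x

record_cursor(5, 0)   # line_lengths == [5]
record_cursor(3, 2)   # line_lengths == [5, 0, 3]
record_cursor(1, 2)   # unchanged: 1 < 3
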
def main(self):
'Run the interface.'
try:
self.print_title()
self.enter_letters(x0=self.x0, y0=self.y0)
self.print_results()
finally:
self.set_cursor_visibility(True)
| -3,395,075,301,970,717,000
|
Run the interface.
|
src/wordle_cheater/interface.py
|
main
|
edsq/wordle-cheater
|
python
|
def main(self):
try:
self.print_title()
self.enter_letters(x0=self.x0, y0=self.y0)
self.print_results()
finally:
self.set_cursor_visibility(True)
|
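The `try`/`finally` ensures the cursor is made visible again even if drawing raises; this is the standard pattern for any terminal state a program toggles. A generic sketch (hypothetical helper, not part of the module):

def with_hidden_cursor(ui, draw):
    # Hide the cursor while draw() runs; restore it on every exit
    # path, including exceptions and KeyboardInterrupt.
    ui.set_cursor_visibility(False)
    try:
        draw()
    finally:
        ui.set_cursor_visibility(True)
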
def print_title(self):
'Print title and instructions.'
self.print(0, 0, 'Wordle Cheater :(', bold=True)
self.print(0, 1, 'Enter guesses below.')
self.print(0, 2, 'spacebar: change color', dim=True)
| 4,562,588,889,675,491,000
|
Print title and instructions.
|
src/wordle_cheater/interface.py
|
print_title
|
edsq/wordle-cheater
|
python
|
def print_title(self):
self.print(0, 0, 'Wordle Cheater :(', bold=True)
self.print(0, 1, 'Enter guesses below.')
self.print(0, 2, 'spacebar: change color', dim=True)
|
def print_results(self):
'Print possible solutions given guesses.'
if self.entering_letters:
return
out_str = self.get_results_string(max_rows=self.max_rows, max_cols=self.max_cols, sep=' ')
self.move_cursor(0, (self.curs_xy[1] + 1))
click.secho('Possible solutions:', underline=True)
click.echo(out_str)
| -5,675,137,717,710,156,000
|
Print possible solutions given guesses.
|
src/wordle_cheater/interface.py
|
print_results
|
edsq/wordle-cheater
|
python
|
def print_results(self):
if self.entering_letters:
return
out_str = self.get_results_string(max_rows=self.max_rows, max_cols=self.max_cols, sep=' ')
self.move_cursor(0, (self.curs_xy[1] + 1))
click.secho('Possible solutions:', underline=True)
click.echo(out_str)
|
def print(self, x, y, string, c=None, *args, **kwargs):
"Print `string` at coordinates `x`, `y`.\n\n Parameters\n ----------\n x : int\n Horizontal position at which to print the string.\n y : int\n Height at which to print the string.\n string : str\n The string to print.\n c : str, {None, 'black', 'yellow', 'green', 'red'}\n The color in which to print. Must be one of\n ['black', 'yellow', 'green', 'red'] or None. If `c` is None, it should\n print in the default color pair.\n *args : tuple\n Additional arguments to be passed to `click.secho`.\n **kwargs : dict, optional\n Keyword arguments to be passed to `click.secho`.\n "
self.move_cursor(x, y)
if (c is None):
click.secho(string, *args, nl=False, **kwargs)
elif (c == 'black'):
click.secho(string, fg='white', bg='black', nl=False)
elif (c == 'yellow'):
click.secho(string, fg='black', bg='yellow', nl=False)
elif (c == 'green'):
click.secho(string, fg='black', bg='green', nl=False)
elif (c == 'red'):
click.secho(string, fg='black', bg='red', nl=False)
else:
raise ValueError("`c` must be one of ['black', 'yellow', 'green', 'red'] or None.")
self.curs_xy = ((self.curs_xy[0] + len(string)), self.curs_xy[1])
| 733,578,645,901,021,300
|
Print `string` at coordinates `x`, `y`.
Parameters
----------
x : int
Horizontal position at which to print the string.
y : int
Height at which to print the string.
string : str
The string to print.
c : str, {None, 'black', 'yellow', 'green', 'red'}
The color in which to print. Must be one of
['black', 'yellow', 'green', 'red'] or None. If `c` is None, it should
print in the default color pair.
*args : tuple
Additional arguments to be passed to `click.secho`.
**kwargs : dict, optional
Keyword arguments to be passed to `click.secho`.
|
src/wordle_cheater/interface.py
|
print
|
edsq/wordle-cheater
|
python
|
def print(self, x, y, string, c=None, *args, **kwargs):
"Print `string` at coordinates `x`, `y`.\n\n Parameters\n ----------\n x : int\n Horizontal position at which to print the string.\n y : int\n Height at which to print the string.\n string : str\n The string to print.\n c : str, {None, 'black', 'yellow', 'green', 'red'}\n The color in which to print. Must be one of\n ['black', 'yellow', 'green', 'red'] or None. If `c` is None, it should\n print in the default color pair.\n *args : tuple\n Additional arguments to be passed to `click.secho`.\n **kwargs : dict, optional\n Keyword arguments to be passed to `click.secho`.\n "
self.move_cursor(x, y)
if (c is None):
click.secho(string, *args, nl=False, **kwargs)
elif (c == 'black'):
click.secho(string, fg='white', bg='black', nl=False)
elif (c == 'yellow'):
click.secho(string, fg='black', bg='yellow', nl=False)
elif (c == 'green'):
click.secho(string, fg='black', bg='green', nl=False)
elif (c == 'red'):
click.secho(string, fg='black', bg='red', nl=False)
else:
raise ValueError("`c` must be one of ['black', 'yellow', 'green', 'red'] or None.")
self.curs_xy = ((self.curs_xy[0] + len(string)), self.curs_xy[1])
|
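Stripped of the cursor bookkeeping, the color dispatch is a fixed mapping from tile color to `click.secho` foreground/background pairs. A standalone sketch that renders the same four swatches:

import click

# The fg/bg pairs used above, keyed by Wordle tile color.
COLOR_PAIRS = {
    'black': {'fg': 'white', 'bg': 'black'},
    'yellow': {'fg': 'black', 'bg': 'yellow'},
    'green': {'fg': 'black', 'bg': 'green'},
    'red': {'fg': 'black', 'bg': 'red'},
}

for name, pair in COLOR_PAIRS.items():
    click.secho(f' {name} ', nl=False, **pair)
    click.echo(' ', nl=False)
click.echo()
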
def sleep(self, ms):
'Temporarily suspend execution.\n\n Parameters\n ----------\n ms : int\n Number of milliseconds before execution resumes.\n '
time.sleep((ms / 1000))
| 3,032,397,926,492,459,000
|
Temporarily suspend execution.
Parameters
----------
ms : int
Number of milliseconds before execution resumes.
|
src/wordle_cheater/interface.py
|
sleep
|
edsq/wordle-cheater
|
python
|
def sleep(self, ms):
time.sleep((ms / 1000))
|
def move_cursor(self, x, y):
'Move cursor to position `x`, `y`.\n\n Parameters\n ----------\n x : int\n Desired horizontal position of cursor.\n y : int\n Desired vertical position of cursor.\n '
if (self.curs_xy[1] > y):
click.echo(f'{self.esc}[{(self.curs_xy[1] - y)}A', nl=False)
elif (self.curs_xy[1] < y):
if ((len(self.line_lengths) - 1) < y):
click.echo(f'{self.esc}[{((len(self.line_lengths) - 1) - self.curs_xy[1])}B', nl=False)
click.echo(('\n' * (y - (len(self.line_lengths) - 1))), nl=False)
click.echo((' ' * x), nl=False)
self.curs_xy = (x, y)
return
else:
click.echo(f'{self.esc}[{(y - self.curs_xy[1])}B', nl=False)
if (self.curs_xy[0] > x):
click.echo(f'{self.esc}[{(self.curs_xy[0] - x)}D', nl=False)
elif (self.curs_xy[0] < x):
if (self.line_lengths[y] > x):
click.echo(f'{self.esc}[{(self.line_lengths[y] - self.curs_xy[0])}C', nl=False)
click.echo((' ' * (x - self.line_lengths[y])), nl=False)
else:
click.echo(f'{self.esc}[{(x - self.curs_xy[0])}C', nl=False)
self.curs_xy = (x, y)
| 1,471,144,024,528,282,600
|
Move cursor to position `x`, `y`.
Parameters
----------
x : int
Desired horizontal position of cursor.
y : int
Desired vertical position of cursor.
|
src/wordle_cheater/interface.py
|
move_cursor
|
edsq/wordle-cheater
|
python
|
def move_cursor(self, x, y):
if (self.curs_xy[1] > y):
click.echo(f'{self.esc}[{(self.curs_xy[1] - y)}A', nl=False)
elif (self.curs_xy[1] < y):
if ((len(self.line_lengths) - 1) < y):
click.echo(f'{self.esc}[{((len(self.line_lengths) - 1) - self.curs_xy[1])}B', nl=False)
click.echo(('\n' * (y - (len(self.line_lengths) - 1))), nl=False)
click.echo((' ' * x), nl=False)
self.curs_xy = (x, y)
return
else:
click.echo(f'{self.esc}[{(y - self.curs_xy[1])}B', nl=False)
if (self.curs_xy[0] > x):
click.echo(f'{self.esc}[{(self.curs_xy[0] - x)}D', nl=False)
elif (self.curs_xy[0] < x):
if (self.line_lengths[y] > x):
click.echo(f'{self.esc}[{(self.line_lengths[y] - self.curs_xy[0])}C', nl=False)
click.echo((' ' * (x - self.line_lengths[y])), nl=False)
else:
click.echo(f'{self.esc}[{(x - self.curs_xy[0])}C', nl=False)
self.curs_xy = (x, y)
|
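This backend emulates relative cursor movement with ANSI CSI sequences: `ESC[nA` moves up n rows, `ESC[nB` down, `ESC[nC` forward, `ESC[nD` back. The `line_lengths` checks exist because moving right past the end of a written line does not emit spaces, so the code pads with real characters when it must travel beyond previously written text. The bare escape codes can be sketched independently, assuming `self.esc` is '\x1b' (an assumption; the attribute is set elsewhere in the class):

ESC = '\x1b'  # assumed value of self.esc

def ansi_move(dx, dy):
    # Build a CSI sequence for a relative move; no padding logic here.
    out = ''
    if dy < 0:
        out += f'{ESC}[{-dy}A'   # cursor up
    elif dy > 0:
        out += f'{ESC}[{dy}B'    # cursor down
    if dx < 0:
        out += f'{ESC}[{-dx}D'   # cursor back
    elif dx > 0:
        out += f'{ESC}[{dx}C'    # cursor forward
    return out

# Overwrite the 'b' in 'abc': move back two columns, print 'X'.
print('abc' + ansi_move(-2, 0) + 'X')  # shows 'aXc'
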
def set_cursor_visibility(self, visible):
'Set cursor visibility.\n\n Parameters\n ----------\n visible : bool\n Whether or not the cursor is visible.\n '
if visible:
click.echo(f'{self.esc}[?25h', nl=False)
else:
click.echo(f'{self.esc}[?25l', nl=False)
| 396,357,219,551,274,560
|
Set cursor visibility.
Parameters
----------
visible : bool
Whether or not the cursor is visible.
|
src/wordle_cheater/interface.py
|
set_cursor_visibility
|
edsq/wordle-cheater
|
python
|
def set_cursor_visibility(self, visible):
if visible:
click.echo(f'{self.esc}[?25h', nl=False)
else:
click.echo(f'{self.esc}[?25l', nl=False)
|
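`?25l` and `?25h` are the DECTCEM private-mode sequences that hide and show the cursor on ANSI-capable terminals. A minimal demonstration (assumes such a terminal):

import sys
import time

sys.stdout.write('\x1b[?25l')  # DECTCEM reset: hide the cursor
sys.stdout.flush()
time.sleep(1)
sys.stdout.write('\x1b[?25h')  # DECTCEM set: show it again
sys.stdout.flush()
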
def get_key(self):
'Get a key press.\n\n Returns\n -------\n key : str\n The key that was pressed.\n '
return click.getchar()
| -1,230,420,192,345,417,700
|
Get a key press.
Returns
-------
key : str
The key that was pressed.
|
src/wordle_cheater/interface.py
|
get_key
|
edsq/wordle-cheater
|
python
|
def get_key(self):
return click.getchar()
|
def is_enter(self, key):
'Check if `key` is the enter/return key.\n\n Parameters\n ----------\n key : str\n The key to check.\n\n Returns\n -------\n is_enter : bool\n True if `key` is the enter or return key, False otherwise.\n '
if ((key == '\r') or (key == '\n')):
return True
else:
return False
| 1,334,841,429,322,325,000
|
Check if `key` is the enter/return key.
Parameters
----------
key : str
The key to check.
Returns
-------
is_enter : bool
True if `key` is the enter or return key, False otherwise.
|
src/wordle_cheater/interface.py
|
is_enter
|
edsq/wordle-cheater
|
python
|
def is_enter(self, key):
if ((key == '\r') or (key == '\n')):
return True
else:
return False
|
def is_backspace(self, key):
'Check if `key` is the backspace/delete key.\n\n Parameters\n ----------\n key : str\n The key to check.\n\n Returns\n -------\n is_backspace : bool\n True if `key` is the backspace or delete key, False otherwise.\n '
if ((key == '\x08') or (key == '\x7f')):
return True
else:
return False
| -1,771,721,439,019,337,500
|
Check if `key` is the backspace/delete key.
Parameters
----------
key : str
The key to check.
Returns
-------
is_backspace : bool
True if `key` is the backspace or delete key, False otherwise.
|
src/wordle_cheater/interface.py
|
is_backspace
|
edsq/wordle-cheater
|
python
|
def is_backspace(self, key):
if ((key == '\x08') or (key == '\x7f')):
return True
else:
return False
|
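As in the curses backend, the click key helpers compose into an input loop. `click.getchar()` returns '\r' for enter on most terminals and '\x7f' (or '\x08' on Windows) for backspace, matching the checks above. A minimal sketch (hypothetical `read_word` helper):

import click

def read_word():
    # Read characters until enter, handling backspace; getchar() does not echo.
    chars = []
    while True:
        key = click.getchar()
        if key in ('\r', '\n'):        # enter, as in is_enter above
            return ''.join(chars)
        elif key in ('\x08', '\x7f'):  # backspace/delete, as in is_backspace
            if chars:
                chars.pop()
        else:
            chars.append(key)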