repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/components/robot/dynamixel_client_test.py | robel/components/robot/dynamixel_client_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for DynamixelClient."""
from absl.testing import absltest
from robel.components.robot.dynamixel_client import DynamixelClient
from robel.utils.testing.mock_dynamixel_sdk import patch_dynamixel
class DynamixelClientTest(absltest.TestCase):
    """Unit test class for DynamixelClient."""

    @patch_dynamixel(test=[1, 2, 3, 4])
    def test_connect(self, sdk):
        """Connecting should claim the port and enable all motors."""
        dxl_client = DynamixelClient([1, 2, 3, 4], port='test')
        # The client must not open the port until connect() is called.
        self.assertFalse(dxl_client.is_connected)
        dxl_client.connect()
        self.assertIn('test', sdk.used_ports)
        self.assertListEqual(sdk.get_enabled_motors('test'), [1, 2, 3, 4])
        dxl_client.disconnect()

    @patch_dynamixel(test=[1, 2, 3, 4])
    def test_torque_enabled(self, sdk):
        """Torque can be toggled per-motor; disconnect disables all."""
        dxl_client = DynamixelClient([1, 2, 3, 4], port='test')
        dxl_client.connect()
        self.assertListEqual(sdk.get_enabled_motors('test'), [1, 2, 3, 4])
        # Disable a subset, then re-enable a (partially overlapping) subset.
        dxl_client.set_torque_enabled([1, 3], False)
        self.assertListEqual(sdk.get_enabled_motors('test'), [2, 4])
        dxl_client.set_torque_enabled([1, 2], True)
        self.assertListEqual(sdk.get_enabled_motors('test'), [1, 2, 4])
        # Disconnecting should turn off torque on every motor.
        dxl_client.disconnect()
        self.assertListEqual(sdk.get_enabled_motors('test'), [])
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/components/robot/robot_test.py | robel/components/robot/robot_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for RobotComponent and RobotGroupConfig."""
from typing import Any
from absl.testing import absltest
import numpy as np
from robel.components.robot.robot import ControlMode, RobotComponent
from robel.utils.testing.mock_sim_scene import MockSimScene
class RobotComponentTest(absltest.TestCase):
    """Unit test class for RobotComponent."""

    def test_get_state(self):
        """Tests querying the state of multiple groups."""
        scene = MockSimScene(nq=10)  # type: Any
        scene.data.qpos[:] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        scene.data.qvel[:] = [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
        component = RobotComponent(
            scene,
            groups={
                'a': {
                    'qpos_indices': [0, 1, 3, 5],
                },
                'b': {
                    'qpos_indices': [2, 6],
                    'qvel_indices': [7, 8, 9],
                },
            })
        state_a, state_b = component.get_state(['a', 'b'])
        # Group 'a' reuses its qpos indices for qvel; 'b' overrides qvel.
        np.testing.assert_array_equal(state_a.qpos, [1, 2, 4, 6])
        np.testing.assert_array_equal(state_a.qvel, [11, 12, 14, 16])
        np.testing.assert_array_equal(state_b.qpos, [3, 7])
        np.testing.assert_array_equal(state_b.qvel, [18, 19, 20])

    def test_step(self):
        """Tests stepping with an action for multiple groups."""
        scene = MockSimScene(nq=10, ctrl_range=[-1, 1])  # type: Any
        component = RobotComponent(
            scene,
            groups={
                'a': {
                    'qpos_indices': [0, 1, 3, 5],
                },
                'b': {
                    'actuator_indices': [7, 8, 9],
                },
            })
        component.step({
            'a': np.array([.2, .4, .6, .8]),
            'b': np.array([.1, .3, .5])
        })
        # Unaddressed actuators remain zero; each group writes its slots.
        np.testing.assert_allclose(scene.data.ctrl,
                                   [.2, .4, 0, .6, 0, .8, 0, .1, .3, .5])
        self.assertEqual(scene.advance.call_count, 1)

    def test_step_denormalize(self):
        """Tests denormalizing the actions to the sim control range."""
        scene = MockSimScene(nq=5, ctrl_range=[0, 10])  # type: Any
        component = RobotComponent(
            scene, groups={'a': {
                'qpos_indices': [0, 1, 2, 3, 4],
            }})
        # Actions in [-1, 1] map linearly onto the [0, 10] control range.
        component.step({
            'a': np.array([-1, 1, -0.5, 0.5, 0]),
        })
        np.testing.assert_allclose(scene.data.ctrl, [0, 10, 2.5, 7.5, 5])

    def test_step_position_control_bounds(self):
        """Tests action clamping when doing position control."""
        scene = MockSimScene(nq=5, ctrl_range=[-1, 1])  # type: Any
        scene.data.qpos[:] = [-0.4, -0.2, 0, 0.2, 0.4]
        component = RobotComponent(
            scene,
            groups={
                'a': {
                    'qpos_indices': [0, 1, 2, 3, 4],
                    'qpos_range': [(-0.5, 0.5)] * 5,
                    'qvel_range': [(-0.2, 0.2)] * 5,
                }
            })
        # Targets are clamped by both position bounds and max velocity
        # relative to the current qpos.
        component.step({'a': np.array([-1, -1, 0.2, 1, 1])})
        np.testing.assert_allclose(scene.data.ctrl,
                                   [-0.5, -0.4, 0.1, 0.4, 0.5])

    def test_step_velocity_control_bounds(self):
        """Tests action clamping when doing velocity control."""
        scene = MockSimScene(nq=3, ctrl_range=[-10, 10])  # type: Any
        component = RobotComponent(
            scene,
            groups={
                'a': {
                    'control_mode': ControlMode.JOINT_VELOCITY,
                    'qpos_indices': [0, 1, 2],
                    'qvel_range': [(-2, 2), (-1, 5), (-7, -4)],
                }
            })
        # Normalized actions denormalize into each joint's velocity range.
        component.step({'a': np.array([-0.5, 1, -1])})
        np.testing.assert_allclose(scene.data.ctrl, [-1, 5, -7])
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/components/robot/builder.py | robel/components/robot/builder.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builder-specific logic for creating RobotComponents."""
from robel.components.builder import ComponentBuilder
from robel.components.robot.dynamixel_utils import CalibrationMap
class RobotComponentBuilder(ComponentBuilder):
    """Builds a RobotComponent."""

    def __init__(self):
        super().__init__()
        # Hardware configuration; both remain None for simulation-only use.
        self._dxl_device_path = None
        self._calibration_map = None

    def build(self, *args, **kwargs):
        """Builds the component."""
        if not self._dxl_device_path:
            # No hardware device configured: build a simulation component.
            from robel.components.robot.robot import RobotComponent
            return RobotComponent(*args, groups=self.group_configs, **kwargs)
        if self._calibration_map is not None:
            self._calibration_map.update_group_configs(self.group_configs)
        # Imported lazily so simulation-only users don't need hardware deps.
        from robel.components.robot.dynamixel_robot import (
            DynamixelRobotComponent)
        return DynamixelRobotComponent(
            *args,
            groups=self.group_configs,
            device_path=self._dxl_device_path,
            **kwargs)

    def set_dynamixel_device_path(self, device_path: str):
        """Sets the device path for a hardware robot component.

        If set, the builder will build a DynamixelRobotComponent.

        Args:
            device_path: The device path to the Dynamixel device.
        """
        self._dxl_device_path = device_path

    def set_hardware_calibration_map(self, calibration_map: CalibrationMap):
        """Sets the calibration map for hardware."""
        self._calibration_map = calibration_map
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/components/robot/hardware_robot_test.py | robel/components/robot/hardware_robot_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for RobotComponent and RobotGroupConfig."""
from typing import Any
from absl.testing import absltest
import numpy as np
from robel.components.robot.hardware_robot import (HardwareRobotComponent,
RobotState)
from robel.utils.testing.mock_sim_scene import MockSimScene
from robel.utils.testing.mock_time import patch_time
class DummyHardwareRobotComponent(HardwareRobotComponent):
    """Test implementation of HardwareRobotComponent."""

    def calibrate_state(self, state: RobotState, group_name: str):
        """Applies the group's calibration to the given state in-place."""
        group_config = self.get_config(group_name)
        self._calibrate_state(state, group_config)

    def decalibrate_qpos(self, qpos: np.ndarray, group_name: str):
        """Converts the given qpos back to hardware (uncalibrated) space."""
        group_config = self.get_config(group_name)
        return self._decalibrate_qpos(qpos, group_config)

    def do_timestep(self):
        """Performs a single synchronized timestep."""
        self._synchronize_timestep()
class HardwareRobotComponentTest(absltest.TestCase):
    """Unit test class for HardwareRobotComponent."""

    def test_calibrate_state(self):
        """Converts a state to component space."""
        scene = MockSimScene(nq=1)  # type: Any
        component = DummyHardwareRobotComponent(
            scene,
            groups={
                'a': {
                    'calib_scale': [0.5, -1, 1],
                    'calib_offset': [2, 0, -0.5],
                }
            })
        hw_state = RobotState(
            qpos=np.array([1., 1., 1.]), qvel=np.array([1., 1., 1.]))
        component.calibrate_state(hw_state, 'a')
        # qpos gets scale then offset; qvel is scaled only.
        np.testing.assert_allclose(hw_state.qpos, [2.5, -1, 0.5])
        np.testing.assert_allclose(hw_state.qvel, [0.5, -1, 1])

    def test_decalibrate_qpos(self):
        """Converts a component state qpos to hardware space."""
        scene = MockSimScene(nq=1)  # type: Any
        component = DummyHardwareRobotComponent(
            scene,
            groups={
                'a': {
                    'calib_scale': [0.5, -1, 1],
                    'calib_offset': [2, 0, -0.5],
                }
            })
        converted = component.decalibrate_qpos(np.array([1., 2., 3.]), 'a')
        # Inverse of calibration: subtract offset, divide by scale.
        np.testing.assert_allclose(converted, [-2, -2, 3.5])

    def test_timestep(self):
        """Tests advancement of time when doing timesteps."""
        with patch_time(
                'robel.components.robot.hardware_robot.time',
                initial_time=0) as mock_time:
            scene = MockSimScene(nq=1, step_duration=0.5)  # type: Any
            component = DummyHardwareRobotComponent(scene, groups={})
            self.assertEqual(component.time, 0)
            component.do_timestep()
            self.assertAlmostEqual(component.time, 0.5)
            # Elapsed time shorter than a step: sleep until the boundary.
            mock_time.sleep(0.25)
            component.do_timestep()
            self.assertAlmostEqual(component.time, 1.0)
            # Elapsed time longer than a step: time just advances.
            mock_time.sleep(0.6)
            component.do_timestep()
            self.assertAlmostEqual(component.time, 1.6)
            component.reset_time()
            self.assertEqual(component.time, 0)
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/components/robot/group_config_test.py | robel/components/robot/group_config_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RobotGroupConfig."""
from typing import Any
from absl.testing import absltest
import numpy as np
from robel.components.robot.group_config import RobotGroupConfig
from robel.utils.testing.mock_sim_scene import MockSimScene
class RobotGroupConfigTest(absltest.TestCase):
    """Unit tests for RobotGroupConfig."""

    def test_qpos_indices(self):
        """Checks defaults when nq == nv == nu."""
        scene = MockSimScene(nq=5, ctrl_range=(-1, 1))  # type: Any
        cfg = RobotGroupConfig(scene, qpos_indices=range(5))
        expected = np.array([0, 1, 2, 3, 4], dtype=int)
        # qvel and actuator indices default to the qpos indices.
        np.testing.assert_array_equal(cfg.qpos_indices, expected)
        np.testing.assert_array_equal(cfg.qvel_indices, expected)
        np.testing.assert_array_equal(cfg.actuator_indices, expected)
        np.testing.assert_array_equal(cfg.denormalize_center, np.zeros(5))
        np.testing.assert_array_equal(cfg.denormalize_range, np.ones(5))

    def test_qpos_out_of_range(self):
        """Ensures error when qpos indices are out of bounds."""
        scene = MockSimScene(nq=3)  # type: Any
        with self.assertRaises(AssertionError):
            RobotGroupConfig(scene, qpos_indices=[3])
        with self.assertRaises(AssertionError):
            RobotGroupConfig(scene, qpos_indices=[-4])

    def test_qvel_indices(self):
        """Checks defaults when nq == nu != nv."""
        scene = MockSimScene(nq=3, nv=5)  # type: Any
        cfg = RobotGroupConfig(
            scene, qpos_indices=[-1], qvel_indices=[3, 4])
        np.testing.assert_array_equal(cfg.qpos_indices, [-1])
        np.testing.assert_array_equal(cfg.qvel_indices, [3, 4])
        np.testing.assert_array_equal(cfg.actuator_indices, [-1])

    def test_qvel_out_of_range(self):
        """Ensures error when qvel indices are out of bounds."""
        scene = MockSimScene(nq=1, nv=3)  # type: Any
        with self.assertRaises(AssertionError):
            RobotGroupConfig(scene, qvel_indices=[3])
        with self.assertRaises(AssertionError):
            RobotGroupConfig(scene, qvel_indices=[-4])

    def test_qpos_range(self):
        """Checks presence of qpos_range when provided."""
        scene = MockSimScene(nq=2)  # type: Any
        cfg = RobotGroupConfig(
            scene, qpos_indices=[0, 1], qpos_range=[(0, 1)] * 2)
        np.testing.assert_array_equal(cfg.qpos_range,
                                      np.array([(0, 1), (0, 1)]))

    def test_qpos_invalid_range(self):
        """Ensures error when invalid qpos range is given."""
        scene = MockSimScene(nq=2)  # type: Any
        # Range length must match the number of indices.
        with self.assertRaises(AssertionError):
            RobotGroupConfig(
                scene, qpos_indices=[0, 1], qpos_range=[(0, 1)])
        # Each (lower, upper) pair must be ordered.
        with self.assertRaises(AssertionError):
            RobotGroupConfig(scene, qpos_indices=[0], qpos_range=[(1, 0)])

    def test_qvel_range(self):
        """Checks presence of qvel_range when provided."""
        scene = MockSimScene(nq=2)  # type: Any
        cfg = RobotGroupConfig(
            scene,
            qpos_indices=[0],
            qvel_indices=[0, 1],
            qvel_range=[(0, 1)] * 2)
        np.testing.assert_array_equal(cfg.qvel_range,
                                      np.array([(0, 1), (0, 1)]))

    def test_qvel_invalid_range(self):
        """Ensures error when invalid qvel range is given."""
        scene = MockSimScene(nq=2, nv=3)  # type: Any
        with self.assertRaises(AssertionError):
            RobotGroupConfig(
                scene, qvel_indices=[0, 2], qvel_range=[(-1, 1)] * 3)
        with self.assertRaises(AssertionError):
            RobotGroupConfig(scene, qvel_indices=[0], qpos_range=[(-1, -2)])

    def test_actuator_range(self):
        """Checks presence of actuator_range when provided."""
        scene = MockSimScene(nq=2, nu=3)  # type: Any
        cfg = RobotGroupConfig(
            scene,
            qpos_indices=[0, 1],
            qpos_range=[(0, 1)] * 2,
            actuator_indices=[0, 1, 2],
            actuator_range=[(-1, 3)] * 3,
        )
        np.testing.assert_array_equal(cfg.actuator_range, [(-1, 3)] * 3)
        # Denormalization center/range derive from the actuator range.
        np.testing.assert_array_equal(cfg.denormalize_center, [1.] * 3)
        np.testing.assert_array_equal(cfg.denormalize_range, [2.] * 3)

    def test_actuator_range_default(self):
        """Checks that actuator_range uses the simulation range by default."""
        scene = MockSimScene(nq=2)  # type: Any
        cfg = RobotGroupConfig(scene, qpos_indices=[0, 1])
        np.testing.assert_array_equal(cfg.actuator_range, [(-1, 1)] * 2)
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/reset_procedure.py | robel/utils/reset_procedure.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages resetting functionality."""
import abc
from robel.components.builder import ComponentBuilder
class ResetProcedure(metaclass=abc.ABCMeta):
    """Implements a reset procedure for a robot.

    Subclasses define how a robot is returned to its initial configuration
    between episodes.
    """

    def __init__(self):
        """Creates a new reset procedure."""

    @abc.abstractmethod
    def configure_reset_groups(self, builder: ComponentBuilder):
        """Configures the component groups needed for reset."""

    @abc.abstractmethod
    def reset(self, **kwargs):
        """Performs the reset procedure."""

    def finish(self):
        """Called when the reset is complete."""
        # Optional hook; the base implementation intentionally does nothing.
class ManualResetProcedure(ResetProcedure):
    """Reset procedure that waits for the user to press enter.

    The robot is expected to be reset by hand; this procedure simply blocks
    until the user confirms that the reset is done.
    """

    def configure_reset_groups(self, builder: ComponentBuilder):
        """Configures the component groups needed for reset."""
        # No component groups are needed for a manual reset.

    def reset(self, **kwargs):
        """Performs the reset procedure."""
        # Nothing to do; the human operator performs the physical reset.

    def finish(self):
        """Called when the reset is complete."""
        # Block until the operator confirms the robot is ready.
        input('Press Enter to start the episode...')
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/plotting.py | robel/utils/plotting.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods and classes for plotting data."""
from typing import Optional, Sequence
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class AnimatedPlot:
    """Plots data that can be updated over time."""

    def __init__(self, bounds: Optional[Sequence[float]] = None):
        """Initializes a new plot."""
        self.fig, self.ax = plt.subplots()
        self.ani = None
        # Optional callback invoked once per animation frame.
        self.update_fn = None
        self.iteration = 0
        self.elements = []
        if bounds:
            self.ax.axis(bounds)
        self.ax.grid()

    @property
    def is_open(self) -> bool:
        """Returns True if the figure window is open."""
        return plt.fignum_exists(self.fig.number)

    def add(self, element):
        """Adds an element for animation."""
        self.elements.append(element)

    def show(self, frame_rate: int = 30, blocking: bool = False):
        """Displays the plot."""
        frame_interval_ms = 1000 // frame_rate
        self.ani = animation.FuncAnimation(
            self.fig,
            self._update,
            interval=frame_interval_ms,
            init_func=self._init,
            blit=False,
        )
        plt.show(block=blocking)

    def refresh(self):
        """Allows the GUI to update."""
        self.fig.canvas.draw()
        # A tiny event-loop spin lets pending GUI events process.
        self.fig.canvas.start_event_loop(0.0001)

    def _init(self):
        # Initial set of artists passed to FuncAnimation.
        return self.elements

    def _update(self, iteration):
        # Called by FuncAnimation once per frame.
        self.iteration = iteration
        update_callback = self.update_fn
        if update_callback is not None:
            update_callback()  # pylint: disable=not-callable
        return self.elements
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/registration.py | robel/utils/registration.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for Gym environment registration."""
import logging
from gym.envs import registration as gym_reg
def register(env_id: str, class_path: str, **kwargs):
    """Registers the given class path as a Gym environment.

    Args:
        env_id: The ID to register the environment as.
        class_path: The fully-qualified class path of the environment.
        **kwargs: Key-word arguments to pass to gym's register function.
    """
    existing_specs = gym_reg.registry.env_specs
    if env_id in existing_specs:
        # This may happen during test discovery; drop the stale entry so the
        # environment can be registered again.
        logging.warning('Re-registering environment %s', env_id)
        del existing_specs[env_id]
    gym_reg.register(env_id, entry_point=class_path, **kwargs)
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/configurable_test.py | robel/utils/configurable_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for configurable."""
import pickle
import tempfile
from absl.testing import absltest
from robel.utils.configurable import configurable
TEST_CONFIGS = {}
@configurable(config_cache=TEST_CONFIGS)
class DummyWithConfig(object):
    """Configurable dummy class with three overridable attributes."""

    def __init__(self, a=1, b=2, c=3):
        self.a, self.b, self.c = a, b, c
class ChildDummyWithConfig(DummyWithConfig):
    # Subclass used to verify that configs are applied per-class rather than
    # inherited from the parent's config entry.
    pass
@configurable(pickleable=True, config_cache=TEST_CONFIGS)
class DummyWithConfigPickleable(object):
    """Pickleable configurable dummy class with three attributes."""

    def __init__(self, a=1, b=2, c=3):
        self.a, self.b, self.c = a, b, c
class TestConfigurable(absltest.TestCase):
    """Unit tests for configurable."""

    def setUp(self):
        # Start each test from an empty config cache so configuration from
        # one test cannot leak into another.
        TEST_CONFIGS.clear()

    def test_instance(self):
        """Tests default values."""
        d = DummyWithConfig()
        self.assertEqual(d.a, 1)
        self.assertEqual(d.b, 2)
        self.assertEqual(d.c, 3)

    def test_set_config(self):
        """Tests setting a config values."""
        # Config entries override constructor defaults for listed keys only.
        TEST_CONFIGS[DummyWithConfig] = {'a': 4, 'c': 5}
        d = DummyWithConfig()
        self.assertEqual(d.a, 4)
        self.assertEqual(d.b, 2)
        self.assertEqual(d.c, 5)

    def test_set_config_kwargs(self):
        """Tests overriding a config with kwargs."""
        # Explicit kwargs take precedence over the config entry.
        TEST_CONFIGS[DummyWithConfig] = {'a': 4, 'c': 5}
        d = DummyWithConfig(a=7)
        self.assertEqual(d.a, 7)
        self.assertEqual(d.b, 2)
        self.assertEqual(d.c, 5)

    def test_set_config_inheritance(self):
        """Tests config values for a child class."""
        TEST_CONFIGS[ChildDummyWithConfig] = {'a': 4, 'c': 5}
        d1 = ChildDummyWithConfig()
        self.assertEqual(d1.a, 4)
        self.assertEqual(d1.b, 2)
        self.assertEqual(d1.c, 5)
        # The parent class is unaffected by the child's config entry.
        d2 = DummyWithConfig()
        self.assertEqual(d2.a, 1)
        self.assertEqual(d2.b, 2)
        self.assertEqual(d2.c, 3)

    def test_pickle(self):
        """Tests loading from a pickled object."""
        TEST_CONFIGS[DummyWithConfigPickleable] = {'a': 4, 'c': 5}
        d = DummyWithConfigPickleable(b=8)
        # Clear before unpickling: the pickle itself must carry both the
        # config values and the kwarg overrides.
        TEST_CONFIGS.clear()
        with tempfile.TemporaryFile() as f:
            pickle.dump(d, f)
            f.seek(0)
            d2 = pickle.load(f)
            self.assertEqual(d2.a, 4)
            self.assertEqual(d2.b, 8)
            self.assertEqual(d2.c, 5)

    def test_pickle_override(self):
        """Tests overriding serialized parameters."""
        TEST_CONFIGS[DummyWithConfigPickleable] = {'a': 4, 'c': 5}
        d = DummyWithConfigPickleable(c=1)
        self.assertEqual(d.a, 4)
        self.assertEqual(d.b, 2)
        self.assertEqual(d.c, 1)
        with tempfile.TemporaryFile() as f:
            pickle.dump(d, f)
            f.seek(0)
            # A config set after pickling still applies on load, but keys
            # captured in the pickle keep their serialized values.
            TEST_CONFIGS[DummyWithConfigPickleable] = {'b': 5}
            d2 = pickle.load(f)
            self.assertEqual(d2.a, 4)
            self.assertEqual(d2.b, 5)
            self.assertEqual(d2.c, 1)
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/math_utils_test.py | robel/utils/math_utils_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for math_utils."""
from absl.testing import absltest
import numpy as np
from transforms3d.euler import euler2quat, quat2euler
from robel.utils.math_utils import average_quaternions, calculate_cosine
class AverageQuaternionsTest(absltest.TestCase):
    """Tests for `average_quaternions`."""

    def test_identity(self):
        """Average one quaternion should equal itself."""
        quat = euler2quat(np.pi / 4, np.pi / 4, np.pi / 4)
        averaged = average_quaternions([quat])
        np.testing.assert_array_almost_equal(averaged, quat)

    def test_multiple_identity(self):
        """Average multiple copies of a quaternion should equal itself."""
        quat = euler2quat(np.pi / 4, np.pi / 4, np.pi / 4)
        averaged = average_quaternions([quat, quat, quat])
        np.testing.assert_array_almost_equal(averaged, quat)

    def test_average_two(self):
        """Averaging two different quaternions."""
        # Opposite rotations about the same axis should cancel out.
        quat_pos = euler2quat(np.pi / 4, 0, 0)
        quat_neg = euler2quat(-np.pi / 4, 0, 0)
        averaged = average_quaternions([quat_pos, quat_neg])
        euler_angles = quat2euler(averaged)
        np.testing.assert_array_almost_equal(euler_angles, [0, 0, 0])
class CalculateCosineTest(absltest.TestCase):
    """Tests for `calculate_cosine`."""

    def test_identical(self):
        """Two of the same vectors are completely aligned."""
        vec = np.array([1, 0])
        self.assertAlmostEqual(calculate_cosine(vec, vec), 1)

    def test_parallel(self):
        """Two parallel vectors."""
        vec_a = np.array([1, 2])
        vec_b = np.array([2, 4])
        self.assertAlmostEqual(calculate_cosine(vec_a, vec_b), 1)

    def test_opposite(self):
        """Two anti-parallel vectors."""
        vec_a = np.array([1, 2])
        vec_b = np.array([-1, -2])
        self.assertAlmostEqual(calculate_cosine(vec_a, vec_b), -1)

    def test_orthogonal(self):
        """Two orthogonal vectors."""
        vec_a = np.array([1, 1])
        vec_b = np.array([1, -1])
        self.assertAlmostEqual(calculate_cosine(vec_a, vec_b), 0)

    def test_batched(self):
        """Multiple vectors."""
        vec_a = np.array([[1, 1], [2, 2]])
        vec_b = np.array([[1, -1], [3, 3]])
        np.testing.assert_array_almost_equal(
            calculate_cosine(vec_a, vec_b), [0, 1])

    def test_zero(self):
        """Tests when the norm is 0."""
        # A zero vector yields 0 rather than dividing by zero.
        vec_a = np.array([1, 0])
        vec_b = np.array([0, 0])
        self.assertAlmostEqual(calculate_cosine(vec_a, vec_b), 0)

    def test_zero_batched(self):
        """Tests when the norm is 0."""
        vec_a = np.array([[1, 0], [1, 1]])
        vec_b = np.array([[0, 0], [2, 2]])
        np.testing.assert_array_almost_equal(
            calculate_cosine(vec_a, vec_b), [0, 1])
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/resources.py | robel/utils/resources.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods to locate asset files."""
import collections
import logging
import os
import shutil
import tempfile
from typing import Callable, Dict, Optional
_MODULE_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
_SCENES_DIR_COMPONENT = 'robel'
def get_asset_path(path: str):
    """Returns the absolute path of the given fully-qualified resource path.

    Example:
        >>> get_asset_path('robel/dclaw/assets/')

    Args:
        path: The path to the resource, with components separated by slashes.

    Raises:
        ValueError: If the path does not start with a known root component.
    """
    # Check 'robel-scenes' first since it also matches the 'robel' prefix.
    if path.startswith('robel-scenes'):
        resolved = os.path.join(_MODULE_DIR, _SCENES_DIR_COMPONENT, path)
    elif path.startswith('robel'):
        resolved = os.path.join(_MODULE_DIR, path)
    else:
        raise ValueError('Unknown path root: ' + path)
    return os.path.normpath(resolved)
def get_resource(path: str, mode: str = 'rb'):
    """Returns the contents of the given resource file path.

    Args:
        path: The file path to read.
        mode: The mode to open the file with; binary read by default.
    """
    with open(path, mode=mode) as resource_file:
        contents = resource_file.read()
    return contents
class AssetBundle:
    """Represents a bundle of asset files.

    Collects a root resource and everything it transitively references
    (e.g. a MuJoCo XML with its includes, meshes, and textures) and copies
    them to a destination directory, preserving relative paths.
    """

    def __init__(self,
                 dest_path: Optional[str] = None,
                 resource_fn: Callable[[str], bytes] = get_resource,
                 dry_run: bool = False,
                 verbose: bool = False):
        """Creates a new asset bundle.

        Args:
            dest_path: The destination directory to copy the bundle to.
                If None (and not a dry run), a temporary directory is created
                and later removed by `cleanup()`.
            resource_fn: The function used to get the contents of the file.
            dry_run: If True, does not write files to the destination.
            verbose: If True, logs copied files.
        """
        self._resource_fn = resource_fn
        self._dry_run = dry_run
        self._verbose = verbose
        # Maps original resource path -> destination path, in the order the
        # resources were first encountered.
        self._copied_resources = collections.OrderedDict()
        self._needs_cleanup = False
        if dest_path is None and not dry_run:
            dest_path = tempfile.mkdtemp()
            self._needs_cleanup = True
        self._dest_path = dest_path or ''

    @property
    def copied_paths(self) -> Dict[str, str]:
        """Returns the copied resource paths."""
        return self._copied_resources

    def cleanup(self):
        """Removes the temporary directory."""
        # Only removes a directory this bundle created itself.
        if self._needs_cleanup and self._dest_path:
            shutil.rmtree(self._dest_path)
            self._needs_cleanup = False

    def add_mujoco(self, main_path: str) -> str:
        """Adds the given MuJoCo XML file to the bundle.

        Parses the XML, copies all referenced meshes/textures/includes, and
        returns the destination path of the main XML file.
        """
        from xml.etree import ElementTree as etree
        main_path = os.path.normpath(main_path)
        main_dir = os.path.dirname(main_path)
        # Asset-type tag name -> directory that 'file' attributes on that
        # tag are resolved against. Defaults to the main file's directory.
        directory_context = {
            'mesh': main_dir,
            'texture': main_dir,
        }
        # Traverse the XML tree depth-first.
        node_stack = []
        node_stack.append((directory_context, main_path))
        while node_stack:
            directories, file_path = node_stack.pop()
            base_dir = os.path.dirname(file_path)
            xml_contents = self._copy_asset(file_path)
            node = etree.fromstring(xml_contents)
            children = []
            # Update the directories if a compiler tag is present.
            for child in node.iter('compiler'):
                if 'meshdir' in child.attrib:
                    directories['mesh'] = os.path.join(base_dir,
                                                       child.attrib['meshdir'])
                if 'texturedir' in child.attrib:
                    directories['texture'] = os.path.join(
                        base_dir, child.attrib['texturedir'])
            for child in node.iter():
                # Resolve mesh and texture children with file tags.
                if child.tag in directories:
                    if 'file' in child.attrib:
                        asset_path = os.path.join(directories[child.tag],
                                                  child.attrib['file'])
                        if asset_path not in self._copied_resources:
                            self._copy_asset(asset_path)
                # Traverse includes.
                elif child.tag == 'include':
                    child_path = os.path.join(base_dir, child.attrib['file'])
                    # Each include gets its own copy of the directory
                    # context so sibling includes don't affect each other.
                    children.append((directories.copy(), child_path))
            # Traverse children in visit order.
            node_stack.extend(reversed(children))
        return self._copied_resources[main_path]

    def _copy_asset(self, asset_path: str) -> bytes:
        """Copies an asset and returns its contents."""
        # Only relative paths are supported; they mirror into dest_path.
        assert not asset_path.startswith('/'), asset_path
        asset_path = os.path.normpath(asset_path)
        if self._verbose:
            logging.info('Found asset: %s', asset_path)
        contents = self._resource_fn(asset_path)
        # Copy the asset to the destination.
        if asset_path not in self._copied_resources:
            copy_path = os.path.join(self._dest_path, asset_path)
            if not self._dry_run:
                self._write_asset(copy_path, contents)
            self._copied_resources[asset_path] = copy_path
        return contents

    def _write_asset(self, write_path: str, contents: bytes):
        """Writes the contents to the given path."""
        copy_dir = os.path.dirname(write_path)
        if not os.path.isdir(copy_dir):
            os.makedirs(copy_dir)
        with open(write_path, 'wb') as f:
            f.write(contents)

    def __enter__(self):
        """Enables use as a context manager."""
        return self

    def __exit__(self, *args):
        """Enables use as a context manager."""
        self.cleanup()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/math_utils.py | robel/utils/math_utils.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions relating to math."""
import logging
from typing import Sequence
import numpy as np
def average_quaternions(quaternions: Sequence[np.ndarray]) -> np.ndarray:
"""Returns the average of the given quaternions.
Args:
quaternions: A list of quaternions to average.
Returns:
The averaged quaternion.
"""
# Implements the algorithm from:
# Markley, F. L., Cheng, Y., Crassidis, J. L., & Oshman, Y. (2007).
# Averaging quaternions. Journal of Guidance, Control, and Dynamics,
# 30(4), 1193-1197.
n_quat = len(quaternions)
assert n_quat > 0, 'Must provide at least one quaternion.'
weight = 1.0 / n_quat # Uniform weighting for all quaternions.
q_matrix = np.vstack(quaternions)
assert q_matrix.shape == (n_quat, 4)
m_matrix = np.matmul(weight * np.transpose(q_matrix), q_matrix)
_, eig_vecs = np.linalg.eigh(m_matrix)
# The final eigenvector corresponds to the largest eigenvalue.
return eig_vecs[:, -1]
def calculate_cosine(vec1: np.ndarray, vec2: np.ndarray) -> np.ndarray:
"""Calculates the cosine angle between two vectors.
This computes cos(theta) = dot(v1, v2) / (norm(v1) * norm(v2))
Args:
vec1: The first vector. This can have a batch dimension.
vec2: The second vector. This can have a batch dimension.
Returns:
The cosine angle between the two vectors, with the same batch dimension
as the given vectors.
"""
if np.shape(vec1) != np.shape(vec2):
raise ValueError('{} must have the same shape as {}'.format(vec1, vec2))
ndim = np.ndim(vec1)
if ndim < 1 or ndim > 2:
raise ValueError('{} must be 1 or 2 dimensions'.format(vec1))
axis = 1 if ndim == 2 else 0
norm_product = (
np.linalg.norm(vec1, axis=axis) * np.linalg.norm(vec2, axis=axis))
zero_norms = norm_product == 0
if np.any(zero_norms):
logging.warning(
'%s or %s is all 0s; this may be normal during initialization.',
str(vec1), str(vec2))
if ndim == 2:
norm_product[zero_norms] = 1
else:
norm_product = 1
# Return the batched dot product.
return np.einsum('...i,...i', vec1, vec2) / norm_product
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/__init__.py | robel/utils/__init__.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/resources_test.py | robel/utils/resources_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for resources."""
from absl.testing import absltest
from robel.utils.resources import AssetBundle
class DummyResources:
"""Dummy cache of resources."""
def __init__(self, assets):
self.assets = assets
def get_resource(self, path: str):
return self.assets[path]
class TestAssetBundle(absltest.TestCase):
"""Unit tests for configurable."""
def test_add_mujoco(self):
"""Tests adding a MuJoCo file."""
resources = DummyResources({
'a/b/main.xml': '<mujoco><include file="../child1.xml"/></mujoco>',
'a/child1.xml': """
<mujoco>
<compiler meshdir="c"/>
<asset>
<mesh name="a1" file="hello.stl"/>
<mesh name="a2" file="world.stl"/>
</asset>
</mujoco>
""",
'a/c/hello.stl': 'Hello!',
'a/c/world.stl': 'World!',
})
bundle = AssetBundle(
dest_path='test', dry_run=True, resource_fn=resources.get_resource)
transformed_path = bundle.add_mujoco('a/b/main.xml')
self.assertEqual(transformed_path, 'test/a/b/main.xml')
self.assertDictEqual(
bundle.copied_paths, {
'a/b/main.xml': 'test/a/b/main.xml',
'a/child1.xml': 'test/a/child1.xml',
'a/c/hello.stl': 'test/a/c/hello.stl',
'a/c/world.stl': 'test/a/c/world.stl',
})
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/configurable.py | robel/utils/configurable.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorator for passing parameters to environments.
This allows parameterizing environments via `gym.make` for older versions of
gym.
NOTE: From gym 0.12 and onwards, `gym.make` accepts `kwargs`.
Example usage:
>>> robel.set_env_params(
... 'DClawTurnStatic-v0', {'device_path': '/dev/ttyUSB0'})
>>> env = gym.make('DClawTurnStatic-v0')
"""
import importlib
import inspect
import logging
from typing import Any, Dict, Optional, Type
from gym.envs.registration import registry as gym_registry
# Global mapping of environment class to parameters.
_ENV_PARAMS = {}
def set_env_params(env_id: str, params: Dict[str, Any]):
"""Sets the parameters for the given environment ID."""
if env_id not in gym_registry.env_specs:
raise ValueError('Unregistered environment ID: {}'.format(env_id))
spec = gym_registry.env_specs[env_id]
# Fallback compatibility for older gym versions.
entry_point = getattr(spec, "entry_point",
getattr(spec, "_entry_point", None))
assert entry_point is not None
if not callable(entry_point):
assert isinstance(entry_point, str)
# Get the class handle of the entry-point string.
module_path, class_name = entry_point.split(":")
module = importlib.import_module(module_path)
entry_point = getattr(module, class_name)
_ENV_PARAMS[entry_point] = params
def configurable(pickleable: bool = False,
config_cache: Optional[Dict[Type, Dict[str, Any]]] = None):
"""Class decorator to allow injection of constructor arguments.
Example usage:
>>> @configurable()
... class A:
... def __init__(self, b=None, c=2, d='Wow'):
... ...
>>> set_env_params(A, {'b': 10, 'c': 20})
>>> a = A() # b=10, c=20, d='Wow'
>>> a = A(b=30) # b=30, c=20, d='Wow'
TODO(michaelahn): Add interop with gin-config.
Args:
pickleable: Whether this class is pickleable. If true, causes the pickle
state to include the config and constructor arguments.
config_cache: The dictionary of stored environment parameters to use.
If not explicitly provided, uses the default global dictionary.
"""
# pylint: disable=protected-access,invalid-name
if config_cache is None:
config_cache = _ENV_PARAMS
def cls_decorator(cls):
assert inspect.isclass(cls)
# Overwrite the class constructor to pass arguments from the config.
base_init = cls.__init__
def __init__(self, *args, **kwargs):
config = config_cache.get(type(self), {})
# Allow kwargs to override the config.
kwargs = {**config, **kwargs}
logging.debug('Initializing %s with params: %s',
type(self).__name__, str(kwargs))
if pickleable:
self._pkl_env_args = args
self._pkl_env_kwargs = kwargs
base_init(self, *args, **kwargs)
cls.__init__ = __init__
# If the class is pickleable, overwrite the state methods to save
# the constructor arguments and config.
if pickleable:
# Use same pickle keys as gym.utils.ezpickle for backwards compat.
PKL_ARGS_KEY = '_ezpickle_args'
PKL_KWARGS_KEY = '_ezpickle_kwargs'
def __getstate__(self):
return {
PKL_ARGS_KEY: self._pkl_env_args,
PKL_KWARGS_KEY: self._pkl_env_kwargs,
}
cls.__getstate__ = __getstate__
def __setstate__(self, data):
saved_args = data[PKL_ARGS_KEY]
saved_kwargs = data[PKL_KWARGS_KEY]
# Override the saved state with the current config.
config = config_cache.get(type(self), {})
kwargs = {**saved_kwargs, **config}
inst = type(self)(*saved_args, **kwargs)
self.__dict__.update(inst.__dict__)
cls.__setstate__ = __setstate__
return cls
# pylint: enable=protected-access,invalid-name
return cls_decorator
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/testing/mock_time.py | robel/utils/testing/mock_time.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to mock time-related methods."""
from absl.testing.absltest import mock
class MockTime:
"""Class to mock the functionality of the time module."""
def __init__(self, initial_time: float = 0.0):
self._time = initial_time
def time(self) -> float:
return self._time
def sleep(self, duration: float):
self._time += duration
def patch_time(module_path: str, **kwargs):
return mock.patch(module_path, MockTime(**kwargs))
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/testing/mock_dynamixel_sdk.py | robel/utils/testing/mock_dynamixel_sdk.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocks the DynamixelSDK Python API."""
# DynamixelSDK conforms to a different naming convention.
# pylint: disable=invalid-name
import sys
from typing import Iterable
from absl.testing.absltest import mock
import numpy as np
from robel.components.robot.dynamixel_client import ADDR_TORQUE_ENABLE
CONTROL_TABLE_SIZE = 148
class MockDynamixelSdk:
"""Mock class for the DynamixelSDK."""
def __init__(self):
self.available_ports = set()
self.used_ports = set()
self.devices = {}
self.has_opened_port = False
# dynamixel_sdk constants.
self.COMM_SUCCESS = True
def create_device(self, port: str, motor_ids: Iterable[int]):
"""Creates a fake device."""
assert port not in self.available_ports
self.available_ports.add(port)
self.devices[port] = {
motor_id: np.zeros(CONTROL_TABLE_SIZE, dtype=np.uint8)
for motor_id in motor_ids
}
def get_enabled_motors(self, port: str) -> Iterable[int]:
"""Returns the enabled motor IDs for the given port."""
motor_ids = []
for motor_id, control_table in self.devices[port].items():
if control_table[ADDR_TORQUE_ENABLE] == 1:
motor_ids.append(motor_id)
return motor_ids
def PortHandler(self, port: str):
"""Returns a mock port handler."""
if port not in self.available_ports:
raise ValueError('Unknown port: {}'.format(port))
if port in self.used_ports:
raise ValueError('Port in use: {}'.format(port))
# dynamixel_sdk has an undocumented behavior that all PortHandlers must
# be created before openPort is called, or else port numbers are reused.
# Enforce this in tests by erroring.
if self.has_opened_port:
raise ValueError(
'Must create all PortHandlers before openPort is called')
self.used_ports.add(port)
handler = mock.Mock(spec=[])
def openPort():
self.has_opened_port = True
handler.is_open = True
return not handler.faulty
def closePort():
handler.is_open = False
handler.is_open = False
handler.is_using = False
handler.faulty = False
handler.openPort = mock.Mock(side_effect=openPort)
handler.closePort = mock.Mock(side_effect=closePort)
handler.setBaudRate = mock.Mock(
side_effect=lambda _: not handler.faulty)
handler.device = self.devices[port]
return handler
def PacketHandler(self, protocol_version: int = 2.0):
"""Returns a mock port handler."""
handler = mock.Mock(spec=[])
handler.protocol_version = protocol_version
handler.faulty_comm = False
handler.faulty_packets = False
def write1ByteTxRx(port_handler, motor_id: int, address: int,
value: int):
if motor_id not in port_handler.device:
raise ValueError('Invalid motor ID: {}'.format(motor_id))
port_handler.device[motor_id][address] = value
return not handler.faulty_comm, not handler.faulty_packets
handler.write1ByteTxRx = mock.Mock(side_effect=write1ByteTxRx)
handler.getTxRxResult = mock.Mock(
side_effect=lambda success: 'Error!' if not success else None)
handler.getRxPacketError = handler.getTxRxResult
return handler
def GroupBulkRead(self, port_handler, unused_packet_handler):
"""Returns a mock bulk read operation."""
op = mock.Mock(spec=[])
op.params = {}
device = port_handler.device
def addParam(motor_id: int, address: int, size: int):
if motor_id not in device or motor_id in op.params:
return False
assert address + size <= device[motor_id].size
op.params[motor_id] = (address, size)
return True
op.faulty = False
op.unavailable_ids = set()
op.addParam = mock.Mock(side_effect=addParam)
op.txRxPacket = mock.Mock(side_effect=lambda: not op.faulty)
def isAvailable(motor_id: int, address: int, size: int):
assert motor_id in op.params
if motor_id in op.unavailable_ids:
return False
assert address + size <= device[motor_id].size
return True
op.isAvailable = mock.Mock(side_effect=isAvailable)
def getData(motor_id: int, address: int, size: int):
assert motor_id in op.params
assert motor_id in device
assert size in (1, 2, 4)
data = device[motor_id][address:address + size]
return int.from_bytes(data.tobytes(), byteorder='little')
op.getData = mock.Mock(side_effect=getData)
return op
def GroupSyncWrite(self, port_handler, unused_packet_handler, address: int,
size: int):
"""Returns a mock sync write operation."""
op = mock.Mock(spec=[])
op.params = set()
device = port_handler.device
def addParam(motor_id: int, value: bytes):
if motor_id not in device or motor_id in op.params:
return False
if len(value) != size:
raise ValueError('Incorrect size for value: {}'.format(value))
device[motor_id][address:address + size] = list(value)
op.params.add(motor_id)
return True
op.txPacket = mock.Mock(return_value=True)
op.addParam = mock.Mock(side_effect=addParam)
op.clearParam = mock.Mock(side_effect=op.params.clear)
return op
def patch_dynamixel(**devices):
"""Decorator that patches the DynamixelSDK for the function context."""
def decorator(fn):
def wrapped_fn(*args):
sdk = MockDynamixelSdk()
for key, motor_ids in devices.items():
sdk.create_device(key, motor_ids)
sys.modules['dynamixel_sdk'] = sdk
fn(*args, sdk)
del sys.modules['dynamixel_sdk']
return wrapped_fn
return decorator
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/testing/mock_sim_scene.py | robel/utils/testing/mock_sim_scene.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock SimScene for testing."""
import contextlib
from typing import Iterable, Optional, Tuple
from absl.testing.absltest import mock
import numpy as np
class MockMjData:
"""Mock container for Mujoco data."""
def __init__(self, nq: int, nv: int, nu: int):
self.qpos = np.zeros(nq, dtype=np.float32)
self.qvel = np.zeros(nv, dtype=np.float32)
self.qacc = np.zeros(nv, dtype=np.float32)
self.ctrl = np.zeros(nu, dtype=np.float32)
class MockMjModel:
"""Mock container for a Mujoco model."""
# Properties of the model, mapped to the size of the property.
PROPERTIES = {
'body': {
'pos': 3,
'quat': 4,
},
'geom': {
'pos': 3,
'quat': 4,
'size': 3,
},
'site': {
'pos': 3,
'quat': 4,
'size': 3,
},
'cam': {
'pos': 3,
'quat': 4,
}
}
def __init__(self,
nq: int,
nv: Optional[int] = None,
nu: Optional[int] = None,
ctrl_range: Optional[Tuple[float]] = None,
body_names: Optional[Iterable[str]] = None,
geom_names: Optional[Iterable[str]] = None,
site_names: Optional[Iterable[str]] = None,
cam_names: Optional[Iterable[str]] = None):
if nv is None:
nv = nq
if nu is None:
nu = nq
self.nq = nq
self.nv = nv
self.nu = nu
self.body_names = body_names or []
self.geom_names = geom_names or []
self.site_names = site_names or []
self.cam_names = cam_names or []
self.data = MockMjData(nq, nv, nu)
if ctrl_range is None:
ctrl_range = (-1, 1)
self.actuator_ctrlrange = np.tile(ctrl_range, (self.nu, 1))
# Generate the properties.
for parent_key, sub_properties in self.PROPERTIES.items():
names = getattr(self, parent_key + '_names')
element_count = len(names)
# Add the count property.
setattr(self, 'n' + parent_key, element_count)
# Add mujoco_py's `*_name2id` method.
mapping = {name: i for i, name in enumerate(names)}
def name2id(name: str, parent=parent_key, key_map=mapping):
if name not in key_map:
raise ValueError('No {} exists with name {}'.format(
parent, name))
return key_map[name]
if parent_key == 'cam': # mujoco-py is inconsistent for camera.
fn_name = 'camera_name2id'
else:
fn_name = parent_key + '_name2id'
setattr(self, fn_name, name2id)
# Create the child-property arrays.
for child_key, size in sub_properties.items():
attr_name = '{}_{}'.format(parent_key, child_key)
setattr(self, attr_name,
np.zeros((element_count, size), dtype=np.float32))
class MockSimScene:
"""Mock object that implements the SimScene interface."""
@staticmethod
def create(*unused_args, **unused_kwargs):
raise NotImplementedError('`patch_sim_scene` must be called.')
def __init__(self, *args, step_duration: float = 1, **kwargs):
"""Initializes a new mock SimScene."""
self.sim = mock.Mock()
self.sim.model = MockMjModel(*args, **kwargs)
self.model = self.sim.model
self.sim.data = self.model
self.data = self.model.data
self.step_duration = step_duration
self.close = mock.Mock()
self.advance = mock.Mock()
self.renderer = mock.Mock()
self.renderer.render_offscreen = lambda w, h, **_: np.zeros((w, h))
@contextlib.contextmanager
def patch_sim_scene(module_path: str, **kwargs):
"""Patches the SimScene class in the given module.
Args:
module_path: The path to the SimScene class to mock.
**kwargs: Arguments passed to MockSimScene when `SimScene.create` is
called.
"""
with mock.patch(module_path, MockSimScene) as mock_sim_cls:
mock_sim_cls.create = lambda *_args, **_kwargs: MockSimScene(**kwargs)
yield mock_sim_cls
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/testing/mock_sim_scene_test.py | robel/utils/testing/mock_sim_scene_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for MockSimScene."""
from absl.testing import absltest
import numpy as np
from robel.utils.testing.mock_sim_scene import MockSimScene
class MockSimSceneTest(absltest.TestCase):
"""Tests MockSimScene."""
def test_defaults(self):
"""Tests default initialization of the sim scene."""
scene = MockSimScene(nq=2)
# Ensure that properties exist.
self.assertIsNotNone(scene.sim)
self.assertIsNotNone(scene.model)
self.assertIsNotNone(scene.data)
self.assertIsNotNone(scene.step_duration)
self.assertIsNotNone(scene.close)
self.assertIsNotNone(scene.advance)
np.testing.assert_array_equal(scene.model.actuator_ctrlrange, [(-1, 1),
(-1, 1)])
# Check that sizes are consistent.
self.assertEqual(scene.model.nq, 2)
self.assertEqual(scene.model.nv, 2)
self.assertEqual(scene.model.nu, 2)
self.assertEqual(scene.data.qpos.size, 2)
self.assertEqual(scene.data.qvel.size, 2)
self.assertEqual(scene.data.qacc.size, 2)
self.assertEqual(scene.data.ctrl.size, 2)
def test_explicit_init(self):
"""Tests initialization with explicit values."""
scene = MockSimScene(
nq=2,
nv=3,
nu=4,
ctrl_range=(-2, 2),
body_names=['test0'],
geom_names=['test0', 'test1'],
site_names=['test0', 'test1', 'test2'],
cam_names=['cam0'],
step_duration=0.5)
self.assertEqual(scene.data.qpos.size, 2)
self.assertEqual(scene.data.qvel.size, 3)
self.assertEqual(scene.data.qacc.size, 3)
self.assertEqual(scene.data.ctrl.size, 4)
np.testing.assert_array_equal(scene.model.actuator_ctrlrange,
[(-2, 2)] * 4)
np.testing.assert_array_equal(scene.model.body_pos, np.zeros((1, 3)))
np.testing.assert_array_equal(scene.model.geom_pos, np.zeros((2, 3)))
np.testing.assert_array_equal(scene.model.site_pos, np.zeros((3, 3)))
np.testing.assert_array_equal(scene.model.cam_pos, np.zeros((1, 3)))
self.assertEqual(scene.model.body_name2id('test0'), 0)
self.assertEqual(scene.model.geom_name2id('test1'), 1)
self.assertEqual(scene.model.site_name2id('test2'), 2)
self.assertEqual(scene.model.camera_name2id('cam0'), 0)
def test_render_offscreen(self):
"""Tests mock rendering."""
scene = MockSimScene(nq=1)
image = scene.renderer.render_offscreen(16, 16)
self.assertEqual(image.shape, (16, 16))
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/testing/mock_dynamixel_sdk_test.py | robel/utils/testing/mock_dynamixel_sdk_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for MockDynamixelSdk."""
from absl.testing import absltest
from robel.utils.testing.mock_dynamixel_sdk import patch_dynamixel
class MockDynamixelSdkTest(absltest.TestCase):
"""Tests MockDynamixelSdk."""
@patch_dynamixel(test=[1, 2, 3, 4])
def test_port_handler(self, sdk):
handler = sdk.PortHandler('test')
self.assertTrue(handler.openPort())
self.assertTrue(handler.setBaudRate(1000000))
handler.closePort()
@patch_dynamixel(test=[1, 2, 3, 4], test1=[1, 2])
def test_port_handler_fault(self, sdk):
with self.assertRaises(ValueError):
sdk.PortHandler('another')
handler = sdk.PortHandler('test')
with self.assertRaises(ValueError):
sdk.PortHandler('test')
handler.faulty = True
self.assertFalse(handler.openPort())
with self.assertRaises(ValueError):
sdk.PortHandler('test1')
self.assertFalse(handler.setBaudRate(1000000))
self.assertIn('test', sdk.used_ports)
self.assertNotIn('test1', sdk.used_ports)
@patch_dynamixel(test1=[1, 2, 3, 4], test2=[1, 2, 3, 4, 5, 6])
def test_port_handler_multi(self, sdk):
handler1 = sdk.PortHandler('test1')
handler2 = sdk.PortHandler('test2')
self.assertTrue(handler1.openPort())
self.assertTrue(handler2.openPort())
@patch_dynamixel(test=[1])
def test_packet_handler(self, sdk):
port = sdk.PortHandler('test')
packet = sdk.PacketHandler()
packet.write1ByteTxRx(port, 1, 64, 2)
self.assertEqual(sdk.devices['test'][1][64], 2)
@patch_dynamixel(test=[1, 2, 3, 4])
def test_read_write(self, sdk):
motor_ids = [1, 2, 3, 4]
port = sdk.PortHandler('test')
packet = sdk.PacketHandler()
self.assertTrue(port.openPort())
self.assertTrue(port.setBaudRate(1000000))
reader = sdk.GroupBulkRead(port, packet)
writer = sdk.GroupSyncWrite(port, packet, 32, 4)
for mid in motor_ids:
self.assertTrue(reader.addParam(mid, 32, 4))
for mid in motor_ids:
self.assertTrue(reader.isAvailable(mid, 32, 4))
for mid in motor_ids:
self.assertEqual(reader.getData(mid, 32, 4), 0)
payload = 12345678
for mid in motor_ids:
self.assertTrue(writer.addParam(mid, payload.to_bytes(4, 'little')))
self.assertTrue(writer.txPacket())
for mid in motor_ids:
self.assertTrue(reader.getData(mid, 32, 4), payload)
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/utils/testing/__init__.py | robel/utils/testing/__init__.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/dclaw/screw_test.py | robel/dclaw/screw_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for D'Claw screw tasks."""
from absl.testing import absltest
from absl.testing import parameterized
import gym
import numpy as np
from robel.dclaw.screw import (DClawScrewFixed, DClawScrewRandom,
DClawScrewRandomDynamics)
# pylint: disable=no-member
@parameterized.parameters(
('DClawScrewFixed-v0', DClawScrewFixed),
('DClawScrewRandom-v0', DClawScrewRandom),
('DClawScrewRandomDynamics-v0', DClawScrewRandomDynamics),
)
class DClawScrewTest(absltest.TestCase):
"""Unit test class for RobotEnv."""
def test_gym_make(self, env_id, env_cls):
"""Accesses the sim, model, and data properties."""
env = gym.make(env_id)
self.assertIsInstance(env.unwrapped, env_cls)
def test_spaces(self, _, env_cls):
"""Checks the observation, action, and state spaces."""
env = env_cls()
observation_size = np.sum([
9, # claw_qpos
1, # object_x
1, # object_y
9, # last_action
1, # target_error
])
self.assertEqual(env.observation_space.shape, (observation_size,))
self.assertEqual(env.action_space.shape, (9,))
self.assertEqual(env.state_space['claw_qpos'].shape, (9,))
self.assertEqual(env.state_space['claw_qvel'].shape, (9,))
self.assertEqual(env.state_space['object_qpos'].shape, (1,))
self.assertEqual(env.state_space['object_qvel'].shape, (1,))
def test_reset_step(self, _, env_cls):
"""Checks that resetting and stepping works."""
env = env_cls()
env.reset()
env.step(env.action_space.sample())
if __name__ == '__main__':
absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/dclaw/turn.py | robel/dclaw/turn.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Turn tasks with DClaw robots.
This is a single rotation of an object from an initial angle to a target angle.
"""
import abc
import collections
from typing import Dict, Optional, Sequence
import numpy as np
from transforms3d.euler import euler2quat
from robel.components.robot.dynamixel_robot import DynamixelRobotState
from robel.dclaw.base_env import BaseDClawObjectEnv
from robel.simulation.randomize import SimRandomizer
from robel.utils.configurable import configurable
from robel.utils.resources import get_asset_path
# The observation keys that are concatenated as the environment observation.
DEFAULT_OBSERVATION_KEYS = (
    'claw_qpos',
    'object_x',
    'object_y',
    'last_action',
    'target_error',
)

# Reset pose for the claw joints (radians): [top, middle, bottom] angles,
# repeated for each of the three fingers.
RESET_POSE = [0, -np.pi / 3, np.pi / 3] * 3

# Simulation XML for the 3-finger D'Claw with a 3-prong valve object.
DCLAW3_ASSET_PATH = 'robel/dclaw/assets/dclaw3xh_valve3_v0.xml'
class BaseDClawTurn(BaseDClawObjectEnv, metaclass=abc.ABCMeta):
    """Shared logic for DClaw turn tasks.

    Subclasses implement `_reset` to pick the episode's initial object angle
    (`self._initial_object_pos`) and goal angle (via
    `_set_target_object_pos`).
    """

    def __init__(self,
                 asset_path: str = DCLAW3_ASSET_PATH,
                 observation_keys: Sequence[str] = DEFAULT_OBSERVATION_KEYS,
                 frame_skip: int = 40,
                 interactive: bool = False,
                 success_threshold: float = 0.1,
                 **kwargs):
        """Initializes the environment.

        Args:
            asset_path: The XML model file to load.
            observation_keys: The keys in `get_obs_dict` to concatenate as the
                observations returned by `step` and `reset`.
            frame_skip: The number of simulation steps per environment step.
            interactive: If True, allows the hardware guide motor to freely
                rotate and its current angle is used as the goal.
            success_threshold: The difference threshold (in radians) of the
                object position and the goal position within which we consider
                as a success.
        """
        super().__init__(
            sim_model=get_asset_path(asset_path),
            observation_keys=observation_keys,
            frame_skip=frame_skip,
            **kwargs)

        self._interactive = interactive
        self._success_threshold = success_threshold
        self._desired_claw_pos = RESET_POSE
        # Body ID of the goal marker; used to visualize the target angle.
        self._target_bid = self.model.body_name2id('target')

        # The following are modified (possibly every reset) by subclasses.
        self._initial_object_pos = 0
        self._initial_object_vel = 0
        self._set_target_object_pos(0)

    def _reset(self):
        """Resets the environment."""
        self._reset_dclaw_and_object(
            claw_pos=RESET_POSE,
            object_pos=self._initial_object_pos,
            object_vel=self._initial_object_vel,
            guide_pos=self._target_object_pos)

        # Disengage the motor so a human can rotate the guide freely.
        if self._interactive and self.robot.is_hardware:
            self.robot.set_motors_engaged('guide', False)

    def _step(self, action: np.ndarray):
        """Applies an action to the robot."""
        # The guide is commanded to the goal angle so hardware displays it.
        self.robot.step({
            'dclaw': action,
            'guide': np.atleast_1d(self._target_object_pos),
        })

    def get_obs_dict(self) -> Dict[str, np.ndarray]:
        """Returns the current observation of the environment.

        Returns:
            A dictionary of observation values. This should be an ordered
            dictionary if `observation_keys` isn't set.
        """
        claw_state, object_state, guide_state = self.robot.get_state(
            ['dclaw', 'object', 'guide'])

        # If in interactive mode, use the guide motor position as the goal.
        if self._interactive:
            self._set_target_object_pos(guide_state.qpos)

        # Calculate the signed angle difference to the target in [-pi, pi].
        target_error = self._target_object_pos - object_state.qpos
        target_error = np.mod(target_error + np.pi, 2 * np.pi) - np.pi

        # The object angle is exposed as (cos, sin) to avoid the wrap-around
        # discontinuity at +/-pi.
        obs_dict = collections.OrderedDict((
            ('claw_qpos', claw_state.qpos),
            ('claw_qvel', claw_state.qvel),
            ('object_x', np.cos(object_state.qpos)),
            ('object_y', np.sin(object_state.qpos)),
            ('object_qvel', object_state.qvel),
            ('last_action', self._get_last_action()),
            ('target_error', target_error),
        ))
        # Add hardware-specific state if present.
        if isinstance(claw_state, DynamixelRobotState):
            obs_dict['claw_current'] = claw_state.current
        return obs_dict

    def get_reward_dict(
            self,
            action: np.ndarray,
            obs_dict: Dict[str, np.ndarray],
    ) -> Dict[str, np.ndarray]:
        """Returns the reward for the given action and observation."""
        target_dist = np.abs(obs_dict['target_error'])
        claw_vel = obs_dict['claw_qvel']

        reward_dict = collections.OrderedDict((
            # Penalty for distance away from goal.
            ('target_dist_cost', -5 * target_dist),
            # Penalty for difference with nominal pose.
            ('pose_diff_cost',
             -1 * np.linalg.norm(obs_dict['claw_qpos'] - self._desired_claw_pos)
            ),
            # Penalty for high velocities (only joints moving >= 0.5 rad/s).
            ('joint_vel_cost',
             -1 * np.linalg.norm(claw_vel[np.abs(claw_vel) >= 0.5])),
            # Reward for close proximity with goal (boolean * scalar).
            ('bonus_small', 10 * (target_dist < 0.25)),
            ('bonus_big', 50 * (target_dist < 0.10)),
        ))
        return reward_dict

    def get_score_dict(
            self,
            obs_dict: Dict[str, np.ndarray],
            reward_dict: Dict[str, np.ndarray],
    ) -> Dict[str, np.ndarray]:
        """Returns a standardized measure of success for the environment."""
        target_dist = np.abs(obs_dict['target_error'])
        score_dict = collections.OrderedDict((
            # 1.0 at the goal, 0.0 at the maximum possible error (pi).
            ('points', 1.0 - target_dist / np.pi),
            ('success', target_dist < self._success_threshold),
        ))
        # 'claw_current' is only present on hardware; `get` keeps sim working.
        score_dict.update(
            self._get_safety_scores(
                pos=obs_dict['claw_qpos'],
                vel=obs_dict['claw_qvel'],
                current=obs_dict.get('claw_current'),
            ))
        return score_dict

    def _set_target_object_pos(self, target_pos: float,
                               unbounded: bool = False):
        """Sets the goal angle to the given position.

        Args:
            target_pos: The goal angle in radians.
            unbounded: If True, the angle is not wrapped into [-pi, pi]
                (used by screw tasks whose goal rotates continuously).
        """
        # Modulo to [-pi, pi].
        if not unbounded:
            target_pos = np.mod(target_pos + np.pi, 2 * np.pi) - np.pi
        self._target_object_pos = np.asarray(target_pos, dtype=np.float32)

        # Mark the target position in sim.
        # WARNING: euler2quat will mutate a passed numpy array.
        self.model.body_quat[self._target_bid] = euler2quat(
            0, 0, float(target_pos))
@configurable(pickleable=True)
class DClawTurnFixed(BaseDClawTurn):
    """Turn task with a deterministic setup.

    The valve always starts at 0 radians and the goal is always a half
    turn away at pi radians.
    """

    def _reset(self):
        # Goal: rotate the valve from 0 degrees to 180 degrees.
        self._set_target_object_pos(np.pi)
        self._initial_object_pos = 0
        super()._reset()
@configurable(pickleable=True)
class DClawTurnRandom(BaseDClawTurn):
    """Turn task with randomized start and goal angles.

    The initial valve angle is drawn uniformly from +/-60 degrees; the goal
    is 180 degrees plus a uniform +/-60 degree offset.
    """

    def _reset(self):
        spread = np.pi / 3  # 60 degrees.
        # NOTE: keep the two `uniform` draws in this order so the RNG stream
        # matches across versions.
        self._initial_object_pos = self.np_random.uniform(
            low=-spread, high=spread)
        self._set_target_object_pos(
            np.pi + self.np_random.uniform(low=-spread, high=spread))
        super()._reset()
@configurable(pickleable=True)
class DClawTurnRandomDynamics(DClawTurnRandom):
    """Turns the object with a random initial and random target position.

    The dynamics of the simulation are randomized each episode.
    """

    def __init__(self,
                 *args,
                 sim_observation_noise: Optional[float] = 0.05,
                 **kwargs):
        """Initializes the environment.

        Args:
            sim_observation_noise: Observation noise scale passed through to
                the robot component (see `BaseDClawEnv`); defaults to 0.05
                for this randomized-dynamics variant.
        """
        super().__init__(
            *args, sim_observation_noise=sim_observation_noise, **kwargs)
        self._randomizer = SimRandomizer(self)
        # DoF indices of the claw and object joints whose damping/friction
        # are randomized each reset.
        self._dof_indices = (
            self.robot.get_config('dclaw').qvel_indices.tolist() +
            self.robot.get_config('object').qvel_indices.tolist())

    def _reset(self):
        # Randomize joint dynamics.
        self._randomizer.randomize_dofs(
            self._dof_indices,
            damping_range=(0.005, 0.1),
            friction_loss_range=(0.001, 0.005),
        )
        # Randomize actuator gain (one shared value for all actuators).
        self._randomizer.randomize_actuators(
            all_same=True,
            kp_range=(1, 3),
        )
        # Randomize friction on all geoms in the scene.
        self._randomizer.randomize_geoms(
            all_same=True,
            friction_slide_range=(0.8, 1.2),
            friction_spin_range=(0.003, 0.007),
            friction_roll_range=(0.00005, 0.00015),
        )
        # Slightly perturb where the claw is mounted.
        self._randomizer.randomize_bodies(
            ['mount'],
            position_perturb_range=(-0.01, 0.01),
        )
        # Randomize visual appearance of the mount and valve.
        self._randomizer.randomize_geoms(
            ['mount'],
            color_range=(0.2, 0.9),
        )
        self._randomizer.randomize_geoms(
            parent_body_names=['valve'],
            color_range=(0.2, 0.9),
        )
        super()._reset()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/dclaw/turn_test.py | robel/dclaw/turn_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for D'Claw turn tasks."""
from absl.testing import absltest
from absl.testing import parameterized
import gym
import numpy as np
from robel.dclaw.turn import (DClawTurnFixed, DClawTurnRandom,
DClawTurnRandomDynamics)
# pylint: disable=no-member
@parameterized.parameters(
    ('DClawTurnFixed-v0', DClawTurnFixed),
    ('DClawTurnRandom-v0', DClawTurnRandom),
    ('DClawTurnRandomDynamics-v0', DClawTurnRandomDynamics),
)
class DClawTurnTest(parameterized.TestCase):
    """Unit tests for the D'Claw turn task environments.

    Each test method receives (env_id, env_cls) from the class-level
    `@parameterized.parameters` decorator. NOTE: the class must inherit from
    `parameterized.TestCase` (not plain `absltest.TestCase`); otherwise the
    class decorator is not expanded and unittest calls the test methods
    without the parameter arguments.
    """

    def test_gym_make(self, env_id, env_cls):
        """Checks that `gym.make` constructs the registered environment."""
        env = gym.make(env_id)
        self.assertIsInstance(env.unwrapped, env_cls)

    def test_spaces(self, _, env_cls):
        """Checks the observation, action, and state spaces."""
        env = env_cls()
        # Sum of the sizes of the default observation keys.
        observation_size = np.sum([
            9,  # claw_qpos
            1,  # object_x
            1,  # object_y
            9,  # last_action
            1,  # target_error
        ])
        self.assertEqual(env.observation_space.shape, (observation_size,))
        self.assertEqual(env.action_space.shape, (9,))
        self.assertEqual(env.state_space['claw_qpos'].shape, (9,))
        self.assertEqual(env.state_space['claw_qvel'].shape, (9,))
        self.assertEqual(env.state_space['object_qpos'].shape, (1,))
        self.assertEqual(env.state_space['object_qvel'].shape, (1,))

    def test_reset_step(self, _, env_cls):
        """Checks that resetting and stepping works."""
        env = env_cls()
        env.reset()
        env.step(env.action_space.sample())
# Run the absl test runner when this file is executed as a script.
if __name__ == '__main__':
    absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/dclaw/screw.py | robel/dclaw/screw.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Screw tasks with DClaw robots.
This is continuous rotation of an object to match a target velocity.
"""
from typing import Optional
import numpy as np
from robel.dclaw.turn import BaseDClawTurn
from robel.simulation.randomize import SimRandomizer
from robel.utils.configurable import configurable
class BaseDClawScrew(BaseDClawTurn):
    """Shared logic for DClaw screw tasks.

    A screw task is a turn task whose goal angle advances at a constant
    velocity every step, so the claw must rotate the object continuously.
    """

    def __init__(self, success_threshold: float = 0.2, **kwargs):
        """Initializes the environment.

        Args:
            success_threshold: The difference threshold (in radians) of the
                object position and the goal position within which we consider
                as a success.
        """
        super().__init__(success_threshold=success_threshold, **kwargs)
        # Subclasses assign the goal velocity during `_reset`.
        self._target_object_vel = 0
        self._desired_target_pos = 0

    def _reset(self):
        super()._reset()
        # Begin tracking from wherever the subclass placed the goal.
        self._desired_target_pos = self._target_object_pos

    def _step(self, action: np.ndarray):
        """Advances the moving goal, then applies the action to the robot."""
        if self._interactive:
            # In interactive mode the guide motor dictates the goal instead.
            super()._step(action)
            return
        # Integrate the goal forward by one control interval. The goal is
        # unbounded so it can wind past +/-pi.
        self._desired_target_pos += self._target_object_vel * self.dt
        self._set_target_object_pos(
            self._desired_target_pos, unbounded=True)
        super()._step(action)
@configurable(pickleable=True)
class DClawScrewFixed(BaseDClawScrew):
    """Screw task with a deterministic setup.

    The valve starts at 0 radians with the goal on top of it; the goal then
    rotates at a constant 0.5 rad/s.
    """

    def _reset(self):
        self._target_object_vel = 0.5
        self._initial_object_pos = 0
        self._set_target_object_pos(0)
        super()._reset()
@configurable(pickleable=True)
class DClawScrewRandom(BaseDClawScrew):
    """Screw task with a randomized setup.

    The valve starts anywhere in +/-180 degrees (goal starting on top of
    it), and the goal rotates at a random velocity in +/-0.75 rad/s.
    """

    def _reset(self):
        # NOTE: keep the two `uniform` draws in this order so the RNG stream
        # matches across versions.
        start_angle = self.np_random.uniform(low=-np.pi, high=np.pi)
        self._initial_object_pos = start_angle
        self._set_target_object_pos(start_angle)
        self._target_object_vel = self.np_random.uniform(low=-0.75, high=0.75)
        super()._reset()
@configurable(pickleable=True)
class DClawScrewRandomDynamics(DClawScrewRandom):
    """Rotates the object with a random initial position and velocity.

    The dynamics of the simulation are randomized each episode.

    NOTE: the randomization recipe below duplicates
    `DClawTurnRandomDynamics._reset` in turn.py; keep the two in sync.
    """

    def __init__(self,
                 *args,
                 sim_observation_noise: Optional[float] = 0.05,
                 **kwargs):
        """Initializes the environment.

        Args:
            sim_observation_noise: Observation noise scale passed through to
                the robot component (see `BaseDClawEnv`); defaults to 0.05
                for this randomized-dynamics variant.
        """
        super().__init__(
            *args, sim_observation_noise=sim_observation_noise, **kwargs)
        self._randomizer = SimRandomizer(self)
        # DoF indices of the claw and object joints whose damping/friction
        # are randomized each reset.
        self._dof_indices = (
            self.robot.get_config('dclaw').qvel_indices.tolist() +
            self.robot.get_config('object').qvel_indices.tolist())

    def _reset(self):
        # Randomize joint dynamics.
        self._randomizer.randomize_dofs(
            self._dof_indices,
            damping_range=(0.005, 0.1),
            friction_loss_range=(0.001, 0.005),
        )
        # Randomize actuator gain (one shared value for all actuators).
        self._randomizer.randomize_actuators(
            all_same=True,
            kp_range=(1, 3),
        )
        # Randomize friction on all geoms in the scene.
        self._randomizer.randomize_geoms(
            all_same=True,
            friction_slide_range=(0.8, 1.2),
            friction_spin_range=(0.003, 0.007),
            friction_roll_range=(0.00005, 0.00015),
        )
        # Slightly perturb where the claw is mounted.
        self._randomizer.randomize_bodies(
            ['mount'],
            position_perturb_range=(-0.01, 0.01),
        )
        # Randomize visual appearance of the mount and valve.
        self._randomizer.randomize_geoms(
            ['mount'],
            color_range=(0.2, 0.9),
        )
        self._randomizer.randomize_geoms(
            parent_body_names=['valve'],
            color_range=(0.2, 0.9),
        )
        super()._reset()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/dclaw/pose_test.py | robel/dclaw/pose_test.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for D'Claw pose tasks."""
from absl.testing import absltest
from absl.testing import parameterized
import gym
import numpy as np
from robel.dclaw.pose import (DClawPoseFixed, DClawPoseRandom,
DClawPoseRandomDynamics)
# pylint: disable=no-member
@parameterized.parameters(
    ('DClawPoseFixed-v0', DClawPoseFixed),
    ('DClawPoseRandom-v0', DClawPoseRandom),
    ('DClawPoseRandomDynamics-v0', DClawPoseRandomDynamics),
)
class DClawPoseTest(parameterized.TestCase):
    """Unit tests for the D'Claw pose task environments.

    Each test method receives (env_id, env_cls) from the class-level
    `@parameterized.parameters` decorator. NOTE: the class must inherit from
    `parameterized.TestCase` (not plain `absltest.TestCase`); otherwise the
    class decorator is not expanded and unittest calls the test methods
    without the parameter arguments.
    """

    def test_gym_make(self, env_id, env_cls):
        """Checks that `gym.make` constructs the registered environment."""
        env = gym.make(env_id)
        self.assertIsInstance(env.unwrapped, env_cls)

    def test_spaces(self, _, env_cls):
        """Checks the observation, action, and state spaces."""
        env = env_cls()
        # Sum of the sizes of the default observation keys.
        observation_size = np.sum([
            9,  # qpos
            9,  # last_action
            9,  # qpos_error
        ])
        self.assertEqual(env.observation_space.shape, (observation_size,))
        self.assertEqual(env.action_space.shape, (9,))
        self.assertEqual(env.state_space['qpos'].shape, (9,))
        self.assertEqual(env.state_space['qvel'].shape, (9,))

    def test_reset_step(self, _, env_cls):
        """Checks that resetting and stepping works."""
        env = env_cls()
        env.reset()
        env.step(env.action_space.sample())
# Run the absl test runner when this file is executed as a script.
if __name__ == '__main__':
    absltest.main()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/dclaw/scripted_reset.py | robel/dclaw/scripted_reset.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hardware reset functions for the D'Claw."""
import logging
import time
from typing import Dict
import numpy as np
from robel.components.robot import RobotComponentBuilder, RobotState
from robel.components.robot.dynamixel_robot import DynamixelRobotComponent
# Minimum time in seconds to wait before fully resetting the D'Claw.
MINIMUM_CLAW_RESET_TIME = 1.0

# The positional error tolerance (radians) to be considered as a good reset.
GOOD_ERROR_TOL = 5. * np.pi / 180.

# Maximum retries before asking a human for help with the reset.
MAX_RESET_RETRIES = 3

# Reset position (radians) for the three top joints used to disentangle.
TOP_DISENTANGLE_RESET_POS = np.array([np.pi / 4, np.pi / 4, np.pi / 4])

# Time in seconds to sleep in between disentangle interval steps.
DISENTANGLE_INTERVAL_TIME = 0.75
def add_groups_for_reset(builder: RobotComponentBuilder):
    """Defines the per-row claw groups required to perform the reset.

    Motor IDs follow the <finger><joint> convention: 10/20/30 are the top
    joints of fingers 1-3, 11/21/31 the middle joints, 12/22/32 the bottom.
    """
    row_motor_ids = {
        'dclaw_top': [10, 20, 30],
        'dclaw_middle': [11, 21, 31],
        'dclaw_bottom': [12, 22, 32],
    }
    for group_name, motor_ids in row_motor_ids.items():
        builder.add_group(group_name, motor_ids=motor_ids)
def reset_to_states(robot: DynamixelRobotComponent,
                    states: Dict[str, RobotState]):
    """Resets the D'Claw to the given states.

    This is a multi-stage reset procedure that allows human intervention if
    motors are not resetting properly:
    1. Move the claw with its top/bottom rows disengaged (anti-tangling).
    2. Reset the object and guide motors while the claw is still moving.
    3. Fully engage and reset the claw, then verify every motor reached its
       goal, retrying (with disentangling, and eventually human help) until
       all positions are within `GOOD_ERROR_TOL`.

    Args:
        robot: The robot component to reset. Must be a hardware robot.
        states: The states to apply to the robot, keyed by group name
            ('dclaw', and optionally 'object'/'guide').
    """
    assert robot.is_hardware
    claw_state = states['dclaw']
    has_object = 'object' in robot.groups

    # Disable the top and bottom joints of the claw to help prevent tangling.
    robot.set_motors_engaged('dclaw_top', False)
    robot.set_motors_engaged('dclaw_bottom', False)
    # Non-blocking so the object/guide reset below overlaps the claw motion.
    robot.set_state({'dclaw': claw_state}, block=False)
    reset_start_time = time.time()

    # Reset the object and guide motors while the claw is moving.
    if has_object:
        robot.set_motors_engaged(['object', 'guide'], True)
        robot.set_state(
            {
                'object': states['object'],
                'guide': states['guide'],
            },
            timeout=10,
        )

    # Wait a minimum time before fully resetting the claw.
    reset_elapsed = time.time() - reset_start_time
    if reset_elapsed < MINIMUM_CLAW_RESET_TIME:
        time.sleep(MINIMUM_CLAW_RESET_TIME - reset_elapsed)

    # Fully reset the D'Claw.
    robot.set_motors_engaged('dclaw', True)
    robot.set_state({'dclaw': claw_state})

    # Check that the motors have actually reset.
    reset_retries = 0
    while True:
        cur_state = robot.get_state('dclaw')
        # Check positions one motor at a time for better diagnosing.
        bad_motors = []
        for i, motor_id in enumerate(robot.get_config('dclaw').motor_ids):
            if abs(cur_state.qpos[i] - claw_state.qpos[i]) > GOOD_ERROR_TOL:
                bad_motors.append(motor_id)
        if not bad_motors:
            break

        # Attempt to reset again.
        logging.error('[%d] Could not reset D\'Claw motors: %s', reset_retries,
                      str(bad_motors))
        reset_retries += 1
        # Wait for human assistance if too many resets have occurred.
        if reset_retries > MAX_RESET_RETRIES:
            print('\n' + '=' * 10)
            print('Please fix motors: {}'.format(bad_motors))
            print('=' * 10)
            input('Press Enter to resume.')
            reset_retries = 0

        # Try to disentangle.
        disentangle_dclaw(robot, claw_state.qpos)

        # Re-attempt the reset.
        robot.set_motors_engaged('dclaw', True)
        robot.set_state({'dclaw': claw_state})

    # Start the episode with the object disengaged.
    if has_object:
        robot.set_motors_engaged('object', False)
    robot.reset_time()
def disentangle_dclaw(robot: DynamixelRobotComponent, goal_pos: np.ndarray):
    """Performs a disentangling process to move to the given goal position.

    Relaxes the whole claw, then re-engages and resets one row of joints at
    a time from top to bottom: lifting the top joints first frees the lower
    joints to reach their goals without tangling.

    Args:
        robot: The hardware robot component to move.
        goal_pos: The 9 goal joint positions (radians), ordered
            [top, middle, bottom] per finger.
    """
    assert goal_pos.shape == (9,)
    # Let the motors rest before moving individual rows.
    robot.set_motors_engaged('dclaw', False)
    time.sleep(DISENTANGLE_INTERVAL_TIME)

    row_stages = (
        # Move the top joints upwards to free the lower joints.
        ('dclaw_top', TOP_DISENTANGLE_RESET_POS),
        # Then reset the middle joints, then the bottom joints.
        ('dclaw_middle', goal_pos[[1, 4, 7]]),
        ('dclaw_bottom', goal_pos[[2, 5, 8]]),
    )
    for group_name, row_qpos in row_stages:
        robot.set_motors_engaged(group_name, True)
        robot.set_state({group_name: RobotState(qpos=row_qpos)}, block=False)
        time.sleep(DISENTANGLE_INTERVAL_TIME)
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/dclaw/base_env.py | robel/dclaw/base_env.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared logic for all DClaw environments."""
import abc
import collections
from typing import Dict, Optional, Sequence, Union
import gym
import numpy as np
from robel.components.robot import RobotComponentBuilder, RobotState
from robel.components.robot.dynamixel_utils import CalibrationMap
from robel.dclaw import scripted_reset
from robel.robot_env import make_box_space, RobotEnv
# Convenience constants.
PI = np.pi

# Threshold (radians) near the joint limits at which we consider to be unsafe.
SAFETY_POS_THRESHOLD = 5 * PI / 180  # 5 degrees

# Velocity magnitude above which we consider as unsafe.
SAFETY_VEL_THRESHOLD = 1.0  # 1rad/s

# Current threshold above which we consider as unsafe.
SAFETY_CURRENT_THRESHOLD = 200  # mA

# Mapping of motor ID to (scale, offset) applied to raw hardware positions.
# Motor IDs follow the <finger><joint> convention (e.g. 21 is finger 2's
# middle joint); 50 is the manipulated object and 60 is the guide motor.
DEFAULT_DCLAW_CALIBRATION_MAP = CalibrationMap({
    # Finger 1
    10: (1, -PI / 2),
    11: (1, -PI),
    12: (1, -PI),
    # Finger 2
    20: (1, -PI / 2),
    21: (1, -PI),
    22: (1, -PI),
    # Finger 3
    30: (1, -PI / 2),
    31: (1, -PI),
    32: (1, -PI),
    # Object
    50: (1, -PI),
    # Guide
    60: (1, -PI),
})
class BaseDClawEnv(RobotEnv, metaclass=abc.ABCMeta):
    """Base environment for all DClaw robot tasks."""

    def __init__(self,
                 *args,
                 device_path: Optional[str] = None,
                 sim_observation_noise: Optional[float] = None,
                 **kwargs):
        """Initializes the environment.

        Args:
            device_path: The device path to Dynamixel hardware. If None, the
                environment runs purely in simulation.
            sim_observation_noise: If given, configures the RobotComponent to
                add noise to observations.
        """
        super().__init__(*args, **kwargs)
        self._device_path = device_path
        self._sim_observation_noise = sim_observation_noise

        # Create the robot component.
        robot_builder = RobotComponentBuilder()
        self._configure_robot(robot_builder)
        self.robot = self._add_component(robot_builder)

    def get_state(self) -> Dict[str, np.ndarray]:
        """Returns the current state of the environment."""
        state = self.robot.get_state('dclaw')
        return {'qpos': state.qpos, 'qvel': state.qvel}

    def set_state(self, state: Dict[str, np.ndarray]):
        """Sets the state of the environment."""
        self.robot.set_state(
            {'dclaw': RobotState(qpos=state['qpos'], qvel=state['qvel'])})

    def _configure_robot(self, builder: RobotComponentBuilder):
        """Configures the robot component."""
        # Add the main D'Claw group: 9 joints, 3 per finger in
        # [top, middle, bottom] order.
        builder.add_group(
            'dclaw',
            qpos_indices=range(9),
            qpos_range=[
                (-0.48, 0.48),  # ~27.5 degrees for top servos.
                (-PI / 3, PI / 3),  # 60 degrees for middle servos.
                (-PI / 2, PI / 2),  # 90 degrees for bottom servos.
            ] * 3,
            qvel_range=[(-2 * PI / 3, 2 * PI / 3)] * 9)
        if self._sim_observation_noise is not None:
            builder.update_group(
                'dclaw', sim_observation_noise=self._sim_observation_noise)
        # If a device path is given, set the motor IDs and calibration map.
        if self._device_path:
            builder.set_dynamixel_device_path(self._device_path)
            builder.set_hardware_calibration_map(DEFAULT_DCLAW_CALIBRATION_MAP)
            builder.update_group(
                'dclaw', motor_ids=[10, 11, 12, 20, 21, 22, 30, 31, 32])
            # Per-row groups used by the scripted hardware reset.
            scripted_reset.add_groups_for_reset(builder)

    def _initialize_action_space(self) -> gym.Space:
        """Returns the action space to use for this environment."""
        # One normalized [-1, 1] action per claw joint.
        qpos_indices = self.robot.get_config('dclaw').qpos_indices
        return make_box_space(-1.0, 1.0, shape=(qpos_indices.size,))

    def _get_safety_scores(
            self,
            pos: Optional[np.ndarray] = None,
            vel: Optional[np.ndarray] = None,
            current: Optional[np.ndarray] = None,
    ) -> Dict[str, np.ndarray]:
        """Computes safety-related scores for D'Claw robots.

        NOTE(review): the `axis=1` reductions below assume the inputs are
        batched as (timesteps, 9); a single unbatched (9,) input would fail.
        Confirm against how `get_score_dict` observations are batched.

        Args:
            pos: The joint positions.
            vel: The joint velocities.
            current: The motor currents.

        Returns:
            A dictionary of safety scores (per-batch-entry violation counts)
            for the given values.
        """
        scores = collections.OrderedDict()
        dclaw_config = self.robot.get_config('dclaw')

        if pos is not None and dclaw_config.qpos_range is not None:
            # Calculate lower and upper separately so broadcasting works when
            # positions are batched.
            near_lower_limit = (
                np.abs(dclaw_config.qpos_range[:, 0] - pos) <
                SAFETY_POS_THRESHOLD)
            near_upper_limit = (
                np.abs(dclaw_config.qpos_range[:, 1] - pos) <
                SAFETY_POS_THRESHOLD)
            near_pos_limit = np.sum(near_lower_limit | near_upper_limit, axis=1)
            scores['safety_pos_violation'] = near_pos_limit
        if vel is not None:
            above_vel_limit = np.sum(np.abs(vel) > SAFETY_VEL_THRESHOLD, axis=1)
            scores['safety_vel_violation'] = above_vel_limit
        if current is not None:
            above_current_limit = np.sum(
                np.abs(current) > SAFETY_CURRENT_THRESHOLD, axis=1)
            scores['safety_current_violation'] = above_current_limit
        return scores
class BaseDClawObjectEnv(BaseDClawEnv, metaclass=abc.ABCMeta):
    """Base environment for all DClaw robot tasks with objects."""

    def __init__(self, *args, use_guide: bool = False, **kwargs):
        """Initializes the environment.

        Args:
            use_guide: If True, activates an object motor in hardware to use
                to show the goal.
        """
        # Must be set before super().__init__, which calls _configure_robot.
        self._use_guide = use_guide
        super().__init__(*args, **kwargs)

    def get_state(self) -> Dict[str, np.ndarray]:
        """Returns the current state of the environment."""
        claw_state, object_state = self.robot.get_state(['dclaw', 'object'])
        return {
            'claw_qpos': claw_state.qpos,
            'claw_qvel': claw_state.qvel,
            'object_qpos': object_state.qpos,
            'object_qvel': object_state.qvel,
        }

    def set_state(self, state: Dict[str, np.ndarray]):
        """Sets the state of the environment."""
        self.robot.set_state({
            'dclaw': RobotState(
                qpos=state['claw_qpos'], qvel=state['claw_qvel']),
            'object': RobotState(
                qpos=state['object_qpos'], qvel=state['object_qvel']),
        })

    def _configure_robot(self, builder: RobotComponentBuilder):
        """Configures the robot component."""
        super()._configure_robot(builder)
        # Add the object group.
        builder.add_group(
            'object',
            qpos_indices=[-1],  # The object is the last qpos.
            qpos_range=[(-PI, PI)])
        if self._sim_observation_noise is not None:
            builder.update_group(
                'object', sim_observation_noise=self._sim_observation_noise)
        if self._device_path:
            builder.update_group('object', motor_ids=[50])

        # Add the guide group, which is a no-op if the guide motor is unused.
        builder.add_group('guide')
        if self._use_guide and self._device_path:
            builder.update_group('guide', motor_ids=[60], use_raw_actions=True)

    def _reset_dclaw_and_object(
            self,
            claw_pos: Optional[Sequence[float]] = None,
            claw_vel: Optional[Sequence[float]] = None,
            object_pos: Optional[Union[float, Sequence[float]]] = None,
            object_vel: Optional[Union[float, Sequence[float]]] = None,
            guide_pos: Optional[Union[float, Sequence[float]]] = None):
        """Reset procedure for DClaw robots that manipulate objects.

        Args:
            claw_pos: The joint positions for the claw (radians).
            claw_vel: The joint velocities for the claw (radians/second). This
                is ignored on hardware.
            object_pos: The joint position for the object (radians).
            object_vel: The joint velocity for the object (radians/second). This
                is ignored on hardware.
            guide_pos: The joint position for the guide motor (radians). The
                guide motor is optional for marking the desired position.
        """
        # Set defaults if parameters are not given.
        claw_init_state, object_init_state = self.robot.get_initial_state(
            ['dclaw', 'object'])
        claw_pos = (
            claw_init_state.qpos if claw_pos is None else np.asarray(claw_pos))
        claw_vel = (
            claw_init_state.qvel if claw_vel is None else np.asarray(claw_vel))
        object_pos = (
            object_init_state.qpos
            if object_pos is None else np.atleast_1d(object_pos))
        object_vel = (
            object_init_state.qvel
            if object_vel is None else np.atleast_1d(object_vel))
        guide_pos = (
            np.zeros(1) if guide_pos is None else np.atleast_1d(guide_pos))

        if self.robot.is_hardware:
            # Hardware uses the multi-stage scripted reset; velocities are
            # not commandable there.
            scripted_reset.reset_to_states(
                self.robot, {
                    'dclaw': RobotState(qpos=claw_pos),
                    'object': RobotState(qpos=object_pos),
                    'guide': RobotState(qpos=guide_pos),
                })
        else:
            self.robot.set_state({
                'dclaw': RobotState(qpos=claw_pos, qvel=claw_vel),
                'object': RobotState(qpos=object_pos, qvel=object_vel),
            })
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/dclaw/__init__.py | robel/dclaw/__init__.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gym environment registration for DClaw environments."""
from robel.utils.registration import register
#===============================================================================
# Pose tasks
#===============================================================================

# Default number of steps per episode.
_POSE_EPISODE_LEN = 80  # 80*20*2.5ms = 4s

register(
    env_id='DClawPoseFixed-v0',
    class_path='robel.dclaw.pose:DClawPoseFixed',
    max_episode_steps=_POSE_EPISODE_LEN)

register(
    env_id='DClawPoseRandom-v0',
    class_path='robel.dclaw.pose:DClawPoseRandom',
    max_episode_steps=_POSE_EPISODE_LEN)

register(
    env_id='DClawPoseRandomDynamics-v0',
    class_path='robel.dclaw.pose:DClawPoseRandomDynamics',
    max_episode_steps=_POSE_EPISODE_LEN)

#===============================================================================
# Turn tasks
#===============================================================================

# Default number of steps per episode. Turn tasks use frame_skip=40, so an
# episode covers the same 4s of wall time with half the steps of pose tasks.
_TURN_EPISODE_LEN = 40  # 40*40*2.5ms = 4s

register(
    env_id='DClawTurnFixed-v0',
    class_path='robel.dclaw.turn:DClawTurnFixed',
    max_episode_steps=_TURN_EPISODE_LEN)

register(
    env_id='DClawTurnRandom-v0',
    class_path='robel.dclaw.turn:DClawTurnRandom',
    max_episode_steps=_TURN_EPISODE_LEN)

register(
    env_id='DClawTurnRandomDynamics-v0',
    class_path='robel.dclaw.turn:DClawTurnRandomDynamics',
    max_episode_steps=_TURN_EPISODE_LEN)

#===============================================================================
# Screw tasks
#===============================================================================

# Default number of steps per episode. Screw episodes run twice as long as
# turn episodes since the goal rotates continuously.
_SCREW_EPISODE_LEN = 80  # 80*40*2.5ms = 8s

register(
    env_id='DClawScrewFixed-v0',
    class_path='robel.dclaw.screw:DClawScrewFixed',
    max_episode_steps=_SCREW_EPISODE_LEN)

register(
    env_id='DClawScrewRandom-v0',
    class_path='robel.dclaw.screw:DClawScrewRandom',
    max_episode_steps=_SCREW_EPISODE_LEN)

register(
    env_id='DClawScrewRandomDynamics-v0',
    class_path='robel.dclaw.screw:DClawScrewRandomDynamics',
    max_episode_steps=_SCREW_EPISODE_LEN)
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
google-research/robel | https://github.com/google-research/robel/blob/5b0fd3704629931712c6e0f7268ace1c2154dc83/robel/dclaw/pose.py | robel/dclaw/pose.py | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose tasks with DClaw robots.
The DClaw is tasked to match a pose defined by the environment.
"""
import abc
import collections
from typing import Any, Dict, Optional, Sequence
import numpy as np
from robel.components.robot.dynamixel_robot import DynamixelRobotState
from robel.components.robot import RobotComponentBuilder, RobotState
from robel.dclaw.base_env import BaseDClawEnv
from robel.simulation.randomize import SimRandomizer
from robel.utils.configurable import configurable
from robel.utils.resources import get_asset_path
# The observation keys that are concatenated as the environment observation.
DEFAULT_OBSERVATION_KEYS = (
'qpos',
'last_action',
'qpos_error',
)
# The maximum velocity for the motion task.
MOTION_VELOCITY_LIMIT = np.pi / 6 # 30deg/s
# The error margin to the desired positions to consider as successful.
SUCCESS_THRESHOLD = 10 * np.pi / 180
DCLAW3_ASSET_PATH = 'robel-scenes/dclaw/dclaw3xh.xml'
class BaseDClawPose(BaseDClawEnv, metaclass=abc.ABCMeta):
"""Shared logic for DClaw pose tasks."""
def __init__(self,
asset_path: str = DCLAW3_ASSET_PATH,
observation_keys: Sequence[str] = DEFAULT_OBSERVATION_KEYS,
frame_skip: int = 20,
**kwargs):
"""Initializes the environment.
Args:
asset_path: The XML model file to load.
observation_keys: The keys in `get_obs_dict` to concatenate as the
observations returned by `step` and `reset`.
frame_skip: The number of simulation steps per environment step.
"""
super().__init__(
sim_model=get_asset_path(asset_path),
observation_keys=observation_keys,
frame_skip=frame_skip,
**kwargs)
self._initial_pos = np.zeros(9)
self._desired_pos = np.zeros(9)
def _configure_robot(self, builder: RobotComponentBuilder):
super()._configure_robot(builder)
# Add an overlay group to show desired joint positions.
builder.add_group(
'overlay', actuator_indices=[], qpos_indices=range(9, 18))
def _reset(self):
"""Resets the environment."""
# Mark the target position in sim.
self.robot.set_state({
'dclaw': RobotState(qpos=self._initial_pos, qvel=np.zeros(9)),
})
def _step(self, action: np.ndarray):
"""Applies an action to the robot."""
self.robot.step({'dclaw': action})
def get_obs_dict(self) -> Dict[str, Any]:
"""Returns the current observation of the environment.
Returns:
A dictionary of observation values. This should be an ordered
dictionary if `observation_keys` isn't set.
"""
state = self.robot.get_state('dclaw')
obs_dict = collections.OrderedDict((
('qpos', state.qpos),
('qvel', state.qvel),
('last_action', self._get_last_action()),
('qpos_error', self._desired_pos - state.qpos),
))
# Add hardware-specific state if present.
if isinstance(state, DynamixelRobotState):
obs_dict['current'] = state.current
return obs_dict
def get_reward_dict(
self,
action: np.ndarray,
obs_dict: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
"""Returns the reward for the given action and observation."""
qvel = obs_dict['qvel']
reward_dict = collections.OrderedDict((
('pose_error_cost', -1 * np.linalg.norm(obs_dict['qpos_error'])),
# Penalty if the velocity exceeds a threshold.
('joint_vel_cost',
-0.1 * np.linalg.norm(qvel[np.abs(qvel) >= np.pi])),
))
return reward_dict
def get_score_dict(
self,
obs_dict: Dict[str, np.ndarray],
reward_dict: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
"""Returns a standardized measure of success for the environment."""
mean_pos_error = np.mean(np.abs(obs_dict['qpos_error']), axis=1)
score_dict = collections.OrderedDict((
# Clip and normalize error to 45 degrees.
('points', 1.0 - np.minimum(mean_pos_error / (np.pi / 4), 1)),
('success', mean_pos_error < SUCCESS_THRESHOLD),
))
score_dict.update(
self._get_safety_scores(
pos=obs_dict['qpos'],
vel=obs_dict['qvel'],
current=obs_dict.get('current'),
))
return score_dict
def _make_random_pose(self) -> np.ndarray:
"""Returns a random pose."""
pos_range = self.robot.get_config('dclaw').qpos_range
random_range = pos_range.copy()
# Clamp middle joints to at most 0 (joints always go outwards) to avoid
# entanglement.
random_range[[1, 4, 7], 1] = 0
pose = self.np_random.uniform(
low=random_range[:, 0], high=random_range[:, 1])
return pose
def _update_overlay(self):
"""Updates the overlay in simulation to show the desired pose."""
self.robot.set_state({'overlay': RobotState(qpos=self._desired_pos)})
@configurable(pickleable=True)
class DClawPoseFixed(BaseDClawPose):
"""Track a fixed random initial and final pose."""
def _reset(self):
self._initial_pos = self._make_random_pose()
self._desired_pos = self._make_random_pose()
self._update_overlay()
super()._reset()
@configurable(pickleable=True)
class DClawPoseRandom(BaseDClawPose):
"""Track a random moving pose."""
def _reset(self):
# Choose two poses to oscillate between.
pose_a = self._make_random_pose()
pose_b = self._make_random_pose()
self._initial_pos = 0.5 * (pose_a + pose_b)
self._dynamic_range = 0.5 * np.abs(pose_b - pose_a)
# Initialize a random oscilliation period.
dclaw_config = self.robot.get_config('dclaw')
self._period = self.np_random.uniform(
low=0.5, high=2.0, size=len(dclaw_config.qpos_indices))
# Clamp the movement range by the velocity limit.
vel_limit = MOTION_VELOCITY_LIMIT / self._period
self._dynamic_range = np.minimum(self._dynamic_range, vel_limit)
self._update_desired_pose()
super()._reset()
def _update_desired_pose(self):
self._desired_pos = (
self._initial_pos +
(self._dynamic_range * np.sin(self._period * self.robot.time)))
self._update_overlay()
def _step(self, action: np.ndarray):
"""Applies an action to the robot."""
result = super()._step(action)
self._update_desired_pose()
return result
@configurable(pickleable=True)
class DClawPoseRandomDynamics(DClawPoseRandom):
"""Track a random moving pose.
The dynamics of the simulation are randomized each episode.
"""
def __init__(self,
*args,
sim_observation_noise: Optional[float] = 0.05,
**kwargs):
super().__init__(
*args, sim_observation_noise=sim_observation_noise, **kwargs)
self._randomizer = SimRandomizer(self)
self._dof_indices = (
self.robot.get_config('dclaw').qvel_indices.tolist())
def _reset(self):
# Randomize joint dynamics.
self._randomizer.randomize_dofs(
self._dof_indices,
damping_range=(0.005, 0.1),
friction_loss_range=(0.001, 0.005),
)
self._randomizer.randomize_actuators(
all_same=True,
kp_range=(1, 3),
)
super()._reset()
| python | Apache-2.0 | 5b0fd3704629931712c6e0f7268ace1c2154dc83 | 2026-01-05T07:14:22.487637Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/eval_data/machine_eval/machine_metrics.py | eval_data/machine_eval/machine_metrics.py | '''
Author: anon
Date: 2022-01-27 11:39:10
LastEditors: anon
LastEditTime: 2022-02-11 17:47:14
FilePath: /crosstalk-generation/src/gpt/metrics.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
from statistics import mean
from nltk import word_tokenize
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.gleu_score import corpus_gleu
from nltk.translate.meteor_score import meteor_score
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
import io,json
# import nltk
# nltk.download('wordnet')
'''
description: 计算bleu1,2,3,4的值
param {原始句} reference
param {预测句} hypothesis
return bleu1,bleu2,bleu3,bleu4
'''
def calculate_bleu_score(references, candidates):
smooth = SmoothingFunction()
reference = [[[j for j in i]] for i in references]
hypothesis = [[j for j in i] for i in candidates]
BLEU_1 = corpus_bleu(reference, hypothesis, weights=(1, 0, 0, 0), smoothing_function=smooth.method1)
BLEU_2 = corpus_bleu(reference, hypothesis, weights=(0.5, 0.5, 0, 0), smoothing_function=smooth.method1)
BLEU_3 = corpus_bleu(reference, hypothesis, weights=(0.33, 0.33, 0.33, 0), smoothing_function=smooth.method1)
BLEU_4 = corpus_bleu(reference, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=smooth.method1)
return BLEU_1,BLEU_2,BLEU_3,BLEU_4
'''
description: 计算gleu值
param {原始句} reference
param {预测句} hypothesis
return gleu值
'''
def calculate_gleu_score(references, candidates):
reference = [[[j for j in i]] for i in references]
hypothesis = [[j for j in i] for i in candidates]
return corpus_gleu(reference, hypothesis)
'''
description: 中文不建议使用,因为依赖了wordnet,wordnet是英文词典
param {原始句} reference
param {预测句} hypothesis
return metetor值
'''
def calculate_meteor_score(references, candidates):
reference = [[[j for j in i]] for i in references]
hypothesis = [[j for j in i] for i in candidates]
all_meteor = []
for ref,hyp in zip(reference, hypothesis):
all_meteor.append(meteor_score(ref,hyp))
return mean(all_meteor)
'''
description: rouge值计算
param {原始句} reference
param {预测句} hypothesis
return rouge1,rouge2,rougel
'''
def calculate_rouge_score(reference, hypothesis):
rouge = Rouge()
scores = []
for ref,hyp in zip(reference,hypothesis):
try:
scores.append(rouge.get_scores(' '.join([i for i in hyp]), ' '.join([i for i in ref])))
except:
continue
rouge_1 = [i[0]['rouge-1']['f'] for i in scores]
rouge_2 = [i[0]['rouge-2']['f'] for i in scores]
rouge_l = [i[0]['rouge-l']['f'] for i in scores]
return mean(rouge_1),mean(rouge_2),mean(rouge_l)
def calc_distinct_ngram(pair_list, ngram):
"""
calc_distinct_ngram
"""
pair_list = [[j for j in i] for i in pair_list]
def get_dict(tokens, ngram, gdict=None):
"""
get_dict
统计n-gram频率并用dict存储
"""
token_dict = {}
if gdict is not None:
token_dict = gdict
tlen = len(tokens)
for i in range(0, tlen - ngram + 1):
ngram_token = "".join(tokens[i:(i + ngram)])
if token_dict.get(ngram_token) is not None:
token_dict[ngram_token] += 1
else:
token_dict[ngram_token] = 1
return token_dict
ngram_total = 0.0
ngram_distinct_count = 0.0
pred_dict = {}
for predict_tokens in pair_list:
get_dict(predict_tokens, ngram, pred_dict)
for key, freq in pred_dict.items():
ngram_total += freq
ngram_distinct_count += 1
#if freq == 1:
# ngram_distinct_count += freq
return ngram_distinct_count / ngram_total
def test_demo():
references = ['我今天晚上必须回家吃饭','广东鸡翅膀,我最爱吃','天天都需要你爱']
candidates = ['晚上我要回家吃饭','最好吃的是广东鸡翅膀','啦啦啦啦要你爱']
belu_scores = calculate_bleu_score(references,candidates)
gleu_scores = calculate_gleu_score(references,candidates)
# meteor_scores = calculate_meteor_score(references,candidates)
rouge_scores = calculate_rouge_score(references,candidates)
print(belu_scores)
def cal_metrics(machine_gen_file):
raw_text = io.open(machine_gen_file,'r').read()
data_list = json.loads(raw_text)
references = []
candidates = []
for data_item in data_list:
references.append(data_item['ori'])
candidates.append(data_item['gen'])
distinct_1 = calc_distinct_ngram(candidates,1)
distinct_2 = calc_distinct_ngram(candidates,2)
belu_scores = calculate_bleu_score(references,candidates)
gleu_scores = calculate_gleu_score(references,candidates)
rouge_scores = calculate_rouge_score(references,candidates)
result = {
'bleu_1':belu_scores[0] * 100,
'bleu_2':belu_scores[1] * 100,
'bleu_3':belu_scores[2] * 100,
'bleu_4':belu_scores[3] * 100,
'gleu':gleu_scores * 100,
'rouge_1':rouge_scores[0] * 100,
'rouge_2':rouge_scores[1] * 100,
'rouge_l':rouge_scores[2] * 100,
'distinct_1':distinct_1 * 100,
'distinct_2':distinct_2 * 100
}
return result
if __name__ == '__main__':
import os
this_dir = os.path.split(os.path.realpath(__file__))[0]
all_machine_eval_result_dict = {}
for root,dir,files in os.walk(os.path.join(this_dir,'data')):
for file in files:
results = cal_metrics(os.path.join(root, file))
all_machine_eval_result_dict[file] = results
print(file + ":" + str(results))
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/eval_data/human_eval/human_metrics.py | eval_data/human_eval/human_metrics.py | import os,sys,io,json
import pandas as pd
this_dir = os.path.split(os.path.realpath(__file__))[0]
mode_ref_dict = {
1: "真实数据",
2:"rnn",
3:"GPT",
4:"unilm",
5:"zhouwenwang",
6:"T5",
7:"GPT3",
8:"GPT3-finetune",
9:"CPM",
10:"PANGU-a"
}
def get_score_map():
human_eval_scores_file = os.path.join(this_dir,'data','score_records.json')
score_record = json.loads(io.open(human_eval_scores_file, 'r').read())
score_map = {}
for key,records in pd.DataFrame(score_record['RECORDS']).groupby('user'):
# 只有全部答完的,我们才纳入统计
if len(records) == 50:
model_scores = records.groupby('model_id')
for sub_key,model_score in model_scores:
h_score = model_score['h_score'].sum()
f_score = model_score['f_score'].sum()
d_score = model_score['d_score'].sum()
is_best = model_score['is_best'].sum()
model_key = mode_ref_dict[sub_key]
if model_key in score_map:
score_map[model_key]['humor_score'] += h_score
score_map[model_key]['fluent_score'] += f_score
score_map[model_key]['diss_score'] += d_score
score_map[model_key]['comprehensive_score'] += is_best
else:
score_map[model_key] = {
'humor_score':h_score,
'fluent_score':f_score,
'diss_score':d_score,
'comprehensive_score':is_best
}
return score_map
if __name__ == '__main__':
score_map = get_score_map()
for key in score_map.keys():
val = score_map.get(key)
print(key + ":" + str(val))
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/zhouwenwang/inference.py | src/zhouwenwang/inference.py |
import os,sys
sys.path.append('/data1/anon/crosstalk-generation/src/zhouwenwang/Fengshenbang-LM')
from fengshen import RoFormerModel
from fengshen import RoFormerConfig
from transformers import BertTokenizer
import torch
import numpy as np
max_length = 32
tokenizer = BertTokenizer.from_pretrained("/data1/anon/crosstalk-generation/pretrain_model/zhouwenwang")
config = RoFormerConfig.from_pretrained("/data1/anon/crosstalk-generation/pretrain_model/zhouwenwang")
model = RoFormerModel.from_pretrained("/data1/anon/crosstalk-generation/pretrain_model/zhouwenwang")
sentence = '清华大学在'
def single_line(sentence):
for i in range(max_length):
encode = torch.tensor(
[[tokenizer.cls_token_id]+tokenizer.encode(sentence, add_special_tokens=False)]).long()
logits = model(encode)[0]
logits = torch.nn.functional.linear(
logits, model.embeddings.word_embeddings.weight)
logits = torch.nn.functional.softmax(
logits, dim=-1).cpu().detach().numpy()[0]
sentence = sentence + \
tokenizer.decode(int(np.random.choice(logits.shape[1], p=logits[-1])))
if sentence[-1] == '。':
break
return sentence
print(single_line(sentence))
print() | python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/zhouwenwang/generate_eval_data.py | src/zhouwenwang/generate_eval_data.py | '''
Author: anon
Date: 2022-02-08 16:12:50
LastEditors: anon
LastEditTime: 2022-02-09 16:07:19
FilePath: /crosstalk-generation/src/gpt/generate_eval_data.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
import transformers
import torch
import os
import json
import random
import numpy as np
import argparse
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
import logging
from transformers import GPT2TokenizerFast, GPT2LMHeadModel, GPT2Config
from transformers import BertTokenizerFast
# from transformers import BertTokenizer
from os.path import join, exists
from itertools import zip_longest, chain
# from chatbot.model import DialogueGPT2Model
from torch.utils.data import Dataset, DataLoader
from torch.nn import CrossEntropyLoss
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import io,sys
sys.path.append('/data1/anon/crosstalk-generation/src/zhouwenwang/Fengshenbang-LM')
from fengshen import RoFormerModel
from fengshen import RoFormerConfig
from transformers import BertTokenizer
PAD = '[PAD]'
pad_id = 0
def set_args():
"""
Sets up the arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='0', type=str, required=False, help='生成设备')
parser.add_argument('--temperature', default=1.2, type=float, required=False, help='生成的temperature')
parser.add_argument('--topk', default=2, type=int, required=False, help='最高k选1')
parser.add_argument('--topp', default=0.3, type=float, required=False, help='最高积累概率')
parser.add_argument('--log_path', default='/data1/anon/crosstalk-generation/src/zhouwenwang/logs/interact.log', type=str, required=False, help='interact日志存放位置')
parser.add_argument('--model_path', default='/data1/anon/crosstalk-generation/pretrain_model/zhouwenwang', type=str, required=False, help='对话模型路径')
parser.add_argument('--test_filter_data', default="/data1/anon/crosstalk-generation/src/zhouwenwang/data/p256-s64/test_filter_50x20.txt", type=str, required=False, help="数据基准文件,n个篇章,每个篇章20行")
parser.add_argument('--save_samples_path', default="/data1/anon/crosstalk-generation/src/zhouwenwang/sample/", type=str, required=False, help="保存跑机器指标数据的文件路径及保存篇章级生成的文件路径")
parser.add_argument('--repetition_penalty', default=2.0, type=float, required=False,
help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
parser.add_argument('--diverse_penalty', default=2.0, type=float, required=False,
help="历史出现字惩罚项")
parser.add_argument('--seed', type=int, default=1234, help='设置种子用于生成随机数,以使得训练的结果是确定的')
parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
parser.add_argument('--seq_max_len', type=int, default=256, help='最大输入长度')
parser.add_argument('--max_history_len', type=int, default=20, help="dialogue history的最大长度")
parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行预测')
return parser.parse_args()
def set_random_seed(args):
"""
设置训练的随机种子
"""
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def create_logger(args):
"""
将日志输出到日志文件和控制台
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
# 创建一个handler,用于写入日志文件
file_handler = logging.FileHandler(
filename=args.log_path)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
# 创建一个handler,用于将日志输出到控制台
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
logger.addHandler(console)
return logger
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocab size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
# torch.topk()返回最后一维最大的top_k个元素,返回值为二维(values,indices)
# ...表示其他维度由计算机自行推断
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value # 对于topk之外的其他元素的logits值设为负无穷
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True) # 对logits进行递减排序
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
def generate_text_by_input(args,text,history,model,tokenizer):
history.append(text)
history = [tokenizer.encode(i, add_special_tokens=False) for i in history]
input_ids = [tokenizer.cls_token_id] # 每个input以[CLS]为开头
history_start_index = 1
filter_history_sent_ids = []
for rev_idx in range(len(history)-1,-1,-1):
this_turn_ids = history[rev_idx][:args.utterance_max_len] + [tokenizer.sep_token_id]
# this_turn_ids = history[rev_idx][:args.utterance_max_len]
if history_start_index + len(this_turn_ids) > args.seq_max_len:
break
filter_history_sent_ids.append(this_turn_ids)
history_start_index += len(this_turn_ids)
filter_history_sent_ids.reverse()
for sent_ids in filter_history_sent_ids:
input_ids.extend(sent_ids)
input_ids = torch.tensor(input_ids).long().to(model.device)
input_ids = input_ids.unsqueeze(0)
response = [] # 根据context,生成的response
# 最多生成max_len个token
for idx in range(args.utterance_max_len):
outputs = model(input_ids=input_ids)
logits = outputs[0]
logits = torch.nn.functional.linear(
logits, model.embeddings.word_embeddings.weight)
next_token_logits = logits[0, -1, :]
# 对于已生成的结果generated中的每个token添加一个重复惩罚项,降低其生成概率
for id in set(response):
next_token_logits[id] /= args.repetition_penalty
for id in set(input_ids.cpu().numpy()[0].tolist()):
next_token_logits[id] /= args.diverse_penalty
next_token_logits = next_token_logits / args.temperature
# 对于[UNK]的概率设为无穷小,也就是说模型的预测结果不可能是[UNK]这个token
next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=args.topk, top_p=args.topp)
# torch.multinomial表示从候选集合中无放回地进行抽取num_samples个元素,权重越高,抽到的几率越高,返回元素的下标
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
no_need_tok = [tokenizer.cls_token_id,
tokenizer.sep_token_id,
tokenizer.pad_token_id,
tokenizer.mask_token_id,
tokenizer.convert_tokens_to_ids('#'),
tokenizer.convert_tokens_to_ids('。'),
tokenizer.convert_tokens_to_ids('?'),
tokenizer.convert_tokens_to_ids('.')
] + [i for i in range(192)]
if (next_token.cpu().numpy()[0] in no_need_tok) : # 遇到[SEP]则表明response生成结束
if len(response) > 0:
break
else:
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=len(no_need_tok) + 2)[1:]
for candi_tok in next_token:
if not candi_tok in no_need_tok:
next_token = candi_tok.reshape(1,)
break
if next_token == 0:
continue
response.append(next_token.item())
input_ids = torch.cat((input_ids, next_token.unsqueeze(0)), dim=1)
# his_text = tokenizer.convert_ids_to_tokens(curr_input_tensor.tolist())
# print("his_text:{}".format(his_text))
if len(response) == 0:
print('')
text = tokenizer.convert_ids_to_tokens(response)
text_str = "".join(text)
return text
def single_line(args,sentence,model,tokenizer):
new_sent = ''
unused_ids = [i for i in range(177)] + [i for i in range(181,192)] + [i for i in range(7681,12000)]
history_ids = []
for i in range(args.utterance_max_len):
encode = torch.tensor(
[[tokenizer.cls_token_id]+tokenizer.encode(sentence, add_special_tokens=False)]).cuda()
logits = model(encode)[0]
logits = torch.nn.functional.linear(
logits, model.embeddings.word_embeddings.weight)
for res_id in history_ids:
logits[0][-1][res_id] = logits[0][-1][res_id] / args.repetition_penalty
for unuse_id in unused_ids:
logits[0][-1][unuse_id] = -float('Inf')
logits = torch.nn.functional.softmax(
logits, dim=-1).cpu().detach().numpy()[0]
prob = logits[-1]
tok_id = int(np.random.choice(logits.shape[1], p=prob))
history_ids.append(tok_id)
gen_txt = tokenizer.decode(tok_id)
sentence = sentence + gen_txt
new_sent = new_sent + gen_txt
if len(new_sent) > 1 and new_sent[-1] in ['。','!','?','…']:
break
return new_sent
def get_machine_metric_datas(model,args,tokenizer):
'''
生成机器指标(bleu,gleu,rouge)所需的数据
A_ori->B_gen
A_ori,B_ori->C_gen
A_ori,B_ori,C_ori->D_gen
最后输出
B_ori,B_gen
C_ori,C_gen
D_ori,D_gen
'''
raw_content = io.open(args.test_filter_data,'r').read()
raw_paras = raw_content.split('\n\n')
results = []
for single_para in raw_paras:
single_lines = single_para.split('\n')
lines_nums = len(single_lines)
for step in range(1,lines_nums):
inputs_text = single_lines[:step]
history = inputs_text[:-1]
text = inputs_text[-1]
gen_text_tok = generate_text_by_input(args,text, history, model, tokenizer)
gen_text = "".join(gen_text_tok)
print(gen_text)
ori_text = single_lines[step]
results.append({'ori':ori_text,'gen':gen_text})
data_file_path = os.path.join(args.save_samples_path,'machine_metric_data.json')
io.open(data_file_path,'w').write(json.dumps(results,ensure_ascii=False, indent=4))
def generate_human_check_datas(model,args,tokenizer):
'''
生成篇章的方法
pre_data(:10)->A_gen
pre_data(:10)+A_gen->B_gen
pre_data(:10)+A_gen+B_gen->C_gen
最后输出
pre_data(:10)
A_gen
B_gen
C_gen
...
'''
raw_content = io.open(args.test_filter_data,'r').read()
raw_paras = raw_content.split('\n\n')
results = []
# text_generator = TextGenerationPipeline(model, tokenizer,device=0)
for single_para in raw_paras:
single_lines = single_para.split('\n')
lines_nums = len(single_lines)
history_all = single_lines[:10]
for step in range(10,lines_nums):
inputs_text = single_lines[:step]
gen_text_tok = single_line(args, history_all[-1] , model, tokenizer)
# inputs_text = history_all[:step]
# gen_text_tok = single_line(args, ''.join(inputs_text) , model, tokenizer)
# gen_text = "".join(gen_text_tok)
gen_text = gen_text_tok
# print(gen_text)
# ori_text = single_lines[step]
history_all.append(gen_text)
print(history_all)
results.append('\n'.join(history_all))
data_file_path = os.path.join(args.save_samples_path,'zhouwenwang_turn_new_unsed_64.txt')
io.open(data_file_path,'w').write('\n\n'.join(results))
return data_file_path
def interact(args,samples_file,model,tokenizer):
history = []
print('开始和chatbot聊天,输入CTRL + Z以退出')
while True:
try:
text = input("user:")
# text = "你好"
if args.save_samples_path:
samples_file.write("user:{}\n".format(text))
text = generate_text_by_input(args,text,history,model,tokenizer)
print("chatbot:" + "".join(text))
if args.save_samples_path:
samples_file.write("chatbot:{}\n".format("".join(text)))
except KeyboardInterrupt:
if args.save_samples_path:
samples_file.close()
break
def main():
args = set_args()
set_random_seed(args)
logger = create_logger(args)
# 当用户使用GPU,并且GPU可用时
args.cuda = torch.cuda.is_available() and not args.no_cuda
device = 'cuda' if args.cuda else 'cpu'
logger.info('using device:{}'.format(device))
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
# tokenizer = BertTokenizer(vocab_file=args.voca_path)
tokenizer = BertTokenizer.from_pretrained(args.model_path)
config = RoFormerConfig.from_pretrained(args.model_path)
model = RoFormerModel.from_pretrained(args.model_path)
model = model.to(device)
model.eval()
if args.save_samples_path:
if not os.path.exists(args.save_samples_path):
os.makedirs(args.save_samples_path)
samples_file = open(args.save_samples_path + '/samples.txt', 'a', encoding='utf8')
samples_file.write("聊天记录{}:\n".format(datetime.now()))
# 存储聊天记录,每个utterance以token的id的形式进行存储
# get_machine_metric_datas(model,args,tokenizer)
generate_human_check_datas(model,args,tokenizer)
if __name__ == '__main__':
main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/wenzhong/inference.py | src/wenzhong/inference.py | from transformers import GPT2Tokenizer, GPT2Model
from transformers import pipeline, set_seed
# tokenizer = GPT2Tokenizer.from_pretrained('/data1/anon/crosstalk-generation/pretrain_model/wenzhong')
# model = GPT2Model.from_pretrained('/data1/anon/crosstalk-generation/pretrain_model/wenzhong')
text = "给大家拜个年."
generator = pipeline('text-generation', model='/data1/anon/crosstalk-generation/pretrain_model/wenzhong')
out = generator("北京位于", max_length=30, num_return_sequences=1)
print(out) | python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/wenzhong/generate_eval_data.py | src/wenzhong/generate_eval_data.py | '''
Author: anon
Date: 2022-02-08 16:12:50
LastEditors: anon
LastEditTime: 2022-02-09 16:07:19
FilePath: /crosstalk-generation/src/gpt/generate_eval_data.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
import transformers
import torch
import os
import json
import random
import numpy as np
import argparse
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
import logging
from transformers import GPT2TokenizerFast, GPT2LMHeadModel, GPT2Config,GPT2Tokenizer
from transformers import BertTokenizerFast
# from transformers import BertTokenizer
from os.path import join, exists
from itertools import zip_longest, chain
# from chatbot.model import DialogueGPT2Model
from torch.utils.data import Dataset, DataLoader
from torch.nn import CrossEntropyLoss
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import io
from transformers import pipeline, set_seed
PAD = '[PAD]'
pad_id = 0
def set_args():
"""
Sets up the arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='0', type=str, required=False, help='生成设备')
parser.add_argument('--temperature', default=1, type=float, required=False, help='生成的temperature')
parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')
parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
parser.add_argument('--log_path', default='/data1/anon/crosstalk-generation/src/wenzhong/logs/interact.log', type=str, required=False, help='interact日志存放位置')
parser.add_argument('--model_path', default='/data1/anon/crosstalk-generation/pretrain_model/wenzhong', type=str, required=False, help='对话模型路径')
parser.add_argument('--test_filter_data', default="/data1/anon/crosstalk-generation/src/wenzhong/data/p256-s64/test_filter_50x20.txt", type=str, required=False, help="数据基准文件,n个篇章,每个篇章20行")
parser.add_argument('--save_samples_path', default="/data1/anon/crosstalk-generation/src/wenzhong/sample/", type=str, required=False, help="保存跑机器指标数据的文件路径及保存篇章级生成的文件路径")
parser.add_argument('--repetition_penalty', default=2.0, type=float, required=False,
help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
parser.add_argument('--seed', type=int, default=1234, help='设置种子用于生成随机数,以使得训练的结果是确定的')
parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
parser.add_argument('--seq_max_len', type=int, default=256, help='最大输入长度')
parser.add_argument('--max_history_len', type=int, default=20, help="dialogue history的最大长度")
parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行预测')
return parser.parse_args()
def set_random_seed(args):
"""
设置训练的随机种子
"""
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def create_logger(args):
"""
将日志输出到日志文件和控制台
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
# 创建一个handler,用于写入日志文件
file_handler = logging.FileHandler(
filename=args.log_path)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
# 创建一个handler,用于将日志输出到控制台
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
logger.addHandler(console)
return logger
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Mask a 1-D logits vector in place with top-k and/or nucleus (top-p) filtering.

    Args:
        logits: logits distribution of shape (vocab size); mutated in place.
        top_k > 0: keep only the k tokens with the highest logits.
        top_p > 0.0: keep the smallest token set whose cumulative probability
            reaches top_p (nucleus filtering, Holtzman et al.,
            http://arxiv.org/abs/1904.09751).
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # never request more candidates than exist
    if top_k > 0:
        # Everything strictly below the k-th largest logit gets masked out.
        kth_largest = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_largest] = filter_value
    if top_p > 0.0:
        desc_logits, desc_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(desc_logits, dim=-1), dim=-1)
        # Tokens past the nucleus boundary are dropped ...
        drop_mask = cum_probs > top_p
        # ... shifted right by one so the first token that crosses the
        # threshold is still retained.
        drop_mask[..., 1:] = drop_mask[..., :-1].clone()
        drop_mask[..., 0] = 0
        logits[desc_indices[drop_mask]] = filter_value
    return logits
def generate_text_by_input(args,text,history,model,tokenizer):
    """Generate one reply for ``text`` given the dialogue ``history``.

    Appends ``text`` to ``history``, packs as many of the most recent
    utterances (each truncated to ``args.utterance_max_len`` ids) as fit
    into ``args.seq_max_len``, then samples at most
    ``args.utterance_max_len`` new tokens from the model.

    Args:
        args: CLI namespace (utterance_max_len, seq_max_len, topp, topk,
            repetition_penalty, temperature).
        text: newest user utterance (str).
        history: list of previous utterance strings; the initial append
            mutates the caller's list, after which the name is rebound to
            a local list of token-id lists.
        model: causal LM exposing a HuggingFace-style ``generate()``.
        tokenizer: tokenizer matching ``model``.

    Returns:
        Decoded generated text (str), prompt tokens excluded.
    """
    history.append(text)
    history = [tokenizer.encode(i, add_special_tokens=False) for i in history]
    # input_ids = [tokenizer.cls_token_id] # each input used to start with [CLS]
    input_ids = []  # no special tokens prepended (the [CLS] variant is commented out above)
    # Starts at 1 — presumably reserving room for a [CLS] token even though
    # none is prepended any more; TODO confirm intended budget.
    history_start_index = 1
    filter_history_sent_ids = []
    # Walk the history newest-to-oldest, keeping whole utterances until the
    # sequence budget would be exceeded.
    for rev_idx in range(len(history)-1,-1,-1):
        # this_turn_ids = history[rev_idx][:args.utterance_max_len] + [tokenizer.sep_token_id]
        this_turn_ids = history[rev_idx][:args.utterance_max_len]
        if history_start_index + len(this_turn_ids) > args.seq_max_len:
            break
        filter_history_sent_ids.append(this_turn_ids)
        history_start_index += len(this_turn_ids)
    # Restore chronological order before concatenating into one prompt.
    filter_history_sent_ids.reverse()
    for sent_ids in filter_history_sent_ids:
        input_ids.extend(sent_ids)
    input_ids = torch.tensor(input_ids).long().to(model.device)
    input_ids = input_ids.unsqueeze(0)
    response = []  # NOTE(review): unused leftover; the reply comes from `gen` below
    # Sample at most utterance_max_len new tokens.
    gen = model.generate(input_ids, max_new_tokens=args.utterance_max_len, do_sample=True, min_length=input_ids.shape[1] + 3,
                        top_p=args.topp,top_k=args.topk,
                        repetition_penalty=args.repetition_penalty,temperature=args.temperature)
    # Decode only the newly generated suffix (skip the prompt tokens).
    text = tokenizer.decode(gen.cpu().numpy().tolist()[0][input_ids.shape[1]:])
    return text
def get_machine_metric_datas(model,args,tokenizer):
    '''
    Build the data for automatic metrics (bleu, gleu, rouge).

    ``args.test_filter_data`` holds dialogues separated by blank lines,
    one utterance per line. For each dialogue, generate step by step:
        A_ori               -> B_gen
        A_ori,B_ori         -> C_gen
        A_ori,B_ori,C_ori   -> D_gen
    and record the (ori, gen) pairs
        B_ori,B_gen
        C_ori,C_gen
        D_ori,D_gen
    as JSON in ``args.save_samples_path``/machine_metric_data.json.
    '''
    # Read with an explicit encoding and close the handle deterministically
    # (the original leaked the file object returned by io.open).
    with io.open(args.test_filter_data, 'r', encoding='utf-8') as f:
        raw_content = f.read()
    raw_paras = raw_content.split('\n\n')
    results = []
    generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
    generator.device = model.device
    for single_para in raw_paras:
        single_lines = single_para.split('\n')
        lines_nums = len(single_lines)
        for step in range(1, lines_nums):
            # Prompt = all utterances before `step`; reference = utterance at `step`.
            inputs_text = single_lines[:step]
            gen_text = generator('\n'.join(inputs_text), return_full_text=False, num_return_sequences=1,
                                 max_new_tokens=64,
                                 min_length=3, top_p=args.topp, top_k=args.topk,
                                 repetition_penalty=args.repetition_penalty, temperature=args.temperature, pad_token_id=50256
                                 )[0]['generated_text']
            # gen_text_tok = generate_text_by_input(args,text, history, model, tokenizer)
            # gen_text = return_seq.replace(inputs_text,'')
            print(gen_text)
            ori_text = single_lines[step]
            results.append({'ori': ori_text, 'gen': gen_text})
    # Make sure the output directory exists before writing, then close the
    # output handle deterministically.
    os.makedirs(args.save_samples_path, exist_ok=True)
    data_file_path = os.path.join(args.save_samples_path, 'machine_metric_data.json')
    with io.open(data_file_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(results, ensure_ascii=False, indent=4))
def generate_human_check_datas(model,args,tokenizer):
    '''
    Paragraph-level generation for human evaluation (not implemented yet).

    Intended scheme:
        pre_data(:10)                 -> A_gen
        pre_data(:10)+A_gen           -> B_gen
        pre_data(:10)+A_gen+B_gen     -> C_gen
    and finally emit
        pre_data(:10)
        A_gen
        B_gen
        C_gen
        ...
    '''
    pass
def interact(args,samples_file,model,tokenizer):
    """Interactive stdin chat loop; exits on KeyboardInterrupt.

    Each user/bot turn is appended to ``samples_file`` when
    ``args.save_samples_path`` is set.

    NOTE(review): the banner says CTRL+Z, but the loop actually exits on
    KeyboardInterrupt (CTRL+C) — confirm the intended key binding.
    """
    history = []
    print('开始和chatbot聊天,输入CTRL + Z以退出')
    while True:
        try:
            text = input("user:")
            # text = "你好"
            if args.save_samples_path:
                samples_file.write("user:{}\n".format(text))
            text = generate_text_by_input(args,text,history,model,tokenizer)
            print("chatbot:" + "".join(text))
            if args.save_samples_path:
                samples_file.write("chatbot:{}\n".format("".join(text)))
        except KeyboardInterrupt:
            # Close the transcript (if any) before leaving the loop.
            if args.save_samples_path:
                samples_file.close()
            break
def main():
    """Entry point: configure the environment, load the GPT-2 model and
    tokenizer, then dump the machine-metric evaluation data."""
    args = set_args()
    set_random_seed(args)
    logger = create_logger(args)
    # Restrict visible GPUs BEFORE the first CUDA query — the original set
    # CUDA_VISIBLE_DEVICES after torch.cuda.is_available(), at which point
    # the CUDA context is initialized and the mask has no effect.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    # Use the GPU only when one is available and not explicitly disabled.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda' if args.cuda else 'cpu'
    logger.info('using device:{}'.format(device))
    # tokenizer = BertTokenizer(vocab_file=args.voca_path)
    model = GPT2LMHeadModel.from_pretrained(args.model_path)
    tokenizer = GPT2Tokenizer.from_pretrained(args.model_path)
    model = model.to(device)
    model.eval()
    if args.save_samples_path:
        if not os.path.exists(args.save_samples_path):
            os.makedirs(args.save_samples_path)
        # NOTE(review): samples_file is opened but never used below (interact
        # is not called in this path); kept for backward compatibility.
        samples_file = open(args.save_samples_path + '/samples.txt', 'a', encoding='utf8')
        samples_file.write("聊天记录{}:\n".format(datetime.now()))
    get_machine_metric_datas(model,args,tokenizer)


if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/pangu-a/inference.py | src/pangu-a/inference.py | '''
Author: anon
Date: 2022-02-07 20:18:18
LastEditors: anon
LastEditTime: 2022-02-10 16:09:11
FilePath: /crosstalk-generation/src/cpm/inference.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
from transformers import TextGenerationPipeline, AutoTokenizer, AutoModelForCausalLM
# Smoke-test script: load the local Pangu-alpha checkpoint (with its custom
# remote code) and sample one short crosstalk continuation from a two-line
# prompt, then print the raw pipeline output.
tokenizer = AutoTokenizer.from_pretrained("/data1/anon/crosstalk-generation/pretrain_model/Pangu-a",trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("/data1/anon/crosstalk-generation/pretrain_model/Pangu-a",trust_remote_code=True)
text_generator = TextGenerationPipeline(model, tokenizer)
text = text_generator('0:大家好。\n1:今天我俩来给大家拜个年。', max_length=64, do_sample=True, top_p=0.9)
print(text)
# conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
# conversation_1 = Conversation("<cls>今天我俩来给大家说段相声<sep>")
# print()
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/pangu-a/configuration_gptpangu.py | src/pangu-a/configuration_gptpangu.py | from transformers.configuration_utils import PretrainedConfig
class GPTPanguConfig(PretrainedConfig):
    """Configuration for the Pangu-alpha GPT-style decoder.

    Defaults match the released 2.6B Pangu-alpha checkpoint: 40k vocab,
    1024 positions, hidden size 2560, 32 layers / 32 heads.
    ``intermediate_size=None`` means the MLP uses 4 * hidden_size.
    """
    model_type = "gpt_pangu"
    # past_key_values is cache-only state, not a model output to compare.
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=40000,
        max_position_embeddings=1024,
        hidden_size=2560,
        intermediate_size=None,
        num_layers=32,
        num_heads=32,
        activation_function="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        scale_attn_weights=True,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        use_cache=True,
        bos_token_id=9,
        eos_token_id=9,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.scale_attn_weights = scale_attn_weights
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        self.use_cache = use_cache
        # NOTE(review): token id 9 serves as both BOS and EOS here — confirm
        # against the tokenizer vocabulary.
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/pangu-a/modeling_gptpangu.py | src/pangu-a/modeling_gptpangu.py | """PyTorch PanguAlpha GPT2 Model"""
# from .configuration_gptpangu import GPTPanguConfig
from typing import Tuple
import math
import torch
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_utils import PreTrainedModel
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.utils import logging
logger = logging.get_logger(__name__)
class GPTPanguAttention(nn.Module):
    """Multi-head causal self-attention for Pangu-alpha.

    GPT-2-style attention with separate q/k/v projections and an optional
    ``custom_query``: the last decoder layer queries with a learned
    position ("query") embedding instead of the hidden states.
    """
    def __init__(self, config):
        super().__init__()
        max_positions = config.max_position_embeddings
        # Lower-triangular causal mask, shaped (1, 1, T, T) so it broadcasts
        # over (batch, heads).
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
                1, 1, max_positions, max_positions
            ),
        )
        # Large negative value substituted at masked positions (softmax -> ~0).
        self.register_buffer("masked_bias", torch.tensor(-1e4))
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
            )
        self.scale_attn_weights = config.scale_attn_weights
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
        self.c_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        """Scaled dot-product attention with causal + padding masking."""
        attn_weights = torch.matmul(query, key.transpose(-1, -2))
        if self.scale_attn_weights:
            # Standard 1/sqrt(head_dim) scaling.
            attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
        query_length, key_length = query.size(-2), key.size(-2)
        # Slice the cached causal mask so cached keys (key_length > query_length)
        # line up with the current queries.
        causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
        attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
        if attention_mask is not None:
            # Apply the (additive) padding attention mask
            attn_weights = attn_weights + attention_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)
        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask
        attn_output = torch.matmul(attn_weights, value)
        return attn_output, attn_weights
    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(*new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)
    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        custom_query=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Attend over hidden_states (plus cached keys/values, if any).

        Returns ``(attn_output, present)`` and optionally the attention
        weights when ``output_attentions`` is set.
        """
        # Pangu-alpha specialty: the top layer queries with a learned
        # position embedding instead of the hidden states.
        query = self.q_proj(custom_query) if custom_query is not None else self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)
        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)
        if layer_past is not None:
            # Prepend cached keys/values from previous decoding steps.
            past_key, past_value = layer_past
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)
        if use_cache is True:
            present = (key, value)
        else:
            present = None
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)
        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs  # a, present, (attentions)
class GPTPanguMLP(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> Linear -> dropout."""

    def __init__(self, intermediate_size, config):  # in MLP: intermediate_size= 4 * hidden_size
        super().__init__()
        model_dim = config.hidden_size
        self.c_fc = nn.Linear(model_dim, intermediate_size)
        self.c_proj = nn.Linear(intermediate_size, model_dim)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states):
        """Project up, apply the activation, project back down, then dropout."""
        return self.dropout(self.c_proj(self.act(self.c_fc(hidden_states))))
class GPTPanguBlock(nn.Module):
    """One pre-LayerNorm transformer decoder block: LN -> attention ->
    residual, then LN -> MLP -> residual."""
    def __init__(self, config):
        super().__init__()
        hidden_size = config.hidden_size
        # MLP width defaults to 4 * hidden_size when not configured.
        inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size
        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = GPTPanguAttention(config)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = GPTPanguMLP(inner_dim, config)
    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        custom_query=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Run one decoder block; returns hidden_states plus the attention
        cache / weights tuple produced by the attention sub-module."""
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            custom_query=custom_query,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + residual
        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states
        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]
        return outputs  # hidden_states, present, (attentions, cross_attentions)
class GPTPanguPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    # config_class = GPTPanguConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)
    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear,)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as identity: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
        # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
        # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
        # >   -- GPT-2 :: https://openai.com/blog/better-language-models/
        #
        # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
        for name, p in module.named_parameters():
            if "c_proj" in name and "weight" in name:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.num_layers)))
    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on the backbone module only.
        if isinstance(module, GPTPanguModel):
            module.gradient_checkpointing = value
class GPTPanguModel(GPTPanguPreTrainedModel):
    """Pangu-alpha decoder backbone (no LM head).

    Like GPT-2, plus a third embedding table ``wqe`` whose output serves as
    the custom query for the final attention layer.
    """
    def __init__(self, config):
        super().__init__(config)
        self.embed_dim = config.hidden_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)           # token embeddings
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)  # position embeddings
        self.wqe = nn.Embedding(config.max_position_embeddings, self.embed_dim)  # top-layer query embeddings
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPTPanguBlock(config) for _ in range(config.num_layers)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        return self.wte
    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the decoder stack.

        Accepts either ``input_ids`` or ``inputs_embeds`` (not both) and an
        optional KV cache ``past_key_values``; returns the final hidden
        states (plus cache / per-layer states when requested).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])
        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            # Offset new positions by the number of cached key/value steps.
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        # GPT2Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x num_heads x N x N
        # head_mask has shape n_layer x batch x num_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.num_layers)
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds
        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds
        hidden_states = self.drop(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)
        # top attention custom query
        last_layer_id = len(self.h) - 1
        query_embeds = self.wqe(position_ids)
        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # Final LayerNorm before last query layer
            if i == last_layer_id:
                hidden_states = self.ln_f(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False
                def create_custom_forward(module):
                    # Wrapper binding use_cache/output_attentions so the
                    # checkpointed call only receives tensor arguments.
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)
                    return custom_forward
                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states=hidden_states,
                    layer_past=None,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    # custom query
                    custom_query=query_embeds if i == last_layer_id else None,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    # custom query
                    custom_query=query_embeds if i == last_layer_id else None,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )
            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class GPTPanguForCausalLM(GPTPanguPreTrainedModel):
    """Pangu-alpha backbone with a tied-dimension language-modeling head."""
    def __init__(self, config):
        super().__init__(config)
        self.transformer = GPTPanguModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    def get_output_embeddings(self):
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        """Build the model kwargs for one generation step (HF generate hook)."""
        token_type_ids = kwargs.get("token_type_ids", None)
        # only last token for inputs_ids if past is defined in kwargs
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None
        return {
            "input_ids": input_ids,
            "past_key_values": past,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
            ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
    @staticmethod
    def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the :obj:`past_key_values` cache if
        :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
        called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past
        )
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/pangu-a/generate_eval_data.py | src/pangu-a/generate_eval_data.py | '''
Author: anon
Date: 2022-02-08 16:12:50
LastEditors: anon
LastEditTime: 2022-02-10 17:07:36
FilePath: /crosstalk-generation/src/cpm/generate_eval_data.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
import transformers
import torch
import os
import json
import random
import numpy as np
import argparse
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
import logging
from transformers import GPT2TokenizerFast, GPT2LMHeadModel, GPT2Config
from transformers import BertTokenizerFast
# from transformers import BertTokenizer
from os.path import join, exists
from itertools import zip_longest, chain
# from chatbot.model import DialogueGPT2Model
from dataset import MyDataset
from torch.utils.data import Dataset, DataLoader
from torch.nn import CrossEntropyLoss
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import io
from transformers import TextGenerationPipeline, AutoTokenizer, AutoModelForCausalLM
from tokenization_gptpangu import GPTPanguTokenizer
from modeling_gptpangu import GPTPanguForCausalLM
def set_args():
    """
    Build and parse the CLI arguments for Pangu-alpha evaluation-data
    generation (device selection, sampling hyper-parameters, data and
    output paths, sequence-length limits).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, required=False, help='生成设备')
    parser.add_argument('--temperature', default=1.4, type=float, required=False, help='生成的temperature')
    parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')
    parser.add_argument('--topp', default=0.91, type=float, required=False, help='最高积累概率')
    parser.add_argument('--log_path', default='/data1/anon/crosstalk-generation/src/pangu-a/logs/interact.log', type=str, required=False, help='interact日志存放位置')
    parser.add_argument('--vocab_path', default='/data1/anon/crosstalk-generation/pretrain_model/Pangu-a', type=str, required=False, help='选择词库')
    parser.add_argument('--model_path', default='/data1/anon/crosstalk-generation/pretrain_model/Pangu-a', type=str, required=False, help='对话模型路径')
    parser.add_argument('--test_filter_data', default="/data1/anon/crosstalk-generation/src/common_data/test_filter_50x20.txt", type=str, required=False, help="数据基准文件,n个篇章,每个篇章20行")
    parser.add_argument('--save_samples_path', default="/data1/anon/crosstalk-generation/src/pangu-a/sample/", type=str, required=False, help="保存跑机器指标数据的文件路径及保存篇章级生成的文件路径")
    parser.add_argument('--repetition_penalty', default=2.0, type=float, required=False,
                        help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
    parser.add_argument('--seed', type=int, default=1234, help='设置种子用于生成随机数,以使得训练的结果是确定的')
    parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
    parser.add_argument('--seq_max_len', type=int, default=256, help='最大输入长度')
    parser.add_argument('--max_history_len', type=int, default=20, help="dialogue history的最大长度")
    parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行预测')
    return parser.parse_args()
def set_random_seed(args):
    """Seed every RNG in play (torch CPU+CUDA, `random`, numpy) for reproducible runs.

    Args:
        args: parsed CLI namespace with an integer ``seed`` attribute.
    """
    torch.manual_seed(args.seed)
    # No-op on CPU-only machines, but required for reproducibility on GPU
    # (the original only seeded the CPU generator).
    torch.cuda.manual_seed_all(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
    # Force deterministic cuDNN kernels and disable autotuning
    # (slower, but run-to-run reproducible).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def create_logger(args):
    """Return a logger writing to both ``args.log_path`` and the console.

    The original attached a fresh (file, console) handler pair on every
    call to the same module logger, so repeated calls caused duplicated
    log lines; this version configures the handlers only once.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    if logger.handlers:
        # Already configured by a previous call — reuse as-is.
        return logger
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s')
    # Handler that appends to the log file.
    file_handler = logging.FileHandler(
        filename=args.log_path)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    # Handler that echoes records to the console.
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)
    return logger
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Mask a 1-D logits vector in place with top-k and/or nucleus (top-p) filtering.

    Args:
        logits: logits distribution of shape (vocab size); mutated in place.
        top_k > 0: keep only the k tokens with the highest logits.
        top_p > 0.0: keep the smallest token set whose cumulative probability
            reaches top_p (nucleus filtering, Holtzman et al.,
            http://arxiv.org/abs/1904.09751).
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # never request more candidates than exist
    if top_k > 0:
        # Everything strictly below the k-th largest logit gets masked out.
        kth_largest = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_largest] = filter_value
    if top_p > 0.0:
        desc_logits, desc_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(desc_logits, dim=-1), dim=-1)
        # Tokens past the nucleus boundary are dropped ...
        drop_mask = cum_probs > top_p
        # ... shifted right by one so the first token that crosses the
        # threshold is still retained.
        drop_mask[..., 1:] = drop_mask[..., :-1].clone()
        drop_mask[..., 0] = 0
        logits[desc_indices[drop_mask]] = filter_value
    return logits
def generate_text_by_input(args, text, history, model, tokenizer):
    """Generate one reply for `text` given the dialogue `history` (PanGu variant).

    Builds the model input by concatenating as many of the most recent
    newline-terminated history turns as fit into ``args.seq_max_len``
    (each turn itself truncated to ``args.utterance_max_len``), then
    samples a continuation with ``model.generate``.

    Args:
        args: namespace with seq_max_len, utterance_max_len, topp, topk,
            repetition_penalty and temperature.
        text: the newest user/speaker utterance (str).
        history: list of previous utterances; mutated in place (see notes).
        model: a causal LM supporting ``generate``.
        tokenizer: GPTPanguTokenizer-like object.

    Returns:
        The generated reply as a string (first line, '<eot>' stripped).
    """
    history.append(text)
    history_contents = history  # alias kept for readability; same list object
    # Encode every turn with a trailing newline as the turn separator.
    history = [tokenizer.encode(i + '\n',add_special_tokens=False) for i in history]
    # input_ids = [tokenizer.cls_token_id]
    input_ids = []
    history_start_index = 0
    filter_history_sent_ids = []
    # Walk the history backwards (newest first) and keep whole turns until
    # the total length budget seq_max_len would be exceeded.
    for rev_idx in range(len(history)-1,-1,-1):
        # this_turn_ids = history[rev_idx][:args.utterance_max_len] + [tokenizer.sep_token_id]
        this_turn_ids = history[rev_idx][:args.utterance_max_len]
        if history_start_index + len(this_turn_ids) > args.seq_max_len:
            break
        filter_history_sent_ids.append(this_turn_ids)
        history_start_index += len(this_turn_ids)
    # Restore chronological order before flattening.
    filter_history_sent_ids.reverse()
    for sent_ids in filter_history_sent_ids:
        input_ids.extend(sent_ids)
    # input_ids.append(tokenizer.cls_token_id)
    input_ids = torch.tensor(input_ids).long().to(model.device)
    input_ids = input_ids.unsqueeze(0)
    response = []  # the response generated from the context
    # Generate at most utterance_max_len new tokens.
    gen = model.generate(input_ids, max_new_tokens=args.utterance_max_len, do_sample=True, min_length=input_ids.shape[1] + 3,pad_token_id=9,
                            top_p=args.topp,top_k=args.topk,
                            repetition_penalty=args.repetition_penalty,temperature=args.temperature)
    # NOTE(review): decode is applied twice here; GPTPanguTokenizer.decode
    # already returns text, so the outer call re-decodes a string — confirm
    # this double decode is intentional.
    raw = tokenizer.decode(tokenizer.decode(gen.cpu().numpy().tolist()[0][input_ids.shape[1]:]))
    # Keep only the first generated line and drop the end-of-turn marker.
    res_text = raw.strip().split('\n')[0].replace('<eot>','')
    if len(res_text) == 0:
        print()
    # NOTE(review): `history` was rebound above to a list of id-lists, so this
    # appends a *string* to a list of token-id lists; the caller's original
    # list is unaffected — verify this is the intended bookkeeping.
    history.append(res_text)
    return res_text
def generate_text_by_input_and_gen(args,text,history,text_generator):
    """Placeholder for pipeline-based generation; intentionally unimplemented."""
    return None
def get_machine_metric_datas(model,args,tokenizer):
    '''
    Produce the (original, generated) pairs needed for automatic metrics
    (BLEU, GLEU, ROUGE).  Generation scheme per paragraph:
    A_ori->B_gen
    A_ori,B_ori->C_gen
    A_ori,B_ori,C_ori->D_gen
    Output written to machine_metric_data.json:
    B_ori,B_gen
    C_ori,C_gen
    D_ori,D_gen
    '''
    # Test file: paragraphs separated by blank lines, one utterance per line.
    raw_content = io.open(args.test_filter_data,'r').read()
    raw_paras = raw_content.split('\n\n')
    results = []
    for single_para in raw_paras:
        single_lines = single_para.split('\n')
        lines_nums = len(single_lines)
        # For each prefix of the paragraph, generate the next utterance and
        # pair it with the real next line.
        for step in range(1,lines_nums):
            inputs_text = single_lines[:step]
            history = inputs_text[:-1]
            text = inputs_text[-1]
            gen_text_tok = generate_text_by_input(args,text, history, model, tokenizer)
            gen_text = "".join(gen_text_tok)
            print(gen_text)
            ori_text = single_lines[step]
            results.append({'ori':ori_text,'gen':gen_text})
    # Persist all pairs as pretty-printed UTF-8 JSON for metrics.py.
    data_file_path = os.path.join(args.save_samples_path,'machine_metric_data.json')
    io.open(data_file_path,'w').write(json.dumps(results,ensure_ascii=False, indent=4))
def generate_human_check_datas(model,args,tokenizer):
    '''
    Generate whole passages for human evaluation.  Scheme:
    pre_data(:10)->A_gen
    pre_data(:10)+A_gen->B_gen
    pre_data(:10)+A_gen+B_gen->C_gen
    Final output:
    pre_data(:10)
    A_gen
    B_gen
    C_gen
    ...
    '''
    raw_content = io.open(args.test_filter_data,'r').read()
    raw_paras = raw_content.split('\n\n')
    results = []
    # text_generator = TextGenerationPipeline(model, tokenizer,device=0)
    for single_para in raw_paras:
        single_lines = single_para.split('\n')
        lines_nums = len(single_lines)
        # Seed the passage with the first 10 real utterances, then let the
        # model continue it, feeding its own output back in each step.
        history_all = single_lines[:10]
        for step in range(10,lines_nums):
            history = history_all[:-1]
            text = history_all[-1]
            gen_text_tok = generate_text_by_input(args,text, history, model,tokenizer)
            gen_text = "".join(gen_text_tok)
            # print(gen_text)
            # ori_text = single_lines[step]
            history_all.append(gen_text)
        print(history_all)
        results.append('\n'.join(history_all))
    # One passage per double-newline-separated block in the output file.
    data_file_path = os.path.join(args.save_samples_path,'pangu_turn_ep100_2_rp_tp_5.txt')
    io.open(data_file_path,'w').write('\n\n'.join(results))
    return data_file_path
def interact(args,samples_file,model,tokenizer):
    """Interactive chat loop on stdin/stdout; Ctrl+C (KeyboardInterrupt) exits.

    Every user turn and model reply is optionally appended to
    ``samples_file`` when ``args.save_samples_path`` is set.
    """
    history = []
    print('开始和chatbot聊天,输入CTRL + Z以退出')
    while True:
        try:
            text = input("user:")
            # text = "你好"
            if args.save_samples_path:
                samples_file.write("user:{}\n".format(text))
            # generate_text_by_input mutates `history` and returns the reply.
            text = generate_text_by_input(args,text,history,model,tokenizer)
            print("chatbot:" + "".join(text))
            if args.save_samples_path:
                samples_file.write("chatbot:{}\n".format("".join(text)))
        except KeyboardInterrupt:
            # Flush and close the transcript before leaving the loop.
            if args.save_samples_path:
                samples_file.close()
            break
def main():
    """Entry point: load the PanGu model and run passage generation."""
    args = set_args()
    set_random_seed(args)
    logger = create_logger(args)
    # Use the GPU when the user asked for it and one is available.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda' if args.cuda else 'cpu'
    logger.info('using device:{}'.format(device))
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    # tokenizer = BertTokenizer(vocab_file=args.voca_path)
    # model = GPT2LMHeadModel.from_pretrained(args.model_path)
    # tokenizer = BertTokenizerFast(vocab_file=args.vocab_path, sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    tokenizer = GPTPanguTokenizer.from_pretrained(args.vocab_path,trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(args.model_path,trust_remote_code=True)
    model = model.to(device)
    model.eval()
    if args.save_samples_path:
        if not os.path.exists(args.save_samples_path):
            os.makedirs(args.save_samples_path)
        samples_file = open(args.save_samples_path + '/samples.txt', 'a', encoding='utf8')
        samples_file.write("聊天记录{}:\n".format(datetime.now()))
    # Chat history is stored as token-id lists per utterance.
    # Exactly one of the three modes below is enabled at a time.
    # get_machine_metric_datas(model,args,tokenizer)
    # interact(args,samples_file,model,tokenizer)
    generate_human_check_datas(model,args,tokenizer)
if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/pangu-a/dataset.py | src/pangu-a/dataset.py | from torch.utils.data import Dataset
import torch
class MyDataset(Dataset):
    """Dataset over pre-tokenised id sequences, truncated to ``max_len``."""

    def __init__(self, input_list, max_len):
        # input_list: list of token-id lists; max_len: truncation length.
        self.input_list = input_list
        self.max_len = max_len

    def __getitem__(self, index):
        # Truncate the requested sequence and return it as a LongTensor.
        ids = self.input_list[index][:self.max_len]
        return torch.tensor(ids, dtype=torch.long)

    def __len__(self):
        return len(self.input_list)
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/pangu-a/tokenization_gptpangu.py | src/pangu-a/tokenization_gptpangu.py | from transformers.tokenization_utils import PreTrainedTokenizer
import torch
import sentencepiece
import jieba
class GPTPanguTokenizer(PreTrainedTokenizer):
    """SentencePiece + jieba tokenizer for PanGu-Alpha.

    Text is first word-segmented with jieba, spaces/newlines are mapped to
    the private placeholders U+2582/U+2583 (reversed on decode), and the
    result is encoded with a SentencePiece model.
    """
    # Ref: https://git.openi.org.cn/PCL-Platform.Intelligence/PanGu-Alpha/src/branch/master/tokenization_jieba.py
    vocab_files_names = {
        "model_file": "vocab.model"
    }
    def __init__(
            self,
            model_file,
            **kwargs
    ):
        # NOTE(review): kwargs are not forwarded to PreTrainedTokenizer —
        # confirm dropping them (special-token kwargs etc.) is intended.
        super().__init__()
        self.sp = sentencepiece.SentencePieceProcessor()
        self.sp.Load(model_file=model_file)
        # Map space -> U+2582 and newline -> U+2583 so SentencePiece sees them
        # as ordinary characters; decode() reverses this mapping.
        self.translator = str.maketrans(" \n", "\u2582\u2583")
        # special token ids
        self.eos_token_id = self.sp.piece_to_id("<eot>")
    def tokenize(self, text, **kwargs):
        """ Tokenize a string. """
        # jieba word segmentation, then placeholder translation per word.
        seg_list = [x.translate(self.translator) for x in jieba.cut(text, cut_all=False)]
        new_seg = " ".join(seg_list)
        return self.sp.encode(new_seg)
    def convert_tokens_to_ids(self, tokens):
        # tokenize() already returns ids, so this is the identity.
        return tokens
    def convert_ids_to_tokens(self, ids):
        return self.decode(ids)
    def decode(self, tokens, **kwargs):
        """Decode a list/tensor of ids back to text, restoring spaces/newlines."""
        if isinstance(tokens, torch.Tensor):
            tokens = tokens.tolist()
        text = self.sp.decode(tokens)
        # Drop SentencePiece word separators, then undo the placeholder mapping.
        text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')
        return text
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/interact.py | src/gpt/interact.py | import transformers
import torch
import os
import json
import random
import numpy as np
import argparse
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
import logging
from transformers import GPT2TokenizerFast, GPT2LMHeadModel, GPT2Config
from transformers import BertTokenizerFast
# from transformers import BertTokenizer
from os.path import join, exists
from itertools import zip_longest, chain
# from chatbot.model import DialogueGPT2Model
from dataset import MyDataset
from torch.utils.data import Dataset, DataLoader
from torch.nn import CrossEntropyLoss
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
PAD = '[PAD]'
pad_id = 0
def set_args():
    """
    Sets up the arguments.

    Returns the parsed argparse namespace for the GPT-2 interactive chatbot:
    sampling controls (temperature/topk/topp/repetition_penalty), file
    locations (log, vocab, model, sample output) and length limits.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, required=False, help='生成设备')
    parser.add_argument('--temperature', default=1, type=float, required=False, help='生成的temperature')
    parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    # parser.add_argument('--model_config', default='config/model_config_dialogue_small.json', type=str, required=False,
    #                     help='模型参数')
    parser.add_argument('--log_path', default='/data1/anon/crosstalk-generation/src/gpt/logs/interact.log', type=str, required=False, help='interact日志存放位置')
    parser.add_argument('--vocab_path', default='/data1/anon/crosstalk-generation/pretrain_model/CDial-GPT_LCCC-base/vocab.txt', type=str, required=False, help='选择词库')
    parser.add_argument('--model_path', default='/data1/anon/crosstalk-generation/trained_model_dir/gpt/ml-256-seed-1234/epoch100', type=str, required=False, help='对话模型路径')
    parser.add_argument('--save_samples_path', default="sample/", type=str, required=False, help="保存聊天记录的文件路径")
    parser.add_argument('--repetition_penalty', default=1.0, type=float, required=False,
                        help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
    # parser.add_argument('--seed', type=int, default=None, help='设置种子用于生成随机数,以使得训练的结果是确定的')
    parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
    parser.add_argument('--max_history_len', type=int, default=3, help="dialogue history的最大长度")
    parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行预测')
    return parser.parse_args()
def create_logger(args):
    """
    Build a logger that writes both to the log file and to the console.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # Handler that persists INFO-and-above records to args.log_path.
    to_file = logging.FileHandler(filename=args.log_path)
    to_file.setFormatter(fmt)
    to_file.setLevel(logging.INFO)
    logger.addHandler(to_file)

    # Handler that mirrors records to the console.
    to_console = logging.StreamHandler()
    to_console.setLevel(logging.DEBUG)
    to_console.setFormatter(fmt)
    logger.addHandler(to_console)
    return logger
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (vocab size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # Safety check
    if top_k > 0:
        # Remove all tokens with a probability less than the last token of the top-k
        # torch.topk() returns the top_k largest elements along the last dim
        # as a (values, indices) pair; `...` lets the remaining dims be inferred.
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value  # set logits outside the top-k to -inf
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)  # sort logits in descending order
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    return logits
def generate_text_by_input(args,text,history,model):
    """Generate one reply token-by-token with the GPT-2 dialogue model.

    The input is [CLS] + the last ``args.max_history_len`` turns, each
    followed by [SEP]; decoding samples one token at a time with
    repetition penalty, temperature and top-k/top-p filtering until [SEP]
    or ``args.utterance_max_len`` tokens.

    Side effects: appends the encoded user turn and the generated id list
    to ``history``.  Returns the reply as a list of token strings.
    """
    # NOTE(review): the tokenizer is rebuilt from the vocab file on every
    # call — hoisting it to the caller would avoid repeated disk reads.
    tokenizer = BertTokenizerFast(vocab_file=args.vocab_path, sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    text_ids = tokenizer.encode(text, add_special_tokens=False)
    history.append(text_ids)
    input_ids = [tokenizer.cls_token_id]  # every input starts with [CLS]
    for history_id, history_utr in enumerate(history[-args.max_history_len:]):
        input_ids.extend(history_utr)
        input_ids.append(tokenizer.sep_token_id)
    input_ids = torch.tensor(input_ids).long().to(model.device)
    input_ids = input_ids.unsqueeze(0)
    response = []  # the response generated from the context
    # Generate at most utterance_max_len tokens.
    for _ in range(args.utterance_max_len):
        outputs = model(input_ids=input_ids)
        logits = outputs.logits
        next_token_logits = logits[0, -1, :]
        # Apply a repetition penalty to every token already generated,
        # lowering its probability of being sampled again.
        for id in set(response):
            next_token_logits[id] /= args.repetition_penalty
        next_token_logits = next_token_logits / args.temperature
        # Force [UNK] to -inf so the model can never emit it.
        next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')
        filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=args.topk, top_p=args.topp)
        # torch.multinomial samples num_samples indices without replacement,
        # weighted by the (softmaxed) filtered logits.
        next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
        if next_token == tokenizer.sep_token_id:  # [SEP] marks the end of the response
            break
        response.append(next_token.item())
        input_ids = torch.cat((input_ids, next_token.unsqueeze(0)), dim=1)
        # his_text = tokenizer.convert_ids_to_tokens(curr_input_tensor.tolist())
        # print("his_text:{}".format(his_text))
    history.append(response)
    text = tokenizer.convert_ids_to_tokens(response)
    return text
def main():
    """Entry point: load the fine-tuned GPT-2 model and chat on stdin/stdout."""
    args = set_args()
    logger = create_logger(args)
    # Use the GPU when the user asked for it and one is available.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda' if args.cuda else 'cpu'
    logger.info('using device:{}'.format(device))
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    # tokenizer = BertTokenizer(vocab_file=args.voca_path)
    model = GPT2LMHeadModel.from_pretrained(args.model_path)
    model = model.to(device)
    model.eval()
    if args.save_samples_path:
        if not os.path.exists(args.save_samples_path):
            os.makedirs(args.save_samples_path)
        samples_file = open(args.save_samples_path + '/samples.txt', 'a', encoding='utf8')
        samples_file.write("聊天记录{}:\n".format(datetime.now()))
    # Chat history is stored as token-id lists per utterance.
    history = []
    print('开始和chatbot聊天,输入CTRL + Z以退出')
    while True:
        try:
            text = input("user:")
            # text = "你好"
            if args.save_samples_path:
                samples_file.write("user:{}\n".format(text))
            text = generate_text_by_input(args,text,history,model)
            print("chatbot:" + "".join(text))
            if args.save_samples_path:
                samples_file.write("chatbot:{}\n".format("".join(text)))
        except KeyboardInterrupt:
            # Close the transcript before exiting the loop.
            if args.save_samples_path:
                samples_file.close()
            break
if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/pytorchtools.py | src/gpt/pytorchtools.py | import numpy as np
import torch
from os.path import join
import os
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False, delta=0, save_path="."):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            save_path (str): Directory used when checkpoint saving is enabled.
                            Default: "."
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0           # consecutive epochs without improvement
        self.best_score = None     # best (negated) validation loss so far
        self.early_stop = False    # flag the training loop polls
        # Fix: np.Inf was removed in NumPy 2.0; np.inf is the supported name.
        self.val_loss_min = np.inf
        self.delta = delta
        self.save_path = save_path

    def __call__(self, val_loss, model):
        """Record one epoch's validation loss; sets self.early_stop when patience runs out."""
        # Negate so that "higher score" means "lower validation loss".
        score = -val_loss

        if self.best_score is None:
            # First observation: establish the baseline.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score < self.best_score + self.delta:
            # No sufficient improvement this epoch.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improvement: reset the patience counter.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')
        # Checkpoint writing is deliberately disabled here (train.py saves the
        # model itself every epoch); only the best loss value is tracked.
        # save_path = join(self.save_path, "best_model")
        # if not os.path.exists(save_path):
        #     os.mkdir(save_path)
        # model_to_save = model.module if hasattr(model, 'module') else model
        # model_to_save.save_pretrained(save_path)
        self.val_loss_min = val_loss
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/metrics.py | src/gpt/metrics.py | '''
Author: anon
Date: 2022-01-27 11:39:10
LastEditors: anon
LastEditTime: 2022-02-08 17:28:13
FilePath: /crosstalk-generation/src/gpt/metrics.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
from statistics import mean
from nltk import word_tokenize
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.gleu_score import corpus_gleu
from nltk.translate.meteor_score import meteor_score
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
import io,json
# import nltk
# nltk.download('wordnet')
'''
description: 计算bleu1,2,3,4的值
param {原始句} reference
param {预测句} hypothesis
return bleu1,bleu2,bleu3,bleu4
'''
def calculate_bleu_score(references, candidates):
    """Corpus BLEU-1..4 over character-tokenised sentences.

    Args:
        references: list of reference sentences (str); each is split into
            characters, suitable for Chinese.
        candidates: list of generated sentences (str).

    Returns:
        Tuple (BLEU_1, BLEU_2, BLEU_3, BLEU_4), each smoothed with
        SmoothingFunction.method1 to avoid zero n-gram counts.
    """
    smooth = SmoothingFunction()
    # corpus_bleu expects [[ref_tokens]] per sentence and [hyp_tokens].
    reference = [[[j for j in i]] for i in references]
    hypothesis = [[j for j in i] for i in candidates]
    BLEU_1 = corpus_bleu(reference, hypothesis, weights=(1, 0, 0, 0), smoothing_function=smooth.method1)
    BLEU_2 = corpus_bleu(reference, hypothesis, weights=(0.5, 0.5, 0, 0), smoothing_function=smooth.method1)
    BLEU_3 = corpus_bleu(reference, hypothesis, weights=(0.33, 0.33, 0.33, 0), smoothing_function=smooth.method1)
    BLEU_4 = corpus_bleu(reference, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=smooth.method1)
    return BLEU_1,BLEU_2,BLEU_3,BLEU_4
'''
description: 计算gleu值
param {原始句} reference
param {预测句} hypothesis
return gleu值
'''
def calculate_gleu_score(references, candidates):
    """Corpus GLEU over character-tokenised sentences.

    Args:
        references: list of reference sentences (str), split per character.
        candidates: list of generated sentences (str).

    Returns:
        The corpus GLEU score as a float.
    """
    ref_chars = [[list(sent)] for sent in references]
    hyp_chars = [list(sent) for sent in candidates]
    return corpus_gleu(ref_chars, hyp_chars)
'''
description: 中文不建议使用,因为依赖了wordnet,wordnet是英文词典
param {原始句} reference
param {预测句} hypothesis
return metetor值
'''
def calculate_meteor_score(references, candidates):
    """Mean sentence-level METEOR over character-tokenised pairs.

    Not recommended for Chinese: nltk's meteor_score relies on WordNet,
    which is an English lexicon.

    Returns:
        The arithmetic mean of the per-sentence METEOR scores.
    """
    reference = [[[j for j in i]] for i in references]
    hypothesis = [[j for j in i] for i in candidates]
    all_meteor = []
    for ref,hyp in zip(reference, hypothesis):
        all_meteor.append(meteor_score(ref,hyp))
    return mean(all_meteor)
'''
description: rouge值计算
param {原始句} reference
param {预测句} hypothesis
return rouge1,rouge2,rougel
'''
def calculate_rouge_score(reference, hypothesis):
    """Mean ROUGE-1/2/L F-scores over character-tokenised pairs.

    Each sentence is split into space-separated characters because the
    `rouge` package tokenises on whitespace.

    Returns:
        Tuple (mean rouge-1 F, mean rouge-2 F, mean rouge-L F).
    """
    rouge = Rouge()
    scores = []
    for ref,hyp in zip(reference,hypothesis):
        scores.append(rouge.get_scores(' '.join([i for i in hyp]), ' '.join([i for i in ref])))
    rouge_1 = [i[0]['rouge-1']['f'] for i in scores]
    rouge_2 = [i[0]['rouge-2']['f'] for i in scores]
    rouge_l = [i[0]['rouge-l']['f'] for i in scores]
    return mean(rouge_1),mean(rouge_2),mean(rouge_l)
def calc_distinct_ngram(pair_list, ngram):
    """
    Distinct-n: the ratio of unique n-grams to total n-grams across all
    generated sentences.  Sentences are tokenised character by character,
    so an n-gram is a run of ``ngram`` consecutive characters.
    """
    # Frequency table of every character n-gram over the whole corpus.
    freq = {}
    for sentence in pair_list:
        chars = [c for c in sentence]
        for start in range(0, len(chars) - ngram + 1):
            gram = "".join(chars[start:start + ngram])
            freq[gram] = freq.get(gram, 0) + 1

    total_count = 0.0
    distinct_count = 0.0
    for _, occurrences in freq.items():
        total_count += occurrences
        distinct_count += 1
    return distinct_count / total_count
def test_demo():
    """Smoke-test the metric helpers on three hard-coded sentence pairs."""
    references = ['我今天晚上必须回家吃饭','广东鸡翅膀,我最爱吃','天天都需要你爱']
    candidates = ['晚上我要回家吃饭','最好吃的是广东鸡翅膀','啦啦啦啦要你爱']
    belu_scores = calculate_bleu_score(references,candidates)
    gleu_scores = calculate_gleu_score(references,candidates)
    # METEOR is skipped: it needs WordNet, which does not cover Chinese.
    # meteor_scores = calculate_meteor_score(references,candidates)
    rouge_scores = calculate_rouge_score(references,candidates)
    print(belu_scores)
def cal_metrics():
    """Compute all automatic metrics for the generated machine_metric_data.json.

    Reads the {'ori': ..., 'gen': ...} pairs written by
    get_machine_metric_datas and returns a dict of BLEU-1..4, GLEU,
    ROUGE-1/2/L and Distinct-1/2, all scaled to percentages.
    """
    machine_gen_file = '/data1/anon/crosstalk-generation/src/gpt/sample/machine_metric_data.json'
    raw_text = io.open(machine_gen_file,'r').read()
    data_list = json.loads(raw_text)
    references = []
    candidates = []
    for data_item in data_list:
        references.append(data_item['ori'])
        candidates.append(data_item['gen'])
    # Distinct-n only looks at the generated side.
    distinct_1 = calc_distinct_ngram(candidates,1)
    distinct_2 = calc_distinct_ngram(candidates,2)
    belu_scores = calculate_bleu_score(references,candidates)
    gleu_scores = calculate_gleu_score(references,candidates)
    rouge_scores = calculate_rouge_score(references,candidates)
    result = {
        'bleu_1':belu_scores[0] * 100,
        'bleu_2':belu_scores[1] * 100,
        'bleu_3':belu_scores[2] * 100,
        'bleu_4':belu_scores[3] * 100,
        'gleu':gleu_scores * 100,
        'rouge_1':rouge_scores[0] * 100,
        'rouge_2':rouge_scores[1] * 100,
        'rouge_l':rouge_scores[2] * 100,
        'distinct_1':distinct_1 * 100,
        'distinct_2':distinct_2 * 100
    }
    return result
if __name__ == '__main__':
    print(cal_metrics())
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/train.py | src/gpt/train.py | import argparse
from lib2to3.pgen2 import token
import math
import time
from urllib import response
import torch
import torch.nn.functional as F
import torch.optim as optim
import logging
from datetime import datetime
import os
from torch.utils.data import Dataset, DataLoader
from os.path import join, exists
from torch.nn import CrossEntropyLoss
from tqdm import tqdm
from torch.nn import DataParallel
import transformers
import pickle
import sys,random
from pytorchtools import EarlyStopping
from sklearn.model_selection import train_test_split
from data_parallel import BalancedDataParallel
from transformers import GPT2TokenizerFast, GPT2LMHeadModel, GPT2Config
from transformers import BertTokenizerFast
import pandas as pd
import torch.nn.utils.rnn as rnn_utils
import numpy as np
from dataset import MyDataset
from interact import top_k_top_p_filtering
def set_args():
    """Parse the command line for GPT-2 fine-tuning.

    Covers device selection, data/model paths, optimisation hyperparameters
    (lr, batch size, gradient accumulation, warmup), logging, early stopping,
    and — when dev_cal_metric is set — the sampling hyperparameters used to
    compute generation metrics during validation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, required=False, help='设置使用哪些显卡')
    parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行训练')
    parser.add_argument('--data_dir', default='/data1/anon/crosstalk-generation/src/gpt/data/p256-s64', type=str, required=False, help='数据基础路径路径')
    parser.add_argument('--max_len', default=256, type=int, required=False, help='训练时,输入数据的最大长度')
    parser.add_argument('--log', default=True, help="是否记录日志")
    parser.add_argument('--ignore_index', default=-100, type=int, required=False, help='对于ignore_index的label token不计算梯度')
    parser.add_argument('--epochs', default=100, type=int, required=False, help='训练的最大轮次')
    parser.add_argument('--batch_size', default=36, type=int, required=False, help='训练的batch size')
    parser.add_argument('--gpu0_bsz', default=2, type=int, required=False, help='0号卡的batch size')
    parser.add_argument('--lr', default=1.5e-4, type=float, required=False, help='学习率')
    parser.add_argument('--eps', default=1.0e-09, type=float, required=False, help='衰减率')
    parser.add_argument('--log_step', default=100, type=int, required=False, help='多少步汇报一次loss')
    parser.add_argument('--gradient_accumulation_steps', default=4, type=int, required=False, help='梯度积累')
    parser.add_argument('--max_grad_norm', default=2.0, type=float, required=False)
    parser.add_argument('--save_model_base_path', default='/data1/anon/crosstalk-generation/trained_model_dir/gpt', type=str, required=False,
                        help='模型输出总路径')
    parser.add_argument('--pretrained_model', default='/data1/anon/crosstalk-generation/pretrain_model/CDial-GPT_LCCC-base', type=str, required=False,
                        help='预训练的模型的路径')
    parser.add_argument('--seed', type=int, default=1234, help='设置种子用于生成随机数,以使得训练的结果是确定的')
    parser.add_argument('--num_workers', type=int, default=0, help="dataloader加载数据时使用的线程数量")
    parser.add_argument('--patience', type=int, default=0, help="用于early stopping,设为0时,不进行early stopping.early stop得到的模型的生成效果不一定会更好。")
    parser.add_argument('--warmup_rate', type=float, default=0.1, help='warm up步数占总步数的比例')
    # Sampling hyperparameters used only for dev-time generation metrics
    # (BLEU / GLEU / ROUGE); generation there differs from real inference.
    parser.add_argument('--dev_cal_metric', default=False, help="是否在dev验证时计算机器指标如bleu等值,建议不用,因为此处的输出方式和我们真实使用时不一致")
    parser.add_argument('--temperature', default=1, type=float, required=False, help='生成的temperature')
    parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    parser.add_argument('--repetition_penalty', default=1.0, type=float, required=False,
                        help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
    parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
    args = parser.parse_args()
    return args
def set_random_seed(args):
    """
    Seed torch, python-random and numpy so training runs are reproducible.
    """
    seed = args.seed
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    # Force deterministic cuDNN kernels and disable autotuning so GPU runs
    # are repeatable as well.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def create_logger(args):
    """
    Build a logger that writes both to the log file and to the console.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s')
    # Handler that persists INFO-and-above records to args.log_path.
    file_handler = logging.FileHandler(
        filename=args.log_path)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    # Handler that mirrors records to the console.
    # NOTE(review): calling this twice in one process would attach duplicate
    # handlers to the same named logger.
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)
    return logger
def collate_fn(batch):
    """Pad a batch of variable-length id tensors for the DataLoader.

    Returns:
        input_ids: batch-first tensor right-padded with 0 (the PAD id).
        labels: same sequences right-padded with -100 so the loss
            (ignore_index) skips padding positions.
    """
    padded_inputs = rnn_utils.pad_sequence(batch, batch_first=True, padding_value=0)
    padded_labels = rnn_utils.pad_sequence(batch, batch_first=True, padding_value=-100)
    return padded_inputs, padded_labels
# def padding_batch(data_list, pad_id):
# """
# 使用pad_id将data_list的每条数据,填充至data_list中最长的长度
# :param data_list:
# :param pad_id:
# :return:
# """
# # 统计data_list中的最大长度
# max_len = 0
# for data in data_list:
# max_len = max_len if max_len > len(data) else len(data)
#
# # 对数据进行padding
# new_data_list = []
# for data in data_list:
# new_data = data + [pad_id] * (max_len - len(data))
# new_data_list.append(new_data)
# return new_data_list
def load_dataset(logger, args):
    """
    Load the pickled train/dev/test token-id lists from args.data_dir and
    wrap each in a MyDataset truncated to args.max_len.

    Returns:
        (train_dataset, val_dataset, test_dataset)
    """
    logger.info("loading training dataset and validating dataset")
    train_path = os.path.join(args.data_dir,'train.pkl')
    valid_path = os.path.join(args.data_dir,'dev.pkl')
    test_path = os.path.join(args.data_dir,'test.pkl')
    with open(train_path, "rb") as f:
        train_input_list = pickle.load(f)
    with open(valid_path, "rb") as f:
        valid_input_list = pickle.load(f)
    with open(test_path, "rb") as f:
        test_input_list = pickle.load(f)
    train_dataset = MyDataset(train_input_list, args.max_len)
    val_dataset = MyDataset(valid_input_list, args.max_len)
    test_dataset = MyDataset(test_input_list, args.max_len)
    return train_dataset, val_dataset, test_dataset
def train_epoch(model, train_dataloader, optimizer, scheduler, logger,
                epoch, args):
    """Run one training epoch and save the model afterwards.

    Accumulates gradients over args.gradient_accumulation_steps batches,
    clips them to args.max_grad_norm, logs loss/accuracy every
    args.log_step batches, and recovers from CUDA OOM by emptying the
    cache.  Returns the epoch's mean loss.
    """
    model.train()
    device = args.device
    # pad_id = args.pad_id
    # sep_id = args.sep_id
    ignore_index = args.ignore_index
    epoch_start_time = datetime.now()
    total_loss = 0  # running sum of the loss over the whole epoch
    # epoch_correct_num: tokens predicted correctly in this epoch
    # epoch_total_num: total tokens predicted in this epoch
    epoch_correct_num, epoch_total_num = 0, 0
    for batch_idx, (input_ids, labels) in enumerate(train_dataloader):
        # Catch CUDA out-of-memory exceptions so a single oversized batch
        # does not kill the whole run.
        try:
            input_ids = input_ids.to(device)
            labels = labels.to(device)
            outputs = model.forward(input_ids, labels=labels)
            logits = outputs.logits
            loss = outputs.loss
            loss = loss.mean()
            # Per-batch correct/total token counts.
            # NOTE(review): calculate_acc is defined elsewhere in this file.
            batch_correct_num, batch_total_num = calculate_acc(logits, labels, ignore_index=ignore_index)
            # Accumulate into the epoch-level counters.
            epoch_correct_num += batch_correct_num
            epoch_total_num += batch_total_num
            # Accuracy for this batch only.
            batch_acc = batch_correct_num / batch_total_num
            total_loss += loss.item()
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            # Gradient clipping.
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            # Update parameters only after accumulating enough steps.
            if (batch_idx + 1) % args.gradient_accumulation_steps == 0:
                # Apply the accumulated gradients.
                optimizer.step()
                # Advance the learning-rate schedule.
                scheduler.step()
                # Clear gradients for the next accumulation window.
                optimizer.zero_grad()
            if (batch_idx + 1) % args.log_step == 0:
                logger.info(
                    "batch {} of epoch {}, loss {}, batch_acc {}, lr {}".format(
                        batch_idx + 1, epoch + 1, loss.item() * args.gradient_accumulation_steps, batch_acc, scheduler.get_lr()))
            del input_ids, outputs
        except RuntimeError as exception:
            if "out of memory" in str(exception):
                logger.info("WARNING: ran out of memory")
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
            else:
                logger.info(str(exception))
                raise exception
    # Mean loss and accuracy for the whole epoch.
    epoch_mean_loss = total_loss / len(train_dataloader)
    epoch_mean_acc = epoch_correct_num / epoch_total_num
    logger.info(
        "epoch {}: loss {}, predict_acc {}".format(epoch + 1, epoch_mean_loss, epoch_mean_acc))
    # save model
    logger.info('saving model for epoch {}'.format(epoch + 1))
    model_path = join(args.save_model_path, 'epoch{}'.format(epoch + 1))
    if not os.path.exists(model_path):
        os.mkdir(model_path)
    model_to_save = model.module if hasattr(model, 'module') else model
    model_to_save.save_pretrained(model_path)
    logger.info('epoch {} finished'.format(epoch + 1))
    epoch_finish_time = datetime.now()
    logger.info('time for one epoch: {}'.format(epoch_finish_time - epoch_start_time))
    return epoch_mean_loss
def change_batch_logit2content(args,logits,labels):
    """Decode a validation batch's logits and labels into text pairs.

    Samples one token per position from the (penalised, filtered) logits
    and decodes both the sampled sequence and the gold labels, so that
    dev-time generation metrics can be computed.

    Returns:
        (batch_infer, batch_label): lists of decoded strings, [SEP]/[CLS]
        markers stripped.
    """
    # NOTE(review): the tokenizer is rebuilt on every call; hoisting it out
    # would avoid repeated vocab-file reads.
    tokenizer = BertTokenizerFast(vocab_file=os.path.join(args.pretrained_model,'vocab.txt'), sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    # Replace the loss ignore_index with PAD (0) so batch_decode works.
    labels[labels==-100] = 0
    text_labels = tokenizer.batch_decode(labels)
    batch_infer = []
    batch_label = []
    for bs_idx in range(args.batch_size):
        single_logit = logits[bs_idx]
        single_label = text_labels[bs_idx]
        response = []
        for tok_idx in range(single_logit.shape[0]):
            next_token_logits = single_logit[tok_idx]
            # Penalise tokens already generated to reduce repetition,
            # except the special [SEP]/[CLS] ids.
            for id in set(response):
                if id in [tokenizer.sep_token_id,tokenizer.cls_token_id]:
                    continue
                next_token_logits[id] /= args.repetition_penalty
            next_token_logits = next_token_logits / args.temperature
            # Force [UNK] to -inf so it can never be sampled.
            next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')
            filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=args.topk, top_p=args.topp)
            # Sample one token index, weighted by the softmaxed logits.
            next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
            response.append(next_token)
        single_infer = tokenizer.convert_ids_to_tokens(response)
        batch_infer.append(''.join([i for i in single_infer if i!='[SEP]']))
        batch_label.append(''.join([i for i in single_label.split() if i not in ['[CLS]','[SEP]']]))
    return batch_infer,batch_label
def validate_epoch(model, validate_dataloader, logger, epoch, args):
    """Run one evaluation pass over the validation set.

    Args:
        model: the (possibly DataParallel-wrapped) language model.
        validate_dataloader: yields (input_ids, labels) batches.
        logger: logger for progress/metric output.
        epoch: zero-based epoch index (logged as epoch + 1).
        args: namespace providing device, dev_cal_metric, ignore_index, etc.

    Returns:
        Mean validation loss for this epoch, or implicitly ``None`` when a
        CUDA out-of-memory error is caught (NOTE(review): callers should be
        prepared for that).
    """
    logger.info("start validating")
    model.eval()
    device = args.device
    # pad_id = args.pad_id
    # sep_id = args.sep_id
    ignore_index = args.ignore_index
    epoch_start_time = datetime.now()
    total_loss = 0
    all_inter = []
    all_label = []
    # Catch CUDA out-of-memory exceptions below.
    try:
        with torch.no_grad():
            for batch_idx, (input_ids, labels) in enumerate(validate_dataloader):
                input_ids = input_ids.to(device)
                labels = labels.to(device)
                outputs = model.forward(input_ids, labels=labels)
                logits = outputs.logits
                loss = outputs.loss
                loss = loss.mean()  # average across DataParallel replicas
                if args.dev_cal_metric:
                    # Optionally decode predictions/golds for text metrics.
                    batch_infer,batch_label = change_batch_logit2content(args,logits,labels)
                    all_inter.extend(batch_infer)
                    all_label.extend(batch_label)
                total_loss += loss.item()
                del input_ids, outputs
        # Record this epoch's mean loss; ppl = exp(mean loss).
        epoch_mean_loss = total_loss / len(validate_dataloader)
        logger.info(
            "validate epoch {}: loss {}, ppl {}".format(epoch+1, epoch_mean_loss, np.exp(epoch_mean_loss)))
        epoch_finish_time = datetime.now()
        logger.info('time for validating one epoch: {}'.format(epoch_finish_time - epoch_start_time))
        return epoch_mean_loss
    except RuntimeError as exception:
        if "out of memory" in str(exception):
            logger.info("WARNING: ran out of memory")
            if hasattr(torch.cuda, 'empty_cache'):
                torch.cuda.empty_cache()
        else:
            logger.info(str(exception))
            raise exception
def train(model, logger, train_dataset, validate_dataset, args):
    """Full training loop: train and validate every epoch, checkpointing the
    per-epoch models and the lowest-validation-loss model, with optional
    early stopping (disabled when ``args.patience == 0``).
    """
    train_dataloader = DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collate_fn,
        drop_last=True
    )
    validate_dataloader = DataLoader(validate_dataset, batch_size=args.batch_size, shuffle=True,
                                     num_workers=args.num_workers, collate_fn=collate_fn, drop_last=True)
    early_stopping = EarlyStopping(args.patience, verbose=True, save_path=args.save_model_path)
    # Total number of optimizer steps, used to size the warmup schedule.
    t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.epochs
    optimizer = transformers.AdamW(model.parameters(), lr=args.lr, eps=args.eps)
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=int(args.warmup_rate * t_total), num_training_steps=t_total
    )
    logger.info('starting training')
    # Per-epoch training and validation losses.
    train_losses, validate_losses = [], []
    # Lowest validation loss seen so far.
    best_val_loss = 10000
    # Start training.
    for epoch in range(args.epochs):
        # ========== train ========== #
        train_loss = train_epoch(
            model=model, train_dataloader=train_dataloader,
            optimizer=optimizer, scheduler=scheduler,
            logger=logger, epoch=epoch, args=args)
        train_losses.append(train_loss)
        # ========== validate ========== #
        validate_loss = validate_epoch(
            model=model, validate_dataloader=validate_dataloader,
            logger=logger, epoch=epoch, args=args)
        validate_losses.append(validate_loss)
        # Keep the checkpoint with the lowest perplexity so far (a low ppl
        # does not necessarily mean better generations).
        if validate_loss < best_val_loss:
            best_val_loss = validate_loss
            logger.info('saving current best model for epoch {}'.format(epoch + 1))
            # NOTE(review): 'min_ppl_model'.format(...) is a no-op — the string
            # has no placeholder, so every improvement overwrites the same
            # directory (possibly intentional).
            model_path = join(args.save_model_path, 'min_ppl_model'.format(epoch + 1))
            if not os.path.exists(model_path):
                os.mkdir(model_path)
            model_to_save = model.module if hasattr(model, 'module') else model
            model_to_save.save_pretrained(model_path)
        # patience == 0 disables early stopping entirely.
        if args.patience == 0:
            continue
        early_stopping(validate_loss, model)
        if early_stopping.early_stop:
            logger.info("Early stopping")
            break
    logger.info('training finished')
    logger.info("train_losses:{}".format(train_losses))
    logger.info("validate_losses:{}".format(validate_losses))
def caculate_loss(logit, target, pad_idx, smoothing=True):
    """Next-token LM loss (sequence shifted by one position).

    With ``smoothing`` the target distribution is label-smoothed (eps=0.1)
    and positions whose target equals ``pad_idx`` are masked out; otherwise
    plain cross entropy with ``ignore_index=pad_idx`` is used.
    """
    if not smoothing:
        flat_logit = logit[..., :-1, :].contiguous().view(-1, logit.size(-1))
        flat_target = target[..., 1:].contiguous().view(-1)
        return F.cross_entropy(flat_logit, flat_target, ignore_index=pad_idx)
    # Label-smoothing path: build the smoothed one-hot targets by hand.
    flat_logit = logit[..., :-1, :].contiguous().view(-1, logit.size(2))
    flat_target = target[..., 1:].contiguous().view(-1)
    smooth_eps = 0.1
    num_classes = flat_logit.size(-1)
    smoothed = torch.zeros_like(flat_logit).scatter(1, flat_target.view(-1, 1), 1)
    smoothed = smoothed * (1 - smooth_eps) + (1 - smoothed) * smooth_eps / (num_classes - 1)
    log_probs = F.log_softmax(flat_logit, dim=1)
    keep_mask = flat_target.ne(pad_idx)  # drop padded positions from the mean
    per_token_loss = -(smoothed * log_probs).sum(dim=1)
    return per_token_loss.masked_select(keep_mask).mean()
def calculate_acc(logit, labels, ignore_index=-100):
    """Token-level prediction accuracy for the shifted LM objective.

    Returns ``(n_correct, n_word)`` counted over positions whose shifted
    label differs from ``ignore_index``.
    """
    flat_logit = logit[..., :-1, :].contiguous().view(-1, logit.size(-1))
    flat_labels = labels[..., 1:].contiguous().view(-1)
    predictions = flat_logit.argmax(dim=-1)  # best class index per position
    valid = flat_labels.ne(ignore_index)     # 0 where the label is ignored, 1 otherwise
    n_correct = predictions.eq(flat_labels).masked_select(valid).sum().item()
    n_word = valid.sum().item()
    return n_correct, n_word
def main():
    """Entry point: parse args, set up logging/device/tokenizer/model, train."""
    # Initialize arguments.
    args = set_args()
    # Choose which GPUs are visible for training.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    # Create the model output directory.
    args.save_model_path = os.path.join(args.save_model_base_path,f'ml-{args.max_len}-seed-{args.seed}')
    args.log_path = os.path.join(args.save_model_base_path,'train.logs')
    if not os.path.exists(args.save_model_path):
        os.makedirs(args.save_model_path)
    # Create the logger.
    logger = create_logger(args)
    logger.info('#' * 30)
    logger.info(args)
    logger.info('#' * 30)
    # NOTE(review): args.cuda is recomputed (duplicate of the assignment
    # above) and args.device is overwritten from a GPU-id string to
    # 'cuda'/'cpu' — the later "use GPU {}" log line therefore prints the
    # device string, not the original GPU ids.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda' if args.cuda else 'cpu'
    args.device = device
    logger.info('using device:{}'.format(device))
    # Initialize the tokenizer.
    tokenizer = BertTokenizerFast(vocab_file=os.path.join(args.pretrained_model,'vocab.txt'), sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    args.sep_id = tokenizer.sep_token_id
    args.pad_id = tokenizer.pad_token_id
    args.cls_id = tokenizer.cls_token_id
    # Create the model.
    model = GPT2LMHeadModel.from_pretrained(args.pretrained_model)
    model = model.to(device)
    logger.info('model config:\n{}'.format(model.config.to_json_string()))
    assert model.config.vocab_size == tokenizer.vocab_size
    # Multi-GPU training: use the balanced split when GPU 0 gets a smaller batch.
    if args.cuda and torch.cuda.device_count() > 1:
        if args.batch_size != args.gpu0_bsz:
            model = BalancedDataParallel(args.gpu0_bsz, model, dim=0).cuda()
        else:
            model = DataParallel(model).cuda()
        logger.info("use GPU {} to train".format(args.device))
    # Count model parameters.
    num_parameters = 0
    parameters = model.parameters()
    for parameter in parameters:
        num_parameters += parameter.numel()
    logger.info('number of model parameters: {}'.format(num_parameters))
    # Record the argument settings.
    logger.info("args:{}".format(args))
    # Load the training and validation sets.
    # ========= Loading Dataset ========= #
    train_dataset, val_dataset, test_dataset = load_dataset(logger, args)
    train(model, logger, train_dataset, val_dataset, args)
# Script entry point.
if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/draw_loss.py | src/gpt/draw_loss.py | import numpy as np
import matplotlib.pyplot as plt
train_loss = [44.48271321031384, 6.583550890944058, 5.2243800984289415, 4.793724752727308, 4.509950302328382, 4.293438681534359, 4.118832167109153, 3.9655399139662433, 3.8359335477190806, 3.7205823993324336, 3.6183518121117038, 3.529470994508356, 3.454126060367527, 3.3876399215002704, 3.327385791979338, 3.2738280115270975, 3.2241040730834905, 3.1793050898645157, 3.1380796483584814, 3.097194073881422, 3.058997189281578, 3.0224359229095, 2.989321135847192, 2.956893386876673, 2.925039403779166, 2.8960848723139083, 2.8673695635078547, 2.838541273156503, 2.812697682255193, 2.7858682384168296, 2.760819730185028, 2.7354322604666974, 2.7120282050362206, 2.688629031988015, 2.666998180500547, 2.6451193193744, 2.6239456789834157, 2.603371260578471, 2.5841847509369815, 2.565063550597743, 2.54530718102491, 2.527386322415861, 2.510184377118161, 2.4923996164386435, 2.4756604128762296, 2.459907821246556, 2.442924183651917, 2.426776533825953, 2.411911290211785, 2.3977591939438554, 2.383567075801075, 2.3692382172982493, 2.3537363544890755, 2.3426682418898532, 2.3303275399190144, 2.315959686444218, 2.30516399629134, 2.293007003140629, 2.2806038615846993, 2.2688294145397676, 2.259110706417184, 2.2488786947906467, 2.2375688153101985, 2.227671702463824, 2.217756683709926, 2.208003384995281, 2.1991461373809584, 2.191128186623853, 2.1806941671927174, 2.172695298436889, 2.1649881235638957, 2.1569865969338813, 2.148207969056036, 2.1422095429628416, 2.1350791305079495, 2.1268079251275025, 2.120990522060179, 2.1148893516762812, 2.107709726803285, 2.1024627320300366, 2.096419497584938, 2.0913853452169806, 2.085944129067256, 2.0798576306579704, 2.077440956615864, 2.071507751762419, 2.0658729361860377, 2.063466019558727, 2.0598251012034883, 2.0547408370147076, 2.051986127733288, 2.049203844536516, 2.0468371813906763, 2.043903685153875, 2.0411412049505047, 2.0387904661490506, 2.0368946077232075, 2.0359663403571995, 2.0344403394182824, 2.0328974296275835]
valid_loss = [6.632119738052938, 4.592151895119023, 4.284870942433675, 4.1230103282542245, 4.030122367763816, 3.9413269297727545, 3.895269082714093, 3.8443484729695543, 3.8023344192920816, 3.781304602682405, 3.753471881058357, 3.7412116174757295, 3.730542713979323, 3.725996570423756, 3.716093263150747, 3.7090776037204303, 3.7108728900505374, 3.713725692015199, 3.7089336805997237, 3.7278165965808143, 3.719184803071423, 3.731867850012497, 3.7419109656431964, 3.753017064195556, 3.758844084457445, 3.7659045639810533, 3.7814736303139327, 3.7981787303526455, 3.80679324631379, 3.817094279969593, 3.8320516451868314, 3.8536363416743056, 3.8638582586128023, 3.8662699947476016, 3.8776759202606583, 3.8897769759377216, 3.9205473858248037, 3.9219261240736345, 3.9313435045729546, 3.9452734907096794, 3.9682855379544315, 3.9751413908331563, 3.983424531336514, 3.9876989508715, 4.0045680049052494, 4.033595425688961, 4.0295334879854385, 4.054913828306109, 4.058664200461913, 4.07562237253813, 4.095141318728248, 4.103051297018461, 4.1247230697643715, 4.131437508859367, 4.146622050217007, 4.158886610904586, 4.184614694007089, 4.1859706075763405, 4.188921441170286, 4.206857152817034, 4.228782836893266, 4.224493684427018, 4.237151153733797, 4.247350576510682, 4.235146184576634, 4.2600116291521495, 4.268260229042386, 4.295261158750065, 4.290834169150141, 4.300445780947201, 4.309303197905282, 4.3218370568343785, 4.333605740300592, 4.338023634714501, 4.359051271391063, 4.346706262258726, 4.375257317150865, 4.376839564225384, 4.384927596629966, 4.391139392912202, 4.396961501825635, 4.409217323840965, 4.4099885875190905, 4.426319476228636, 4.4286229317062, 4.4389782397546504, 4.436613840477489, 4.434759803650164, 4.448284151398133, 4.447159450373546, 4.455189101049833, 4.451785619756515, 4.465889912156673, 4.465894921917782, 4.460803010753382, 4.465916545591622, 4.471542153774392, 4.4720834735026616, 4.473313391394333, 4.477410685607578]
# Plot the training vs. validation loss curves and save the figure.
# (Removed an unused `yerr = np.linspace(...)` local that was computed but
# never passed to any plotting call.)
fig = plt.figure()
x = np.arange(len(train_loss))
plt.plot(x, train_loss, label='train-loss')
plt.plot(x, valid_loss, label='valid-loss')
plt.legend(loc='lower right')
plt.savefig('gpt_loss.png')
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/generate_dialogue_subset.py | src/gpt/generate_dialogue_subset.py | import argparse
from os.path import join
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
from matplotlib.pyplot import MultipleLocator
def generate_subset():
    """
    Write the first ``--subset_size`` dialogues of the raw corpus to a file.

    Reads ``--raw_data_path`` (dialogues separated by blank lines), keeps at
    most ``subset_size`` of them, and writes them in the same format to
    ``<subset_data_path>/train_<N>w.txt`` where N is the subset size in
    units of 10k dialogues.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--raw_data_path', default='data/train.txt', type=str, required=False, help='原始训练语料')
    parser.add_argument('--subset_size', default=1000000, type=int, required=False, help='要获取的对话数据子集的规模')
    parser.add_argument('--subset_data_path', default='data', type=str, required=False,
                        help='数据子集文件路径,指定文件的父目录')
    args = parser.parse_args()
    with open(args.raw_data_path, "r", encoding="utf8") as f:
        data = f.read()
    dialogues = data.split("\n\n")
    subset_size = min(len(dialogues), args.subset_size)
    with open(join(args.subset_data_path, "train_{}w.txt".format(int(subset_size / 10000))), "w", encoding="utf8") as f:
        print("generating subset,please wait a few minutes")
        for dialogue_index, dialogue in enumerate(dialogues):
            if dialogue_index >= subset_size:
                break
            for utterance in dialogue.split("\n"):
                # FIX: use write() for a single string — writelines() iterates
                # its argument, so passing a str wrote it character by
                # character (same bytes, wrong idiom and needlessly slow).
                f.write(utterance + "\n")
            # Blank line terminates each dialogue.
            f.write("\n")
def compute_dialogue_length():
    """
    Plot the distribution of dialogue lengths (in characters) in the corpus.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--raw_data_path', default='data/train.txt', type=str, required=False, help='原始训练语料')
    args = parser.parse_args()
    with open(args.raw_data_path, "r", encoding="utf8") as f:
        corpus = f.read()
    dialogues = corpus.split("\n\n")
    # Length of each dialogue once its internal line breaks are removed.
    lengths = [len(dialogue.replace("\n", "")) for dialogue in dialogues]
    counter = Counter(lengths)  # {length: number of dialogues with that length}
    length_values = list(counter)
    counts = [counter[value] for value in length_values]
    print(counter[300])
    axes = plt.gca()
    # Major x ticks every 100 characters.
    axes.xaxis.set_major_locator(MultipleLocator(100))
    plt.xlabel('dialogue length')
    plt.ylabel('number of dialogue')
    plt.scatter(length_values, counts)
    plt.show()
# Script entry point: build the training subset with the default CLI args.
if __name__ == '__main__':
    generate_subset()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/data_parallel.py | src/gpt/data_parallel.py | from torch.nn.parallel import DataParallel
import torch
from torch.nn.parallel._functions import Scatter
from torch.nn.parallel.parallel_apply import parallel_apply
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        # Tensors are split across devices according to chunk_sizes.
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            # FIX: was a bare `except:` — that also swallowed SystemExit /
            # KeyboardInterrupt; Exception keeps the debug dump for real errors.
            except Exception:
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                quit()
        # Containers are scattered element-wise and re-zipped per device.
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Anything else is shared by reference: one copy per target device.
        # FIX: unused loop variable renamed `targets` -> `_`.
        return [obj for _ in target_gpus]
    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
    r"""Scatter with support for kwargs dictionary"""
    scattered_inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
    # Pad the shorter list so both carry one entry per device replica.
    gap = len(scattered_kwargs) - len(scattered_inputs)
    if gap > 0:
        scattered_inputs.extend(() for _ in range(gap))
    elif gap < 0:
        scattered_kwargs.extend({} for _ in range(-gap))
    return tuple(scattered_inputs), tuple(scattered_kwargs)
class BalancedDataParallel(DataParallel):
    """DataParallel variant that lets GPU 0 take a smaller batch slice.

    ``gpu0_bsz`` is the number of samples placed on the first device; the
    rest of the batch is split evenly across the other devices. With
    ``gpu0_bsz == 0`` the first device holds only the model replica.
    """
    def __init__(self, gpu0_bsz, *args, **kwargs):
        # Batch size reserved for device 0; everything else is stock DataParallel.
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)
    def forward(self, *inputs, **kwargs):
        """Scatter inputs (unevenly), replicate the module, gather outputs."""
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # When GPU 0 carries no data it is excluded from the compute devices.
        if self.gpu0_bsz == 0:
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
        # print('len(inputs)1: ', str(len(inputs)))
        # print('self.device_ids[:len(inputs)]', str(self.device_ids[:len(inputs)]))
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        if self.gpu0_bsz == 0:
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)
    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        # One replica per scattered input chunk.
        return parallel_apply(replicas, inputs, kwargs, device_ids[:len(inputs)])
    def scatter(self, inputs, kwargs, device_ids):
        """Split the batch into [gpu0_bsz, bsz_unit, bsz_unit, ...] chunks."""
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
        if gpu0_bsz < bsz_unit:
            chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
            # Spread any rounding remainder over the non-gpu0 devices.
            delta = bsz - sum(chunk_sizes)
            for i in range(delta):
                chunk_sizes[i + 1] += 1
            if gpu0_bsz == 0:
                chunk_sizes = chunk_sizes[1:]
        else:
            # GPU 0's share is not smaller than the others': fall back to the
            # even split of the stock DataParallel.
            return super().scatter(inputs, kwargs, device_ids)
        # print('bsz: ', bsz)
        # print('chunk_sizes: ', chunk_sizes)
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/generate_testdatasets.py | src/gpt/generate_testdatasets.py | import transformers
import torch
import os
import json
import random
import numpy as np
import argparse
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
import logging
from transformers import GPT2TokenizerFast, GPT2LMHeadModel, GPT2Config
from transformers import BertTokenizerFast
# from transformers import BertTokenizer
from os.path import join, exists
from itertools import zip_longest, chain
# from chatbot.model import DialogueGPT2Model
from dataset import MyDataset
from torch.utils.data import Dataset, DataLoader
from torch.nn import CrossEntropyLoss
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
PAD = '[PAD]'
pad_id = 0
def set_args():
    """
    Sets up the arguments.

    CLI options for interactive generation: sampling controls (temperature /
    top-k / top-p / repetition penalty), model and vocab paths, the dialogue
    history window, and where to log chat samples.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, required=False, help='生成设备')
    # NOTE(review): --test_path reuses the help text of --device ('生成设备') —
    # looks like a copy/paste slip; confirm the intended description.
    parser.add_argument('--test_path', default='0', type=str, required=False, help='生成设备')
    parser.add_argument('--temperature', default=1, type=float, required=False, help='生成的temperature')
    parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    # parser.add_argument('--model_config', default='config/model_config_dialogue_small.json', type=str, required=False,
    #                     help='模型参数')
    parser.add_argument('--log_path', default='/data1/anon/crosstalk-generation/src/gpt/logs/interact.log', type=str, required=False, help='interact日志存放位置')
    parser.add_argument('--vocab_path', default='/data1/anon/crosstalk-generation/pretrain_model/CDial-GPT_LCCC-base/vocab.txt', type=str, required=False, help='选择词库')
    parser.add_argument('--model_path', default='/data1/anon/crosstalk-generation/trained_model_dir/gpt/ml-256-seed-1234/min_ppl_model', type=str, required=False, help='对话模型路径')
    parser.add_argument('--save_samples_path', default="sample/", type=str, required=False, help="保存聊天记录的文件路径")
    parser.add_argument('--repetition_penalty', default=1.0, type=float, required=False,
                        help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
    # parser.add_argument('--seed', type=int, default=None, help='设置种子用于生成随机数,以使得训练的结果是确定的')
    parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
    parser.add_argument('--max_history_len', type=int, default=3, help="dialogue history的最大长度")
    parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行预测')
    return parser.parse_args()
def create_logger(args):
    """
    Create a logger that mirrors output to ``args.log_path`` and the console.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    # Persist INFO-and-above records to the log file.
    to_file = logging.FileHandler(filename=args.log_path)
    to_file.setFormatter(fmt)
    to_file.setLevel(logging.INFO)
    logger.addHandler(to_file)
    # Echo records to the console as well.
    to_console = logging.StreamHandler()
    to_console.setLevel(logging.DEBUG)
    to_console.setFormatter(fmt)
    logger.addHandler(to_console)
    return logger
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Mask a 1-D logits vector in place for top-k and/or nucleus sampling.

    Args:
        logits: logits distribution shape (vocab size)
        top_k > 0: keep only top k tokens with highest probability (top-k filtering).
        top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
            Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    assert logits.dim() == 1  # operates on a single, unbatched distribution
    top_k = min(top_k, logits.size(-1))  # never ask for more tokens than exist
    if top_k > 0:
        # Everything strictly below the k-th largest logit is masked out.
        kth_value = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_value] = filter_value
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Drop positions whose cumulative probability exceeds top_p ...
        drop_sorted = cumulative_probs > top_p
        # ... shifted right by one so the first token crossing the threshold survives.
        drop_sorted[..., 1:] = drop_sorted[..., :-1].clone()
        drop_sorted[..., 0] = 0
        logits[sorted_indices[drop_sorted]] = filter_value
    return logits
def generate_text_by_input(args,text,history,model):
    """Generate one response utterance for ``text`` given the dialogue history.

    Appends both the encoded user input and the generated response ids to
    ``history`` (mutated in place) and returns the response as a token list.

    NOTE(review): the tokenizer is rebuilt on every call — hoisting it to the
    caller would be cheaper.
    """
    tokenizer = BertTokenizerFast(vocab_file=args.vocab_path, sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    text_ids = tokenizer.encode(text, add_special_tokens=False)
    history.append(text_ids)
    input_ids = [tokenizer.cls_token_id]  # every input starts with [CLS]
    # Keep only the most recent max_history_len utterances as context,
    # each terminated by [SEP].
    for history_id, history_utr in enumerate(history[-args.max_history_len:]):
        input_ids.extend(history_utr)
        input_ids.append(tokenizer.sep_token_id)
    input_ids = torch.tensor(input_ids).long().to(model.device)
    input_ids = input_ids.unsqueeze(0)
    response = []  # response generated from the context
    # Generate at most utterance_max_len tokens.
    for _ in range(args.utterance_max_len):
        outputs = model(input_ids=input_ids)
        logits = outputs.logits
        next_token_logits = logits[0, -1, :]
        # Apply a repetition penalty to every token already generated,
        # lowering its probability.
        for id in set(response):
            next_token_logits[id] /= args.repetition_penalty
        next_token_logits = next_token_logits / args.temperature
        # Make [UNK] unselectable by setting its logit to -inf.
        next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')
        filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=args.topk, top_p=args.topp)
        # Sample one token id from the filtered distribution (probability-weighted).
        next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
        if next_token == tokenizer.sep_token_id:  # [SEP] ends the response
            break
        response.append(next_token.item())
        input_ids = torch.cat((input_ids, next_token.unsqueeze(0)), dim=1)
    # his_text = tokenizer.convert_ids_to_tokens(curr_input_tensor.tolist())
    # print("his_text:{}".format(his_text))
    history.append(response)
    text = tokenizer.convert_ids_to_tokens(response)
    return text
def main():
    """Interactive chat loop: read user input, generate a reply, log both.

    Exits on Ctrl-C (KeyboardInterrupt), closing the samples file if open.
    """
    args = set_args()
    logger = create_logger(args)
    # Use the GPU when available and not explicitly disabled.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda' if args.cuda else 'cpu'
    logger.info('using device:{}'.format(device))
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    # tokenizer = BertTokenizer(vocab_file=args.voca_path)
    model = GPT2LMHeadModel.from_pretrained(args.model_path)
    model = model.to(device)
    model.eval()
    if args.save_samples_path:
        if not os.path.exists(args.save_samples_path):
            os.makedirs(args.save_samples_path)
        # Append-mode transcript of the whole chat session.
        samples_file = open(args.save_samples_path + '/samples.txt', 'a', encoding='utf8')
        samples_file.write("聊天记录{}:\n".format(datetime.now()))
    # Dialogue history: each utterance stored as a list of token ids.
    history = []
    print('开始和chatbot聊天,输入CTRL + Z以退出')
    while True:
        try:
            text = input("user:")
            # text = "你好"
            if args.save_samples_path:
                samples_file.write("user:{}\n".format(text))
            text = generate_text_by_input(args,text,history,model)
            print("chatbot:" + "".join(text))
            if args.save_samples_path:
                samples_file.write("chatbot:{}\n".format("".join(text)))
        except KeyboardInterrupt:
            if args.save_samples_path:
                samples_file.close()
            break
# Script entry point: start the interactive chat loop.
if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/generate_eval_data.py | src/gpt/generate_eval_data.py | '''
Author: anon
Date: 2022-02-08 16:12:50
LastEditors: anon
LastEditTime: 2022-02-09 16:07:19
FilePath: /crosstalk-generation/src/gpt/generate_eval_data.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
import transformers
import torch
import os
import json
import random
import numpy as np
import argparse
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
import logging
from transformers import GPT2TokenizerFast, GPT2LMHeadModel, GPT2Config
from transformers import BertTokenizerFast
# from transformers import BertTokenizer
from os.path import join, exists
from itertools import zip_longest, chain
# from chatbot.model import DialogueGPT2Model
from dataset import MyDataset
from torch.utils.data import Dataset, DataLoader
from torch.nn import CrossEntropyLoss
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import io
PAD = '[PAD]'
pad_id = 0
def set_args():
    """
    Sets up the arguments.

    CLI options for batch evaluation-data generation: sampling controls,
    model/vocab/test-file paths, sequence-length limits, the random seed,
    and the output directory for generated samples.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, required=False, help='生成设备')
    parser.add_argument('--temperature', default=1, type=float, required=False, help='生成的temperature')
    parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    parser.add_argument('--log_path', default='/data1/anon/crosstalk-generation/src/gpt/logs/interact.log', type=str, required=False, help='interact日志存放位置')
    parser.add_argument('--vocab_path', default='/data1/anon/crosstalk-generation/pretrain_model/CDial-GPT_LCCC-base/vocab.txt', type=str, required=False, help='选择词库')
    parser.add_argument('--model_path', default='/data1/anon/crosstalk-generation/trained_model_dir/gpt/ml-256-seed-1234/epoch100', type=str, required=False, help='对话模型路径')
    parser.add_argument('--test_filter_data', default="/data1/anon/crosstalk-generation/src/gpt/data/p256-s64/test_filter_50x20.txt", type=str, required=False, help="数据基准文件,n个篇章,每个篇章20行")
    parser.add_argument('--save_samples_path', default="/data1/anon/crosstalk-generation/src/gpt/sample/", type=str, required=False, help="保存跑机器指标数据的文件路径及保存篇章级生成的文件路径")
    parser.add_argument('--repetition_penalty', default=2.0, type=float, required=False,
                        help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
    parser.add_argument('--seed', type=int, default=1234, help='设置种子用于生成随机数,以使得训练的结果是确定的')
    parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
    parser.add_argument('--seq_max_len', type=int, default=256, help='最大输入长度')
    parser.add_argument('--max_history_len', type=int, default=20, help="dialogue history的最大长度")
    parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行预测')
    return parser.parse_args()
def set_random_seed(args):
    """
    Seed torch, random and numpy from ``args.seed`` and force deterministic cuDNN.
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Trade cuDNN autotuning for reproducible kernel selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def create_logger(args):
    """
    Send log output to both a log file and the console.

    The file handler (INFO) writes to ``args.log_path``; the console handler
    logs at DEBUG. Both share the same timestamped format.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s')
    # Handler that writes records to the log file.
    file_handler = logging.FileHandler(
        filename=args.log_path)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    # Handler that echoes records to the console.
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)
    return logger
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Mask a 1-D logits vector in place for top-k and/or nucleus sampling.

    Args:
        logits: logits distribution shape (vocab size)
        top_k > 0: keep only top k tokens with highest probability (top-k filtering).
        top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
            Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    assert logits.dim() == 1  # operates on a single, unbatched distribution
    top_k = min(top_k, logits.size(-1))  # never ask for more tokens than exist
    if top_k > 0:
        # Everything strictly below the k-th largest logit is masked out.
        kth_value = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_value] = filter_value
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Drop positions whose cumulative probability exceeds top_p ...
        drop_sorted = cumulative_probs > top_p
        # ... shifted right by one so the first token crossing the threshold survives.
        drop_sorted[..., 1:] = drop_sorted[..., :-1].clone()
        drop_sorted[..., 0] = 0
        logits[sorted_indices[drop_sorted]] = filter_value
    return logits
def generate_text_by_input(args,text,history,model,tokenizer):
    """Generate one utterance continuing ``history`` + ``text``.

    The context is assembled newest-first so it fits within
    ``args.seq_max_len`` tokens; each utterance is truncated to
    ``args.utterance_max_len`` tokens. Returns the generated tokens as a list.

    NOTE(review): ``history`` is appended to and then rebound to a local
    encoded copy, so — unlike the interactive script — the caller's list only
    receives the raw ``text`` string, not the generated ids.
    """
    history.append(text)
    history = [tokenizer.encode(i, add_special_tokens=False) for i in history]
    input_ids = [tokenizer.cls_token_id]  # every input starts with [CLS]
    history_start_index = 1
    filter_history_sent_ids = []
    # Walk history from newest to oldest, keeping whole utterances while the
    # running total stays within seq_max_len.
    for rev_idx in range(len(history)-1,-1,-1):
        this_turn_ids = history[rev_idx][:args.utterance_max_len] + [tokenizer.sep_token_id]
        if history_start_index + len(this_turn_ids) > args.seq_max_len:
            break
        filter_history_sent_ids.append(this_turn_ids)
        history_start_index += len(this_turn_ids)
    filter_history_sent_ids.reverse()  # restore chronological order
    for sent_ids in filter_history_sent_ids:
        input_ids.extend(sent_ids)
    input_ids = torch.tensor(input_ids).long().to(model.device)
    input_ids = input_ids.unsqueeze(0)
    response = []  # generated response ids
    # Generate at most utterance_max_len tokens.
    for idx in range(args.utterance_max_len):
        outputs = model(input_ids=input_ids)
        logits = outputs.logits
        next_token_logits = logits[0, -1, :]
        # Apply a repetition penalty to every token already generated,
        # lowering its probability.
        for id in set(response):
            next_token_logits[id] /= args.repetition_penalty
        next_token_logits = next_token_logits / args.temperature
        # Make [UNK] unselectable by setting its logit to -inf.
        next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')
        filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=args.topk, top_p=args.topp)
        # Sample one token id from the filtered distribution (probability-weighted).
        next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
        if next_token == tokenizer.sep_token_id and len(response) > 0:  # [SEP] ends a non-empty response
            break
        if next_token == tokenizer.sep_token_id:
            # First sampled token was [SEP]: draw two samples and take the
            # second so the response is never empty.
            next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=2)[1:]
        response.append(next_token.item())
        input_ids = torch.cat((input_ids, next_token.unsqueeze(0)), dim=1)
    # his_text = tokenizer.convert_ids_to_tokens(curr_input_tensor.tolist())
    # print("his_text:{}".format(his_text))
    if len(response) == 0:
        # NOTE(review): debug residue — prints a blank line when nothing was
        # generated; should be unreachable given the re-sampling above.
        print('')
    text = tokenizer.convert_ids_to_tokens(response)
    return text
def get_machine_metric_datas(model,args,tokenizer):
    '''
    Build the data used for machine metrics (BLEU, GLEU, ROUGE).

    For each paragraph of args.test_filter_data (paragraphs separated by a
    blank line, one utterance per line), generate each next line from the
    preceding original lines:
        A_ori               -> B_gen
        A_ori, B_ori        -> C_gen
        A_ori, B_ori, C_ori -> D_gen
    The pairs (B_ori, B_gen), (C_ori, C_gen), ... are written to
    machine_metric_data.json under args.save_samples_path.
    '''
    # Fix: read and write through context managers with an explicit UTF-8
    # encoding; the original io.open(...) handles were never closed.
    with open(args.test_filter_data, 'r', encoding='utf-8') as f:
        raw_content = f.read()
    raw_paras = raw_content.split('\n\n')
    results = []
    for single_para in raw_paras:
        single_lines = single_para.split('\n')
        lines_nums = len(single_lines)
        for step in range(1, lines_nums):
            inputs_text = single_lines[:step]
            # The last context line is the "current" utterance; everything
            # before it is history.
            history = inputs_text[:-1]
            text = inputs_text[-1]
            gen_text_tok = generate_text_by_input(args, text, history, model, tokenizer)
            gen_text = "".join(gen_text_tok)
            print(gen_text)
            ori_text = single_lines[step]
            results.append({'ori': ori_text, 'gen': gen_text})
    data_file_path = os.path.join(args.save_samples_path, 'machine_metric_data.json')
    with open(data_file_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(results, ensure_ascii=False, indent=4))
def generate_human_check_datas(model,args,tokenizer):
    '''
    Generate whole passages for human evaluation.

        pre_data(:10)                 -> A_gen
        pre_data(:10) + A_gen         -> B_gen
        pre_data(:10) + A_gen + B_gen -> C_gen
        ...

    For each paragraph the 10 seed lines plus the generated continuations
    are written as one output paragraph; returns the output file path.
    '''
    # Fix: read and write through context managers with an explicit UTF-8
    # encoding; the original io.open(...) handles were never closed.
    with open(args.test_filter_data, 'r', encoding='utf-8') as f:
        raw_content = f.read()
    raw_paras = raw_content.split('\n\n')
    results = []
    for single_para in raw_paras:
        single_lines = single_para.split('\n')
        lines_nums = len(single_lines)
        history_all = single_lines[:10]  # first 10 original lines seed the generation
        # `step` only counts how many utterances to generate; the prompt is
        # always the growing history_all.
        for step in range(10, lines_nums):
            history = history_all[:-1]
            text = history_all[-1]
            gen_text_tok = generate_text_by_input(args, text, history, model, tokenizer)
            gen_text = "".join(gen_text_tok)
            history_all.append(gen_text)
            print(history_all)
        results.append('\n'.join(history_all))
    data_file_path = os.path.join(args.save_samples_path, 'GPT_turn_ep100_2_rp.txt')
    with open(data_file_path, 'w', encoding='utf-8') as f:
        f.write('\n\n'.join(results))
    return data_file_path
def interact(args,samples_file,model,tokenizer):
    """Run an interactive chat loop on stdin/stdout.

    Every user turn and generated reply is optionally appended to
    ``samples_file``; the loop (and the file) is closed on
    KeyboardInterrupt.
    """
    history = []
    print('开始和chatbot聊天,输入CTRL + Z以退出')
    while True:
        try:
            text = input("user:")
            # text = "你好"
            if args.save_samples_path:
                samples_file.write("user:{}\n".format(text))
            text = generate_text_by_input(args,text,history,model,tokenizer)
            reply = "".join(text)
            print("chatbot:" + reply)
            if args.save_samples_path:
                samples_file.write("chatbot:{}\n".format(reply))
        except KeyboardInterrupt:
            if args.save_samples_path:
                samples_file.close()
            break
def main():
    """Entry point: load the GPT-2 model/tokenizer and produce the
    machine-metric evaluation data."""
    args = set_args()
    set_random_seed(args)
    logger = create_logger(args)
    # Fix: CUDA_VISIBLE_DEVICES must be exported *before* the first CUDA
    # query; in the original it was set after torch.cuda.is_available(),
    # at which point torch may already have enumerated all devices and the
    # mask is ignored.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    # Use the GPU only when one is available and the user did not opt out.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda' if args.cuda else 'cpu'
    logger.info('using device:{}'.format(device))
    model = GPT2LMHeadModel.from_pretrained(args.model_path)
    tokenizer = BertTokenizerFast(vocab_file=args.vocab_path, sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    model = model.to(device)
    model.eval()
    if args.save_samples_path:
        if not os.path.exists(args.save_samples_path):
            os.makedirs(args.save_samples_path)
        # Chat transcripts are appended here, one session header per run.
        samples_file = open(args.save_samples_path + '/samples.txt', 'a', encoding='utf8')
        samples_file.write("聊天记录{}:\n".format(datetime.now()))
        # Fix: close the sample log; this main() only runs the metric
        # generation below and never hands the handle to interact().
        samples_file.close()
    get_machine_metric_datas(model,args,tokenizer)
if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/dataset.py | src/gpt/dataset.py | from torch.utils.data import Dataset
import torch
class MyDataset(Dataset):
    """Dataset of pre-tokenized dialogues.

    Each item of ``input_list`` is a list of token ids; ``__getitem__``
    truncates it to ``max_len`` elements and returns it as a long tensor.
    """

    def __init__(self, input_list, max_len):
        self.input_list = input_list
        self.max_len = max_len

    def __getitem__(self, index):
        # Truncate before tensor conversion so over-long samples are capped.
        return torch.tensor(self.input_list[index][:self.max_len], dtype=torch.long)

    def __len__(self):
        return len(self.input_list)
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/__init__.py | src/gpt/__init__.py | python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false | |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/preprocess.py | src/gpt/preprocess.py | from tokenizers import BertWordPieceTokenizer
from transformers import BertTokenizer
from transformers import BertTokenizerFast
import argparse
import pandas as pd
import pickle,os
import jieba.analyse
from tqdm import tqdm
from transformers import GPT2TokenizerFast, GPT2LMHeadModel
import logging
import numpy as np
def create_logger(log_path):
    """Build a logger that writes INFO+ records to *log_path* and mirrors
    them to the console."""
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # File handler: persists the run log.
    file_handler = logging.FileHandler(filename=log_path)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(fmt)

    # Console handler: live feedback while training/preprocessing.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(fmt)

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    return logger
def reduce_data(data,logger,args,file_name):
    """Tokenize raw dialogue text and pickle it as token-id sequences.

    ``data`` holds paragraphs separated by blank lines, one utterance per
    line.  Each paragraph is encoded as "[CLS] u1 [SEP] u2 [SEP] ...";
    whenever a sequence would exceed ``args.para_max_len`` it is flushed
    and a new one is started.  A raw-text copy and the pickled id lists
    are written under ``args.save_path`` and length statistics are logged.
    """
    # Initialize the tokenizer.
    tokenizer = BertTokenizerFast(vocab_file=args.vocab_path, sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    sep_id = tokenizer.sep_token_id
    cls_id = tokenizer.cls_token_id
    # Line endings differ between Linux and Windows exports.
    if "\r\n" in data:
        train_data = data.split("\r\n\r\n")
    else:
        train_data = data.split("\n\n")
    logger.info("there are {} dialogue in dataset".format(len(train_data)))
    save_file_path = os.path.join(args.save_path,file_name + '.pkl')
    save_content_path = os.path.join(args.save_path,file_name + '_raw.txt')
    # Tokenize every dialogue into "[CLS]u1[SEP]u2[SEP]..." id lists.
    dialogue_len = []  # tokenized lengths of all dialogues (for mean/median stats)
    dialogue_list = []
    utterance_list = []
    for index, dialogue in enumerate(tqdm(train_data)):
        if "\r\n" in data:
            utterances = dialogue.split("\r\n")
        else:
            utterances = dialogue.split("\n")
        input_ids = [cls_id]  # each dialogue starts with [CLS]
        for utterance in utterances:
            utterance_list.append(utterance)
            utterance_list.append('\n')
            utterance_encode = tokenizer.encode(utterance, add_special_tokens=False)
            # Truncate each utterance to sent_max_len-1 so the [SEP] still fits.
            if len(utterance_encode) > args.sent_max_len -1:
                utterance_encode = utterance_encode[:args.sent_max_len -1]
            utterance_encode.append(sep_id)  # [SEP] marks the end of the utterance
            # If the paragraph budget would be exceeded, flush the current
            # sequence and start a fresh one (again led by [CLS]).
            if len(input_ids) + len(utterance_encode) > args.para_max_len:
                dialogue_list.append(input_ids)
                dialogue_len.append(len(input_ids))
                input_ids = [cls_id]
                input_ids += utterance_encode
            # Otherwise keep appending to the current sequence.
            else:
                input_ids += utterance_encode
        utterance_list.append('\n')
        dialogue_len.append(len(input_ids))
        dialogue_list.append(input_ids)
    len_mean = np.mean(dialogue_len)
    len_median = np.median(dialogue_len)
    len_max = np.max(dialogue_len)
    with open(save_content_path,'w') as f:
        f.write(''.join(utterance_list))
    with open(save_file_path, "wb") as f:
        pickle.dump(dialogue_list, f)
    logger.info("finish preprocessing {} data,the result is stored in {}".format(file_name,save_file_path))
    logger.info("mean of dialogue len:{},median of dialogue len:{},max len:{}".format(len_mean, len_median, len_max))
def preprocess():
    """Tokenize the raw corpus into "[CLS]utterance1[SEP]utterance2[SEP]..."
    id sequences for the train/dev/test splits."""
    # Command-line configuration.
    parser = argparse.ArgumentParser()
    parser.add_argument('--vocab_path', default='/data1/anon/crosstalk-generation/pretrain_model/CDial-GPT_LCCC-base/vocab.txt', type=str, required=False,
                        help='词表路径')
    parser.add_argument('--para_max_len', default=256, type=int, required=False, help='单条训练语料最大长度')
    parser.add_argument('--sent_max_len', default=64, type=int, required=False, help='单条句子最大长度')
    parser.add_argument('--data_base', default='/data1/anon/crosstalk-datasets/data_resource/formal_data', type=str, required=False, help='数据文件存储位置')
    parser.add_argument('--save_base', default='/data1/anon/crosstalk-generation/src/gpt/data/', type=str, required=False, help='tokenize的训练数据集')
    args = parser.parse_args()
    args.save_path = os.path.join(args.save_base, f'p{args.para_max_len}-s{args.sent_max_len}')
    # Fix: os.makedirs(exist_ok=True) instead of os.mkdir — creates missing
    # parent directories and is safe against a concurrent-creation race.
    os.makedirs(args.save_path, exist_ok=True)
    # Logger writing into the output directory.
    logger = create_logger(os.path.join(args.save_path, 'preprocess.log'))
    # Tokenize each split; files are read as bytes and decoded explicitly
    # so Windows (\r\n) exports survive intact for reduce_data's detection.
    for file_name in ['train', 'dev', 'test']:
        with open(os.path.join(args.data_base, file_name + '.txt'), 'rb') as f:
            data = f.read().decode("utf-8")
        reduce_data(data, logger, args, file_name)
if __name__ == '__main__':
    preprocess()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/gpt/another_way_rouge.py | src/gpt/another_way_rouge.py | from statistics import mean
from nltk import word_tokenize
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.gleu_score import corpus_gleu
from nltk.translate.meteor_score import meteor_score
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
from sumeval.metrics.rouge import RougeCalculator
# Demo script: compare the ROUGE variants provided by sumeval on
# whitespace-tokenized Chinese sentence pairs (summary vs. reference).
rouge = RougeCalculator(stopwords=False, lang="zh")
rouge_1 = rouge.rouge_n(
    summary="我 今 天 晚 上 必 须 回 家 吃 饭",
    references="晚 上 我 要 回 家 吃 饭",
    n=1)
rouge_2 = rouge.rouge_n(
    summary="我 今 天 晚 上 必 须 回 家 吃 饭",
    references="晚 上 我 要 回 家 吃 饭",
    n=2)
rouge_l = rouge.rouge_l(
    summary="我 今 天 晚 上 必 须 回 家 吃 饭",
    references="晚 上 我 要 回 家 吃 饭")
# You need spaCy to calculate ROUGE-BE
rouge_be = rouge.rouge_be(
    summary="我今天没有吃早饭",
    references=["说你今天吃没吃早饭", "吃早饭了没"])
# Print one metric per line (the ", " separators become newlines).
print("ROUGE-1: {}, ROUGE-2: {}, ROUGE-L: {}, ROUGE-BE: {}".format(
    rouge_1, rouge_2, rouge_l, rouge_be
).replace(", ", "\n"))
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/statistic_t5_results.py | src/t5/statistic_t5_results.py | import argparse
import os,io,json
import pandas as pd
from generate_eval_data import get_machine_metric_datas
from metrics import cal_metrics
from t5_tokenizer import T5PegasusTokenizer
from transformers import MT5ForConditionalGeneration, T5Tokenizer
base_path = '/data1/anon/crosstalk-generation/trained_model_dir/t5-small/ml-256-seed-1234'
def statistic_machine_metrics():
    """Collect the per-epoch machine_metrics.json files into one Excel sheet.

    Walks base_path/epoch{10,15,20,...} until a directory is missing and
    aggregates each epoch's metrics, tagged with its epoch number, into
    metrics_new_t5.xls.
    """
    start_ep = 10
    results = []
    while True:
        sub_dir = f'epoch{start_ep}'
        complete_dir = os.path.join(base_path, sub_dir)
        if not os.path.exists(complete_dir):
            break
        metric_file = os.path.join(complete_dir, 'machine_metrics.json')
        # Fix: read through a context manager with an explicit encoding;
        # the original io.open(...) handle was never closed.
        with open(metric_file, 'r', encoding='utf-8') as f:
            metrics = json.loads(f.read())
        metrics['ep'] = start_ep
        start_ep += 5
        results.append(metrics)
    pd.DataFrame(results).to_excel('metrics_new_t5.xls')
def generate_machine_metrics(args):
    """Score every saved epoch checkpoint with the machine metrics.

    Walks base_path/epoch{10,15,20,...} until a checkpoint directory is
    missing; for each one, loads the MT5 model, generates evaluation
    samples and writes machine_metrics.json next to the checkpoint.
    """
    pretrain_dir = '/data1/anon/crosstalk-generation/pretrain_model/chinese_t5_pegasus_small'
    tokenizer = T5PegasusTokenizer(vocab_file=os.path.join(pretrain_dir,'vocab.txt'), sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    start_ep = 10
    while True:
        sub_dir = f'epoch{start_ep}'
        complete_dir = os.path.join(base_path,sub_dir)
        if not os.path.exists(complete_dir):
            break
        metric_file = os.path.join(complete_dir,'machine_metrics.json')
        model = MT5ForConditionalGeneration.from_pretrained(complete_dir)
        model.to('cuda')  # NOTE(review): assumes a CUDA device is available — confirm
        # Generate the (ori, gen) pairs for this epoch, then score them.
        sample_file = get_machine_metric_datas(model,args,tokenizer,f'smallt5-{start_ep}')
        machine_metrics = cal_metrics(sample_file)
        io.open(metric_file,'w').write(json.dumps(machine_metrics,indent=4))
        print(f'start_ep {start_ep} is finish!')
        start_ep += 5
# Script entry: build the generation/evaluation arguments, produce the
# machine-metric samples for every saved epoch, then summarize them into
# one spreadsheet.
parser = argparse.ArgumentParser()
parser.add_argument('--test_filter_data', default="/data1/anon/crosstalk-generation/src/t5/data/p256-s64/test_filter_50x20.txt", type=str, required=False, help="数据基准文件,n个篇章,每个篇章20行")
parser.add_argument('--temperature', default=1, type=float, required=False, help='生成的temperature')
parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')
parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
parser.add_argument('--repetition_penalty', default=1.0, type=float, required=False,
                    help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
parser.add_argument('--seq_max_len', type=int, default=256, help='篇章最大长度')
parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
parser.add_argument('--save_samples_path', default="/data1/anon/crosstalk-generation/src/t5/sample/", type=str, required=False, help="保存跑机器指标数据的文件路径及保存篇章级生成的文件路径")
args = parser.parse_args()
generate_machine_metrics(args)
statistic_machine_metrics()
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/pytorchtools.py | src/t5/pytorchtools.py | '''
Author: anon
Date: 2022-02-07 11:07:16
LastEditors: anon
LastEditTime: 2022-02-07 11:07:16
FilePath: /crosstalk-generation/src/gpt/pytorchtools.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
import numpy as np
import torch
from os.path import join
import os
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience.

    Call the instance once per epoch with the current validation loss;
    after ``patience`` consecutive non-improving epochs, ``early_stop``
    becomes True.
    """
    def __init__(self, patience=7, verbose=False, delta=0, save_path="."):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            save_path (str): Directory that would hold the best checkpoint.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0          # epochs since the last improvement
        self.best_score = None    # best (negated) validation loss seen
        self.early_stop = False
        # Fix: np.inf instead of np.Inf — the capitalized alias was removed
        # in NumPy 2.0.
        self.val_loss_min = np.inf
        self.delta = delta
        self.save_path = save_path

    def __call__(self, val_loss, model):
        # Scores are negated losses so "higher is better".
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score < self.best_score + self.delta:
            # No (sufficient) improvement this epoch.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        '''Records the new best validation loss.

        Checkpoint writing is intentionally disabled in this project;
        re-enable it by saving ``model`` under ``self.save_path`` here.
        '''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        self.val_loss_min = val_loss
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/metrics.py | src/t5/metrics.py | '''
Author: anon
Date: 2022-01-27 11:39:10
LastEditors: anon
LastEditTime: 2022-02-11 17:47:14
FilePath: /crosstalk-generation/src/gpt/metrics.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
from statistics import mean
from nltk import word_tokenize
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.gleu_score import corpus_gleu
from nltk.translate.meteor_score import meteor_score
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
import io,json
# import nltk
# nltk.download('wordnet')
'''
description: 计算bleu1,2,3,4的值
param {原始句} reference
param {预测句} hypothesis
return bleu1,bleu2,bleu3,bleu4
'''
def calculate_bleu_score(references, candidates):
    """Corpus BLEU-1..4 over character-tokenized reference/candidate pairs.

    Returns:
        tuple: (BLEU-1, BLEU-2, BLEU-3, BLEU-4).
    """
    smooth = SmoothingFunction()
    # Characters are the tokens; each reference list has a single reference.
    refs = [[list(r)] for r in references]
    hyps = [list(c) for c in candidates]
    weight_sets = [
        (1, 0, 0, 0),
        (0.5, 0.5, 0, 0),
        (0.33, 0.33, 0.33, 0),
        (0.25, 0.25, 0.25, 0.25),
    ]
    return tuple(
        corpus_bleu(refs, hyps, weights=w, smoothing_function=smooth.method1)
        for w in weight_sets
    )
'''
description: 计算gleu值
param {原始句} reference
param {预测句} hypothesis
return gleu值
'''
def calculate_gleu_score(references, candidates):
    """Corpus GLEU over character-tokenized reference/candidate pairs."""
    refs = [[list(r)] for r in references]
    hyps = [list(c) for c in candidates]
    return corpus_gleu(refs, hyps)
'''
description: 中文不建议使用,因为依赖了wordnet,wordnet是英文词典
param {原始句} reference
param {预测句} hypothesis
return metetor值
'''
def calculate_meteor_score(references, candidates):
    """Average sentence-level METEOR over character-tokenized pairs.

    Not recommended for Chinese: METEOR relies on WordNet, which is an
    English lexicon.
    """
    refs = [[list(r)] for r in references]
    hyps = [list(c) for c in candidates]
    per_sentence = [meteor_score(ref, hyp) for ref, hyp in zip(refs, hyps)]
    return mean(per_sentence)
'''
description: rouge值计算
param {原始句} reference
param {预测句} hypothesis
return rouge1,rouge2,rougel
'''
def calculate_rouge_score(reference, hypothesis):
    """Average ROUGE-1/2/L F-scores over character-tokenized pairs.

    Pairs that rouge cannot score (e.g. an empty hypothesis raises
    ValueError) are skipped, preserving the original best-effort behavior.

    Returns:
        tuple: (mean ROUGE-1 F, mean ROUGE-2 F, mean ROUGE-L F).
    """
    rouge = Rouge()
    scores = []
    for ref, hyp in zip(reference, hypothesis):
        try:
            scores.append(rouge.get_scores(' '.join([i for i in hyp]), ' '.join([i for i in ref])))
        # Fix: catch ValueError instead of a bare except, which also
        # swallowed KeyboardInterrupt/SystemExit and hid real bugs.
        except ValueError:
            continue
    rouge_1 = [i[0]['rouge-1']['f'] for i in scores]
    rouge_2 = [i[0]['rouge-2']['f'] for i in scores]
    rouge_l = [i[0]['rouge-l']['f'] for i in scores]
    return mean(rouge_1), mean(rouge_2), mean(rouge_l)
def calc_distinct_ngram(pair_list, ngram):
    """Distinct-n: (#unique n-grams) / (#total n-grams) over all predictions.

    Args:
        pair_list: iterable of strings; each is tokenized into characters.
        ngram: n-gram order.

    Returns:
        float: distinct-n in [0, 1]; 0.0 when no sequence yields an n-gram
        (the original raised ZeroDivisionError in that case).
    """
    token_lists = [list(text) for text in pair_list]

    def _count_ngrams(tokens, counts):
        # Accumulate this sequence's n-gram frequencies into `counts`.
        for i in range(0, len(tokens) - ngram + 1):
            key = "".join(tokens[i:(i + ngram)])
            counts[key] = counts.get(key, 0) + 1

    freq = {}
    for tokens in token_lists:
        _count_ngrams(tokens, freq)
    total = float(sum(freq.values()))
    # Fix: guard the empty case instead of dividing by zero.
    if total == 0:
        return 0.0
    return len(freq) / total
def test_demo():
    """Smoke-test the metric helpers on a few hand-written sentence pairs."""
    references = ['我今天晚上必须回家吃饭','广东鸡翅膀,我最爱吃','天天都需要你爱']
    candidates = ['晚上我要回家吃饭','最好吃的是广东鸡翅膀','啦啦啦啦要你爱']
    bleu = calculate_bleu_score(references, candidates)
    # GLEU/ROUGE are computed only to exercise the code paths.
    _ = calculate_gleu_score(references, candidates)
    _ = calculate_rouge_score(references, candidates)
    print(bleu)
def cal_metrics(machine_gen_file):
    """Compute BLEU/GLEU/ROUGE/distinct metrics (scaled to 0-100).

    Args:
        machine_gen_file: path to a JSON list of {'ori': ..., 'gen': ...}
            records as produced by get_machine_metric_datas.

    Returns:
        dict: metric name -> score in [0, 100].
    """
    # Fix: read via a context manager with an explicit encoding; the
    # original io.open(...) handle was never closed.
    with open(machine_gen_file, 'r', encoding='utf-8') as f:
        data_list = json.loads(f.read())
    references = [item['ori'] for item in data_list]
    candidates = [item['gen'] for item in data_list]
    distinct_1 = calc_distinct_ngram(candidates, 1)
    distinct_2 = calc_distinct_ngram(candidates, 2)
    belu_scores = calculate_bleu_score(references, candidates)
    gleu_scores = calculate_gleu_score(references, candidates)
    rouge_scores = calculate_rouge_score(references, candidates)
    result = {
        'bleu_1': belu_scores[0] * 100,
        'bleu_2': belu_scores[1] * 100,
        'bleu_3': belu_scores[2] * 100,
        'bleu_4': belu_scores[3] * 100,
        'gleu': gleu_scores * 100,
        'rouge_1': rouge_scores[0] * 100,
        'rouge_2': rouge_scores[1] * 100,
        'rouge_l': rouge_scores[2] * 100,
        'distinct_1': distinct_1 * 100,
        'distinct_2': distinct_2 * 100
    }
    return result
if __name__ == '__main__':
    print(cal_metrics('/data1/anon/crosstalk-generation/src/t5/sample/machine_metric_data_69.json'))
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/train.py | src/t5/train.py | import argparse
from lib2to3.pgen2 import token
import math
import time
from urllib import response
import torch
import torch.nn.functional as F
import torch.optim as optim
import logging
from datetime import datetime
import os
from torch.utils.data import Dataset, DataLoader
from os.path import join, exists
from torch.nn import CrossEntropyLoss
from tqdm import tqdm
from torch.nn import DataParallel
import transformers
import pickle
import sys,random,io,json
from generate_eval_data import get_machine_metric_datas
from metrics import cal_metrics
from pytorchtools import EarlyStopping
from sklearn.model_selection import train_test_split
from data_parallel import BalancedDataParallel
from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration
import pandas as pd
import torch.nn.utils.rnn as rnn_utils
import numpy as np
from dataset import MyDataset
from t5_tokenizer import T5PegasusTokenizer
def set_args():
    """Parse command-line arguments for T5 crosstalk training.

    Covers device/data/optimization settings, checkpointing cadence, and
    the generation options used when machine metrics are computed at
    checkpoint-save time.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, required=False, help='设置使用哪些显卡')
    parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行训练')
    parser.add_argument('--data_dir', default='/data1/anon/crosstalk-generation/src/t5/data/p256-s64', type=str, required=False, help='数据基础路径路径')
    parser.add_argument('--max_len', default=256, type=int, required=False, help='训练时,输入数据的最大长度')
    parser.add_argument('--log', default=True, help="是否记录日志")
    parser.add_argument('--ignore_index', default=-100, type=int, required=False, help='对于ignore_index的label token不计算梯度')
    parser.add_argument('--epochs', default=100, type=int, required=False, help='训练的最大轮次')
    parser.add_argument('--save_epochs', default=5, type=int, required=False, help='几个epoch保存一次')
    parser.add_argument('--batch_size', default=24, type=int, required=False, help='训练的batch size')
    parser.add_argument('--gpu0_bsz', default=2, type=int, required=False, help='0号卡的batch size')
    parser.add_argument('--lr', default=1.5e-4, type=float, required=False, help='学习率')
    parser.add_argument('--eps', default=1.0e-09, type=float, required=False, help='衰减率')
    parser.add_argument('--log_step', default=100, type=int, required=False, help='多少步汇报一次loss')
    parser.add_argument('--gradient_accumulation_steps', default=4, type=int, required=False, help='梯度积累')
    parser.add_argument('--max_grad_norm', default=2.0, type=float, required=False)
    parser.add_argument('--save_model_base_path', default='/data1/anon/crosstalk-generation/trained_model_dir/t5-pegasus', type=str, required=False,
                        help='模型输出总路径')
    parser.add_argument('--pretrained_model', default='/data1/anon/crosstalk-generation/pretrain_model/t5_pegasus_torch', type=str, required=False,
                        help='预训练的模型的路径')
    parser.add_argument('--seed', type=int, default=1234, help='设置种子用于生成随机数,以使得训练的结果是确定的')
    parser.add_argument('--num_workers', type=int, default=0, help="dataloader加载数据时使用的线程数量")
    parser.add_argument('--patience', type=int, default=0, help="用于early stopping,设为0时,不进行early stopping.early stop得到的模型的生成效果不一定会更好。")
    parser.add_argument('--warmup_rate', type=float, default=0.1, help='warm up步数占总步数的比例')
    # Generation/eval options used for the machine metrics (BLEU, GLEU, ROUGE).
    parser.add_argument('--save_cal_metric', default=True, help="是否在保存时计算机器指标,汇总结果会放在 save_model_base_path 下")
    parser.add_argument('--min_limit_epoch', default=10, type=int, help="在哪个epoch后开始进行机器指标计算,(一先开始的n个epoch可能没有计算价值)")
    parser.add_argument('--test_filter_data', default="/data1/anon/crosstalk-generation/src/t5/data/p256-s64/test_filter_50x20.txt", type=str, required=False, help="数据基准文件,n个篇章,每个篇章20行")
    parser.add_argument('--temperature', default=1, type=float, required=False, help='生成的temperature')
    parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    parser.add_argument('--repetition_penalty', default=1.0, type=float, required=False,
                        help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
    parser.add_argument('--seq_max_len', type=int, default=256, help='篇章最大长度')
    parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
    parser.add_argument('--save_samples_path', default="/data1/anon/crosstalk-generation/src/t5/sample/", type=str, required=False, help="保存跑机器指标数据的文件路径及保存篇章级生成的文件路径")
    args = parser.parse_args()
    return args
def set_random_seed(args):
    """Seed all RNGs (torch / python / numpy) and force deterministic cuDNN
    so training runs are reproducible."""
    seed = args.seed
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    # cuDNN: pick deterministic kernels and disable auto-tuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def create_logger(args):
    """Build a logger that writes INFO+ records to args.log_path and
    mirrors them to the console."""
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # File handler: persists the training log.
    file_handler = logging.FileHandler(filename=args.log_path)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(fmt)

    # Console handler: live feedback while training.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(fmt)

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    return logger
def collate_fn(batch):
    """Pad a batch of (input_ids, labels) tensor pairs to rectangular tensors.

    Inputs are padded with 0 ([PAD]); labels with -100 so the loss function
    ignores the padded positions.
    """
    inputs, targets = zip(*batch)
    padded_inputs = rnn_utils.pad_sequence(list(inputs), batch_first=True, padding_value=0)
    padded_targets = rnn_utils.pad_sequence(list(targets), batch_first=True, padding_value=-100)
    return padded_inputs, padded_targets
# def padding_batch(data_list, pad_id):
# """
# 使用pad_id将data_list的每条数据,填充至data_list中最长的长度
# :param data_list:
# :param pad_id:
# :return:
# """
# # 统计data_list中的最大长度
# max_len = 0
# for data in data_list:
# max_len = max_len if max_len > len(data) else len(data)
#
# # 对数据进行padding
# new_data_list = []
# for data in data_list:
# new_data = data + [pad_id] * (max_len - len(data))
# new_data_list.append(new_data)
# return new_data_list
def load_dataset(logger, args):
    """Load the pickled train/dev/test token-id lists as MyDataset objects.

    Returns:
        tuple: (train_dataset, val_dataset, test_dataset).
    """
    logger.info("loading training dataset and validating dataset")
    datasets = []
    for split in ('train', 'dev', 'test'):
        split_path = os.path.join(args.data_dir, split + '.pkl')
        with open(split_path, "rb") as f:
            datasets.append(MyDataset(pickle.load(f), args.max_len))
    return tuple(datasets)
def train_epoch(model, train_dataloader, optimizer, scheduler, logger,
                epoch, args,tokenizer):
    """Run one training epoch and return its mean loss.

    Performs gradient accumulation, gradient clipping and LR scheduling;
    logs the batch accuracy every ``args.log_step`` batches.  Every
    ``args.save_epochs`` epochs a checkpoint is saved, and (past
    ``args.min_limit_epoch``) the machine metrics are computed for it.
    """
    model.train()
    device = args.device
    ignore_index = args.ignore_index
    epoch_start_time = datetime.now()
    total_loss = 0  # running sum of the loss over the whole epoch
    # epoch_correct_num / epoch_total_num: correctly predicted tokens vs.
    # total predicted tokens across the epoch.
    epoch_correct_num, epoch_total_num = 0, 0
    for batch_idx, (input_ids, labels) in enumerate(train_dataloader):
        # Catch CUDA out-of-memory so one oversized batch doesn't kill the run.
        try:
            input_ids = input_ids.to(device)
            labels = labels.to(device)
            outputs = model.forward(input_ids, labels=labels)
            logits = outputs.logits
            loss = outputs.loss
            loss = loss.mean()
            # Token-level correct/total counts for this batch.
            batch_correct_num, batch_total_num = calculate_acc(logits, labels, ignore_index=ignore_index)
            # Accumulate into the epoch totals.
            epoch_correct_num += batch_correct_num
            epoch_total_num += batch_total_num
            # Batch accuracy.
            batch_acc = batch_correct_num / batch_total_num
            total_loss += loss.item()
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            # Gradient clipping.
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            # Only step after accumulating enough gradients.
            if (batch_idx + 1) % args.gradient_accumulation_steps == 0:
                # Update the parameters.
                optimizer.step()
                # Update the learning rate.
                scheduler.step()
                # Clear accumulated gradients.
                optimizer.zero_grad()
            if (batch_idx + 1) % args.log_step == 0:
                logger.info(
                    "batch {} of epoch {}, loss {}, batch_acc {}, lr {}".format(
                        batch_idx + 1, epoch + 1, loss.item() * args.gradient_accumulation_steps, batch_acc, scheduler.get_lr()))
            del input_ids, outputs
        except RuntimeError as exception:
            if "out of memory" in str(exception):
                logger.info("WARNING: ran out of memory")
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
            else:
                logger.info(str(exception))
                raise exception
    # Epoch-level mean loss and accuracy.
    epoch_mean_loss = total_loss / len(train_dataloader)
    epoch_mean_acc = epoch_correct_num / epoch_total_num
    logger.info(
        "epoch {}: loss {}, predict_acc {}".format(epoch + 1, epoch_mean_loss, epoch_mean_acc))
    if (epoch+1) % args.save_epochs == 0:
        # save model
        logger.info('saving model for epoch {}'.format(epoch + 1))
        model_path = join(args.save_model_path, 'epoch{}'.format(epoch + 1))
        if not os.path.exists(model_path):
            os.mkdir(model_path)
        logger.info(f'{epoch}/{args.epochs} saving model~')
        model_to_save = model.module if hasattr(model, 'module') else model
        model_to_save.save_pretrained(model_path)
        tokenizer.save_pretrained(model_path)
        if args.save_cal_metric and epoch >= args.min_limit_epoch:
            logger.info(f'we should calculate machine metrics in epoch {epoch}')
            # Generate evaluation samples and score them (BLEU/GLEU/ROUGE...).
            generate_file_path = get_machine_metric_datas(model_to_save,args,tokenizer,epoch)
            machine_metrics = cal_metrics(generate_file_path)
            io.open(os.path.join(model_path,'machine_metrics.json'),'w').write(json.dumps(machine_metrics,indent=4))
    logger.info('epoch {} finished'.format(epoch + 1))
    epoch_finish_time = datetime.now()
    logger.info('time for one epoch: {}'.format(epoch_finish_time - epoch_start_time))
    return epoch_mean_loss
def validate_epoch(model, validate_dataloader, logger, epoch, args):
    """Evaluate the model on the validation set and return the mean loss.

    Also logs the perplexity (exp of the mean loss).  CUDA out-of-memory
    is caught and logged; any other RuntimeError is re-raised.
    """
    logger.info("start validating")
    model.eval()
    device = args.device
    ignore_index = args.ignore_index
    epoch_start_time = datetime.now()
    total_loss = 0
    # Catch CUDA out-of-memory so validation degrades gracefully.
    try:
        with torch.no_grad():
            for batch_idx, (input_ids, labels) in enumerate(validate_dataloader):
                input_ids = input_ids.to(device)
                labels = labels.to(device)
                outputs = model.forward(input_ids, labels=labels)
                logits = outputs.logits
                loss = outputs.loss
                loss = loss.mean()
                total_loss += loss.item()
                del input_ids, outputs
        # Mean loss over the validation set.
        epoch_mean_loss = total_loss / len(validate_dataloader)
        logger.info(
            "validate epoch {}: loss {}, ppl {}".format(epoch+1, epoch_mean_loss, np.exp(epoch_mean_loss)))
        epoch_finish_time = datetime.now()
        logger.info('time for validating one epoch: {}'.format(epoch_finish_time - epoch_start_time))
        return epoch_mean_loss
    except RuntimeError as exception:
        if "out of memory" in str(exception):
            logger.info("WARNING: ran out of memory")
            if hasattr(torch.cuda, 'empty_cache'):
                torch.cuda.empty_cache()
        else:
            logger.info(str(exception))
            raise exception
def train(model, logger, train_dataset, validate_dataset, args,tokenizer):
    """Full training driver.

    Builds train/validation dataloaders, the AdamW optimizer and a linear
    warmup scheduler, then alternates train/validate epochs, checkpointing the
    model with the lowest validation loss and applying optional early stopping.

    NOTE(review): relies on module-level names imported elsewhere in this file
    (DataLoader, EarlyStopping, transformers, collate_fn, train_epoch,
    validate_epoch, join, os).
    """
    train_dataloader = DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collate_fn,
        drop_last=True
    )
    validate_dataloader = DataLoader(validate_dataset, batch_size=args.batch_size, shuffle=True,
                                     num_workers=args.num_workers, collate_fn=collate_fn, drop_last=True)
    early_stopping = EarlyStopping(args.patience, verbose=True, save_path=args.save_model_path)
    # Total optimizer steps over the run (gradient-accumulation aware).
    t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.epochs
    optimizer = transformers.AdamW(model.parameters(), lr=args.lr, eps=args.eps)
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=int(args.warmup_rate * t_total), num_training_steps=t_total
    )
    logger.info('starting training')
    # Per-epoch training and validation losses, for the final summary log.
    train_losses, validate_losses = [], []
    # Lowest validation loss seen so far.
    best_val_loss = 10000
    # Start training.
    for epoch in range(args.epochs):
        # ========== train ========== #
        train_loss = train_epoch(
            model=model, train_dataloader=train_dataloader,
            optimizer=optimizer, scheduler=scheduler,
            logger=logger, epoch=epoch, args=args,tokenizer=tokenizer)
        train_losses.append(train_loss)
        # ========== validate ========== #
        validate_loss = validate_epoch(
            model=model, validate_dataloader=validate_dataloader,
            logger=logger, epoch=epoch, args=args)
        validate_losses.append(validate_loss)
        # Save the model with the lowest perplexity so far (a lower perplexity
        # does not necessarily mean better generations).
        if validate_loss < best_val_loss:
            best_val_loss = validate_loss
            logger.info('saving current best model for epoch {}'.format(epoch + 1))
            # NOTE(review): 'min_ppl_model'.format(...) has no placeholder, so
            # the epoch number is never inserted and the same directory is
            # overwritten each time — presumably '{}'-style was intended.
            model_path = join(args.save_model_path, 'min_ppl_model'.format(epoch + 1))
            if not os.path.exists(model_path):
                os.mkdir(model_path)
            # Unwrap DataParallel before saving so weights load cleanly later.
            model_to_save = model.module if hasattr(model, 'module') else model
            model_to_save.save_pretrained(model_path)
        # If patience == 0, early stopping is disabled.
        if args.patience == 0:
            continue
        early_stopping(validate_loss, model)
        if early_stopping.early_stop:
            logger.info("Early stopping")
            break
    logger.info('training finished')
    logger.info("train_losses:{}".format(train_losses))
    logger.info("validate_losses:{}".format(validate_losses))
def caculate_loss(logit, target, pad_idx, smoothing=True):
    """Next-token language-modeling loss for (batch, seq, vocab) logits.

    Shifts logits/targets by one position, then computes either plain
    cross-entropy (ignoring ``pad_idx``) or a label-smoothed loss (eps=0.1)
    averaged over non-pad positions.
    """
    vocab_size = logit.size(-1)
    shifted_logits = logit[..., :-1, :].contiguous().view(-1, vocab_size)
    shifted_target = target[..., 1:].contiguous().view(-1)
    if not smoothing:
        return F.cross_entropy(shifted_logits, shifted_target, ignore_index=pad_idx)
    eps = 0.1
    # Build smoothed one-hot targets: (1 - eps) on the gold class,
    # eps spread uniformly over the remaining classes.
    smooth = torch.zeros_like(shifted_logits).scatter(1, shifted_target.view(-1, 1), 1)
    smooth = smooth * (1 - eps) + (1 - smooth) * eps / (vocab_size - 1)
    log_probs = F.log_softmax(shifted_logits, dim=1)
    per_token_loss = -(smooth * log_probs).sum(dim=1)
    # Average only over positions whose target is not padding.
    keep = shifted_target.ne(pad_idx)
    return per_token_loss.masked_select(keep).mean()
def calculate_acc(logit, labels, ignore_index=-100):
    """Count correct next-token predictions.

    Shifts logits/labels by one position, takes the argmax prediction at each
    position, and ignores positions whose label equals ``ignore_index``.

    Returns:
        (n_correct, n_word): number of correct predictions and number of
        counted (non-ignored) positions.
    """
    flat_logits = logit[..., :-1, :].contiguous().view(-1, logit.size(-1))
    flat_labels = labels[..., 1:].contiguous().view(-1)
    predictions = flat_logits.argmax(dim=-1)
    # Boolean mask: True where the label should be counted.
    counted = flat_labels.ne(ignore_index)
    n_correct = predictions.eq(flat_labels).masked_select(counted).sum().item()
    n_word = counted.sum().item()
    return n_correct, n_word
def main():
    """Training entry point.

    Parses args, sets up logging and the device, builds the T5-PEGASUS
    tokenizer and MT5 model (optionally wrapped for multi-GPU training),
    loads the datasets and runs `train`.
    """
    # Parse command-line arguments.
    args = set_args()
    # Select which GPUs are visible to this process (must happen before CUDA init).
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    # Create the model output directory (encodes max_len and seed).
    args.save_model_path = os.path.join(args.save_model_base_path,f'ml-{args.max_len}-seed-{args.seed}')
    args.log_path = os.path.join(args.save_model_base_path,'train.logs')
    if not os.path.exists(args.save_model_path):
        os.makedirs(args.save_model_path)
    # Create the logger.
    logger = create_logger(args)
    logger.info('#' * 30)
    logger.info(args)
    logger.info('#' * 30)
    # Use the GPU when requested and available.
    # NOTE(review): args.cuda is computed twice (also above), and args.device is
    # reused: it starts as a GPU-id string and is overwritten with 'cuda'/'cpu'.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda' if args.cuda else 'cpu'
    args.device = device
    logger.info('using device:{}'.format(device))
    # Initialize the tokenizer.
    tokenizer = T5PegasusTokenizer(vocab_file=os.path.join(args.pretrained_model,'vocab.txt'), sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    args.sep_id = tokenizer.sep_token_id
    args.pad_id = tokenizer.pad_token_id
    args.cls_id = tokenizer.cls_token_id
    # Create the model.
    model = MT5ForConditionalGeneration.from_pretrained(args.pretrained_model)
    model = model.to(device)
    logger.info('model config:\n{}'.format(model.config.to_json_string()))
    assert model.config.vocab_size == tokenizer.vocab_size
    # Multi-GPU training.
    if args.cuda and torch.cuda.device_count() > 1:
        if args.batch_size != args.gpu0_bsz:
            # Unequal per-GPU batch: give GPU 0 a smaller share.
            model = BalancedDataParallel(args.gpu0_bsz, model, dim=0).cuda()
        else:
            model = DataParallel(model).cuda()
        logger.info("use GPU {} to train".format(args.device))
    # Count model parameters.
    num_parameters = 0
    parameters = model.parameters()
    for parameter in parameters:
        num_parameters += parameter.numel()
    logger.info('number of model parameters: {}'.format(num_parameters))
    # Log the argument settings.
    logger.info("args:{}".format(args))
    # Load the training and validation sets.
    # ========= Loading Dataset ========= #
    train_dataset, val_dataset, test_dataset = load_dataset(logger, args)
    train(model, logger, train_dataset, val_dataset, args, tokenizer)


if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/draw_loss.py | src/t5/draw_loss.py | '''
Author: anon
Date: 2022-02-07 11:05:00
LastEditors: anon
LastEditTime: 2022-02-07 11:05:00
FilePath: /crosstalk-generation/src/gpt/draw_loss.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
import numpy as np
import matplotlib.pyplot as plt
train_loss = [44.48271321031384, 6.583550890944058, 5.2243800984289415, 4.793724752727308, 4.509950302328382, 4.293438681534359, 4.118832167109153, 3.9655399139662433, 3.8359335477190806, 3.7205823993324336, 3.6183518121117038, 3.529470994508356, 3.454126060367527, 3.3876399215002704, 3.327385791979338, 3.2738280115270975, 3.2241040730834905, 3.1793050898645157, 3.1380796483584814, 3.097194073881422, 3.058997189281578, 3.0224359229095, 2.989321135847192, 2.956893386876673, 2.925039403779166, 2.8960848723139083, 2.8673695635078547, 2.838541273156503, 2.812697682255193, 2.7858682384168296, 2.760819730185028, 2.7354322604666974, 2.7120282050362206, 2.688629031988015, 2.666998180500547, 2.6451193193744, 2.6239456789834157, 2.603371260578471, 2.5841847509369815, 2.565063550597743, 2.54530718102491, 2.527386322415861, 2.510184377118161, 2.4923996164386435, 2.4756604128762296, 2.459907821246556, 2.442924183651917, 2.426776533825953, 2.411911290211785, 2.3977591939438554, 2.383567075801075, 2.3692382172982493, 2.3537363544890755, 2.3426682418898532, 2.3303275399190144, 2.315959686444218, 2.30516399629134, 2.293007003140629, 2.2806038615846993, 2.2688294145397676, 2.259110706417184, 2.2488786947906467, 2.2375688153101985, 2.227671702463824, 2.217756683709926, 2.208003384995281, 2.1991461373809584, 2.191128186623853, 2.1806941671927174, 2.172695298436889, 2.1649881235638957, 2.1569865969338813, 2.148207969056036, 2.1422095429628416, 2.1350791305079495, 2.1268079251275025, 2.120990522060179, 2.1148893516762812, 2.107709726803285, 2.1024627320300366, 2.096419497584938, 2.0913853452169806, 2.085944129067256, 2.0798576306579704, 2.077440956615864, 2.071507751762419, 2.0658729361860377, 2.063466019558727, 2.0598251012034883, 2.0547408370147076, 2.051986127733288, 2.049203844536516, 2.0468371813906763, 2.043903685153875, 2.0411412049505047, 2.0387904661490506, 2.0368946077232075, 2.0359663403571995, 2.0344403394182824, 2.0328974296275835]
valid_loss = [6.632119738052938, 4.592151895119023, 4.284870942433675, 4.1230103282542245, 4.030122367763816, 3.9413269297727545, 3.895269082714093, 3.8443484729695543, 3.8023344192920816, 3.781304602682405, 3.753471881058357, 3.7412116174757295, 3.730542713979323, 3.725996570423756, 3.716093263150747, 3.7090776037204303, 3.7108728900505374, 3.713725692015199, 3.7089336805997237, 3.7278165965808143, 3.719184803071423, 3.731867850012497, 3.7419109656431964, 3.753017064195556, 3.758844084457445, 3.7659045639810533, 3.7814736303139327, 3.7981787303526455, 3.80679324631379, 3.817094279969593, 3.8320516451868314, 3.8536363416743056, 3.8638582586128023, 3.8662699947476016, 3.8776759202606583, 3.8897769759377216, 3.9205473858248037, 3.9219261240736345, 3.9313435045729546, 3.9452734907096794, 3.9682855379544315, 3.9751413908331563, 3.983424531336514, 3.9876989508715, 4.0045680049052494, 4.033595425688961, 4.0295334879854385, 4.054913828306109, 4.058664200461913, 4.07562237253813, 4.095141318728248, 4.103051297018461, 4.1247230697643715, 4.131437508859367, 4.146622050217007, 4.158886610904586, 4.184614694007089, 4.1859706075763405, 4.188921441170286, 4.206857152817034, 4.228782836893266, 4.224493684427018, 4.237151153733797, 4.247350576510682, 4.235146184576634, 4.2600116291521495, 4.268260229042386, 4.295261158750065, 4.290834169150141, 4.300445780947201, 4.309303197905282, 4.3218370568343785, 4.333605740300592, 4.338023634714501, 4.359051271391063, 4.346706262258726, 4.375257317150865, 4.376839564225384, 4.384927596629966, 4.391139392912202, 4.396961501825635, 4.409217323840965, 4.4099885875190905, 4.426319476228636, 4.4286229317062, 4.4389782397546504, 4.436613840477489, 4.434759803650164, 4.448284151398133, 4.447159450373546, 4.455189101049833, 4.451785619756515, 4.465889912156673, 4.465894921917782, 4.460803010753382, 4.465916545591622, 4.471542153774392, 4.4720834735026616, 4.473313391394333, 4.477410685607578]
# Plot the per-epoch train/validation loss curves and save the figure.
# (Removed the unused `yerr` array and added plt.close to release the figure.)
fig = plt.figure()
epochs = np.arange(len(train_loss))
plt.plot(epochs, train_loss, label='train-loss')
plt.plot(epochs, valid_loss, label='valid-loss')
plt.legend(loc='lower right')
plt.savefig('t5_small_loss.png')
plt.close(fig)
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/t5_tokenizer.py | src/t5/t5_tokenizer.py | '''
Author: anon
Date: 2022-02-07 11:17:02
LastEditors: anon
LastEditTime: 2022-02-07 14:31:51
FilePath: /crosstalk-generation/src/t5/t5_tokenizer.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
import jieba
from transformers import BertTokenizer
from transformers import MT5ForConditionalGeneration, T5Tokenizer
class T5PegasusTokenizer(BertTokenizer):
    """BertTokenizer variant that word-segments the input first.

    Text is split into words by ``pre_tokenizer`` (jieba by default); words
    found verbatim in the vocabulary are kept whole, anything else falls back
    to BERT's WordPiece sub-word tokenization.
    """

    def __init__(self, pre_tokenizer=lambda x: jieba.cut(x, HMM=False), *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.pre_tokenizer = pre_tokenizer

    def _tokenize(self, text, *arg, **kwargs):
        tokens = []
        for word in self.pre_tokenizer(text):
            if word in self.vocab:
                # The whole word is in the vocabulary: emit it as one token.
                tokens.append(word)
            else:
                # Unknown word: defer to the WordPiece tokenizer.
                tokens.extend(super()._tokenize(word))
        return tokens
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/data_parallel.py | src/t5/data_parallel.py | from torch.nn.parallel import DataParallel
import torch
from torch.nn.parallel._functions import Scatter
from torch.nn.parallel.parallel_apply import parallel_apply
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.

    Unlike torch's stock scatter, per-device ``chunk_sizes`` may be unequal.
    """
    def scatter_map(obj):
        # Tensors are split along `dim` into the requested per-device chunks.
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            except:
                # NOTE(review): bare except + quit() aborts the whole process on
                # any scatter failure; the prints below are the only diagnostics.
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                quit()
        # Containers are scattered element-wise, preserving their structure.
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Non-tensor leaves are replicated (by reference) once per device.
        return [obj for targets in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
    r"""Scatter with support for kwargs dictionary.

    Scatters positional and keyword arguments separately, then pads the
    shorter of the two lists so each device gets one (args, kwargs) pair.
    """
    scattered_inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
    # Pad with empty tuples / dicts until both lists have equal length.
    while len(scattered_inputs) < len(scattered_kwargs):
        scattered_inputs.append(())
    while len(scattered_kwargs) < len(scattered_inputs):
        scattered_kwargs.append({})
    return tuple(scattered_inputs), tuple(scattered_kwargs)
class BalancedDataParallel(DataParallel):
    """DataParallel variant that gives GPU 0 a smaller (possibly zero) share
    of each batch, since GPU 0 also holds the gathered outputs.

    gpu0_bsz: number of samples placed on the first device; remaining samples
    are split evenly across the other devices.
    """

    def __init__(self, gpu0_bsz, *args, **kwargs):
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)

    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            # No devices configured: run the wrapped module directly.
            return self.module(*inputs, **kwargs)
        if self.gpu0_bsz == 0:
            # GPU 0 receives no data, so exclude it from the compute devices.
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate the module once per device that received a chunk.
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        if self.gpu0_bsz == 0:
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        # Run each replica on its device with its slice of the inputs.
        return parallel_apply(replicas, inputs, kwargs, device_ids[:len(inputs)])

    def scatter(self, inputs, kwargs, device_ids):
        # Compute per-device chunk sizes: gpu0_bsz on device 0, the remainder
        # split evenly over the other devices (leftover spread one-by-one).
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
        if gpu0_bsz < bsz_unit:
            chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
            delta = bsz - sum(chunk_sizes)
            for i in range(delta):
                chunk_sizes[i + 1] += 1
            if gpu0_bsz == 0:
                chunk_sizes = chunk_sizes[1:]
        else:
            # GPU 0's share is not smaller than an even split: fall back to
            # the stock DataParallel scatter.
            return super().scatter(inputs, kwargs, device_ids)
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/generate_eval_data.py | src/t5/generate_eval_data.py | '''
Author: anon
Date: 2022-02-09 17:05:01
LastEditors: anon
LastEditTime: 2022-02-09 17:16:53
FilePath: /crosstalk-generation/src/t5/generate_eval_data.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
'''
Author: anon
Date: 2022-02-08 16:12:50
LastEditors: anon
LastEditTime: 2022-02-09 16:07:19
FilePath: /crosstalk-generation/src/gpt/generate_eval_data.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
import transformers
import torch
import os
import json
import random
import numpy as np
import argparse
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
import logging
# from transformers import BertTokenizer
from os.path import join, exists
from itertools import zip_longest, chain
# from chatbot.model import DialogueGPT2Model
from dataset import MyDataset
from torch.utils.data import Dataset, DataLoader
from torch.nn import CrossEntropyLoss
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import io
from t5_tokenizer import T5PegasusTokenizer
from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration
PAD = '[PAD]'
pad_id = 0
def set_args():
    """
    Sets up the arguments.

    Covers sampling hyper-parameters (temperature / top-k / top-p / repetition
    penalty), file locations (vocab, trained model, evaluation data, sample
    output) and the length limits used when packing dialogue history.
    """
    parser = argparse.ArgumentParser()
    # Device and sampling hyper-parameters.
    parser.add_argument('--device', default='0', type=str, required=False, help='生成设备')
    parser.add_argument('--temperature', default=1, type=float, required=False, help='生成的temperature')
    parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    # File locations (logs, vocab, fine-tuned model, evaluation data, output dir).
    parser.add_argument('--log_path', default='/data1/anon/crosstalk-generation/src/t5/logs/interact.log', type=str, required=False, help='interact日志存放位置')
    parser.add_argument('--vocab_path', default='/data1/anon/crosstalk-generation/pretrain_model/t5_pegasus_torch/vocab.txt', type=str, required=False, help='选择词库')
    parser.add_argument('--model_path', default='/data1/anon/crosstalk-generation/trained_model_dir/t5-pegasus/ml-256-seed-1234/epoch90', type=str, required=False, help='对话模型路径')
    parser.add_argument('--test_filter_data', default="/data1/anon/crosstalk-generation/src/t5/data/p256-s64/test_filter_50x20.txt", type=str, required=False, help="数据基准文件,n个篇章,每个篇章20行")
    parser.add_argument('--save_samples_path', default="/data1/anon/crosstalk-generation/src/t5/sample", type=str, required=False, help="保存跑机器指标数据的文件路径及保存篇章级生成的文件路径")
    parser.add_argument('--repetition_penalty', default=2.0, type=float, required=False,
                        help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
    # Reproducibility and context-length limits.
    parser.add_argument('--seed', type=int, default=1234, help='设置种子用于生成随机数,以使得训练的结果是确定的')
    parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
    parser.add_argument('--seq_max_len', type=int, default=256, help='最大输入长度')
    parser.add_argument('--max_history_len', type=int, default=20, help="dialogue history的最大长度")
    parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行预测')
    return parser.parse_args()
def set_random_seed(args):
    """Seed the Python, NumPy and PyTorch RNGs from ``args.seed`` and force
    deterministic cuDNN behaviour so runs are reproducible."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def create_logger(args):
    """
    Create a logger that writes INFO-level records both to ``args.log_path``
    and to the console.

    Fix: the module-level logger is shared, so calling this function more than
    once in a process previously stacked new handlers each time, duplicating
    every log line. Existing handlers are now reused.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    if logger.handlers:
        # Already configured by a previous call: do not add duplicate handlers.
        return logger
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s')
    # Handler that writes to the log file.
    file_handler = logging.FileHandler(
        filename=args.log_path)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    # Handler that echoes records to the console.
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)
    return logger
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (vocab size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317

        The tensor is modified in place (masked entries set to filter_value)
        and also returned.
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # never ask for more than vocab size
    if top_k > 0:
        # Everything strictly below the k-th largest logit is masked out.
        kth_value = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_value] = filter_value
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Mark tokens whose cumulative probability exceeds the nucleus mass.
        drop = cumulative_probs > top_p
        # Shift right so the first token crossing the threshold survives.
        drop[..., 1:] = drop[..., :-1].clone()
        drop[..., 0] = 0
        logits[sorted_indices[drop]] = filter_value
    return logits
def generate_text_by_input(args,text,history_text,model,tokenizer):
    """Generate the next utterance given the latest input and dialogue history.

    Appends ``text`` to ``history_text`` (NOTE: mutates the caller's list),
    packs as much *recent* history as fits within ``args.seq_max_len`` into
    "[CLS] utt [SEP] utt [SEP] ..." and decodes with diverse beam search.

    Returns the generated utterance as a plain string (spaces stripped).
    """
    history_text.append(text)
    history = [tokenizer.encode(i, add_special_tokens=False) for i in history_text]
    input_ids = [tokenizer.cls_token_id]  # every input starts with [CLS]
    history_start_index = 1
    filter_history_sent_ids = []
    # Walk the history backwards so the most recent utterances are kept when
    # the context budget runs out; each utterance is capped at utterance_max_len.
    for rev_idx in range(len(history)-1,-1,-1):
        this_turn_ids = history[rev_idx][:args.utterance_max_len] + [tokenizer.sep_token_id]
        if history_start_index + len(this_turn_ids) > args.seq_max_len:
            break
        filter_history_sent_ids.append(this_turn_ids)
        history_start_index += len(this_turn_ids)
    # Restore chronological order before concatenation.
    filter_history_sent_ids.reverse()
    for sent_ids in filter_history_sent_ids:
        input_ids.extend(sent_ids)
    input_ids = torch.tensor(input_ids).long().to(model.device)
    input_ids = input_ids.unsqueeze(0)
    # Generate at most 64 tokens using 2 beam groups (diverse beam search);
    # the [UNK] token is forbidden in the output.
    output = model.generate(input_ids,decoder_start_token_id=tokenizer.cls_token_id,eos_token_id=tokenizer.sep_token_id
                            ,top_k=1,max_length=64,
                            repetition_penalty=2.0,
                            diversity_penalty=1.2,
                            num_beam_groups=2,
                            num_beams=2,
                            bad_words_ids=[[tokenizer.unk_token_id]]
                            ).cpu().numpy()[0]
    # Drop the leading [CLS] and trailing [SEP], then join without spaces.
    text = ''.join(tokenizer.decode(output[1:-1])).replace(' ', '')
    return text
def get_machine_metric_datas(model,args,tokenizer,mark_label='t5'):
    '''
    Build the data needed for machine metrics (BLEU, GLEU, ROUGE).

    For each dialogue, generate every turn from the *original* preceding turns:
        A_ori -> B_gen
        A_ori, B_ori -> C_gen
        A_ori, B_ori, C_ori -> D_gen
    and collect (original, generated) pairs:
        B_ori, B_gen
        C_ori, C_gen
        D_ori, D_gen

    The pairs are written as JSON under args.save_samples_path and the output
    file path is returned.
    '''
    raw_content = io.open(args.test_filter_data,'r').read()
    # Dialogues are blank-line separated; turns are newline separated.
    raw_paras = raw_content.split('\n\n')
    results = []
    for single_para in raw_paras:
        single_lines = single_para.split('\n')
        lines_nums = len(single_lines)
        for step in range(1,lines_nums):
            # Original turns up to (but excluding) the turn we want to generate.
            inputs_text = single_lines[:step]
            history = inputs_text[:-1]
            text = inputs_text[-1]
            gen_text_tok = generate_text_by_input(args,text, history, model, tokenizer)
            gen_text = "".join(gen_text_tok)
            ori_text = single_lines[step]
            results.append({'ori':ori_text,'gen':gen_text})
    data_file_path = os.path.join(args.save_samples_path,f'machine_metric_data_{mark_label}.json')
    io.open(data_file_path,'w').write(json.dumps(results,ensure_ascii=False, indent=4))
    return data_file_path
def generate_human_check_datas(model,args,tokenizer):
    '''
    Generate full passages for human evaluation.

    Seed the context with the first 10 original turns, then generate the rest
    auto-regressively on the model's own outputs:
        pre_data(:10) -> A_gen
        pre_data(:10) + A_gen -> B_gen
        pre_data(:10) + A_gen + B_gen -> C_gen
    The final passage is
        pre_data(:10)
        A_gen
        B_gen
        C_gen
        ...
    All passages are written (blank-line separated) to args.save_samples_path
    and the output file path is returned.
    '''
    raw_content = io.open(args.test_filter_data,'r').read()
    raw_paras = raw_content.split('\n\n')
    results = []
    for single_para in raw_paras:
        single_lines = single_para.split('\n')
        lines_nums = len(single_lines)
        # First 10 original turns seed the context; generated turns are appended.
        history_all = single_lines[:10]
        for step in range(10,lines_nums):
            history = history_all[:-1]
            text = history_all[-1]
            gen_text_tok = generate_text_by_input(args,text, history, model, tokenizer)
            gen_text = "".join(gen_text_tok)
            history_all.append(gen_text)
        print(history_all)
        results.append('\n'.join(history_all))
    data_file_path = os.path.join(args.save_samples_path,'T5_turn_ep90_1_2_dp_2_rp.txt')
    io.open(data_file_path,'w').write('\n\n'.join(results))
    return data_file_path
def interact(args,samples_file,model,tokenizer):
    """Interactive chat loop: read user input, generate a reply, and (when
    args.save_samples_path is set) log both sides to samples_file.

    The loop exits on KeyboardInterrupt (Ctrl+C), closing the samples file.
    """
    history = []
    print('开始和chatbot聊天,输入CTRL + Z以退出')
    while True:
        try:
            text = input("user:")
            if args.save_samples_path:
                samples_file.write("user:{}\n".format(text))
            # NOTE: generate_text_by_input also appends `text` to `history`,
            # so the dialogue context accumulates across turns.
            text = generate_text_by_input(args,text,history,model,tokenizer)
            print("chatbot:" + "".join(text))
            if args.save_samples_path:
                samples_file.write("chatbot:{}\n".format("".join(text)))
        except KeyboardInterrupt:
            if args.save_samples_path:
                samples_file.close()
            break
def main():
    """Evaluation entry point: load the fine-tuned MT5 model and tokenizer,
    then produce passage-level generations for human evaluation."""
    args = set_args()
    set_random_seed(args)
    logger = create_logger(args)
    # Use the GPU when requested and available.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda' if args.cuda else 'cpu'
    logger.info('using device:{}'.format(device))
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    model = MT5ForConditionalGeneration.from_pretrained(args.model_path)
    tokenizer = T5PegasusTokenizer(vocab_file=args.vocab_path, sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    model = model.to(device)
    model.eval()
    if args.save_samples_path:
        if not os.path.exists(args.save_samples_path):
            os.makedirs(args.save_samples_path)
        # Chat transcripts are appended to a single samples file.
        samples_file = open(args.save_samples_path + '/samples.txt', 'a', encoding='utf8')
        samples_file.write("聊天记录{}:\n".format(datetime.now()))
    # Alternative run modes (kept for reference):
    # get_machine_metric_datas(model,args,tokenizer)
    # interact(args,samples_file,model,tokenizer)
    generate_human_check_datas(model,args,tokenizer)


if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/dataset.py | src/t5/dataset.py | '''
Author: anon
Date: 2022-02-09 14:58:23
LastEditors: anon
LastEditTime: 2022-02-09 15:00:21
FilePath: /crosstalk-generation/src/t5/dataset.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
from torch.utils.data import Dataset
import torch,json
class MyDataset(Dataset):
    """Dataset over JSON-line records of the form {"src": [...], "tgt": [...]}.

    Each item is decoded lazily; both id sequences are truncated to ``max_len``
    and returned as LongTensors ``(input_ids, labels)``.
    """

    def __init__(self, input_list, max_len):
        self.input_list = input_list
        self.max_len = max_len

    def __len__(self):
        return len(self.input_list)

    def __getitem__(self, index):
        record = json.loads(self.input_list[index])
        input_ids = torch.tensor(record['src'][:self.max_len], dtype=torch.long)
        labels = torch.tensor(record['tgt'][:self.max_len], dtype=torch.long)
        return input_ids, labels
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/__init__.py | src/t5/__init__.py | python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false | |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/t5/preprocess.py | src/t5/preprocess.py | from tokenizers import BertWordPieceTokenizer
from transformers import BertTokenizer
from transformers import BertTokenizerFast
import argparse
import pandas as pd
import pickle,os
import jieba.analyse
from tqdm import tqdm
import logging,json
import numpy as np
from t5_tokenizer import T5PegasusTokenizer
def create_logger(log_path):
    """
    Build a logger that emits INFO-level records to *log_path* and echoes
    them to the console.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s')
    # File handler first, then console handler (same order as before).
    to_file = logging.FileHandler(
        filename=log_path)
    to_file.setFormatter(fmt)
    to_file.setLevel(logging.INFO)
    to_console = logging.StreamHandler()
    to_console.setLevel(logging.DEBUG)
    to_console.setFormatter(fmt)
    for handler in (to_file, to_console):
        logger.addHandler(handler)
    return logger
def reduce_data(data,logger,args,file_name):
    """Convert raw dialogue text into (history -> next-utterance) training
    pairs for T5, truncating utterances to args.sent_max_len characters and
    the packed history to args.para_max_len tokens.

    Writes two files under args.save_path:
        <file_name>_raw.txt  - JSON lines of token strings {'src','tgt'}
        <file_name>.pkl      - pickled list of JSON lines of token ids
    """
    # Initialize the tokenizer.
    tokenizer = T5PegasusTokenizer(vocab_file=args.vocab_path, sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
    sep_id = tokenizer.sep_token_id
    cls_id = tokenizer.cls_token_id
    sep_tok = tokenizer.sep_token
    cls_tok = tokenizer.cls_token
    # Dialogues are blank-line separated; handle both Windows (\r\n) and
    # Unix (\n) newline conventions.
    if "\r\n" in data:
        train_data = data.split("\r\n\r\n")
    else:
        train_data = data.split("\n\n")
    logger.info("there are {} dialogue in dataset".format(len(train_data)))
    save_file_path = os.path.join(args.save_path,file_name + '.pkl')
    save_content_path = os.path.join(args.save_path,file_name + '_raw.txt')
    # Tokenize every dialogue into "[CLS]utt1[SEP]utt2[SEP]..." examples.
    dialogue_len = []  # token length of each example, for mean/median stats
    dialogue_list = []
    utterance_list = []
    for index, dialogue in enumerate(tqdm(train_data)):
        if "\r\n" in data:
            utterances = dialogue.split("\r\n")
        else:
            utterances = dialogue.split("\n")
        reduce_max_len_utterances = []
        reduce_max_len_utterances_toks = []
        reduce_max_len_utterances_ids = []
        # Truncate each utterance to sent_max_len characters, keeping aligned
        # text / token / id views, each with a trailing [SEP].
        for idx in range(len(utterances)):
            candi_sent = utterances[idx]
            if len(candi_sent) > args.sent_max_len:
                candi_sent = candi_sent[:args.sent_max_len]
            reduce_max_len_utterances.append(candi_sent)
            reduce_max_len_utterances_toks.append(tokenizer.tokenize(candi_sent) + [sep_tok])
            reduce_max_len_utterances_ids.append(tokenizer.encode(candi_sent, add_special_tokens=False) + [sep_id])
        # One training pair per turn: all preceding turns -> current turn.
        for step in range(1,len(reduce_max_len_utterances)):
            inputs_text_ids = reduce_max_len_utterances_ids[:step]
            inputs_text_toks = reduce_max_len_utterances_toks[:step]
            labels_ids = [cls_id] + reduce_max_len_utterances_ids[step]
            labels_tok = [cls_tok] + reduce_max_len_utterances_toks[step]
            history_start_index = 1
            filter_history_sent = []
            filter_history_sent_ids = []
            input_ids = [cls_id]  # every example starts with [CLS]
            input_toks = [cls_tok]
            # Accumulate history from the *last* sentence backwards, stopping
            # once adding the next one would exceed para_max_len.
            # (Original note: gpt's generate_text_by_input got this wrong by
            # walking from the head of the history — for BCD->E it would
            # produce ABC->E.)
            for rev_idx in range(len(inputs_text_ids)-1,-1,-1):
                this_turn_toks = inputs_text_toks[rev_idx]
                this_turn_ids = inputs_text_ids[rev_idx]
                if history_start_index + len(this_turn_ids) > args.para_max_len:
                    break
                filter_history_sent.append(this_turn_toks)
                filter_history_sent_ids.append(this_turn_ids)
                history_start_index += len(this_turn_ids)
            # Restore chronological order.
            filter_history_sent.reverse()
            filter_history_sent_ids.reverse()
            for his_idx in range(len(filter_history_sent)):
                input_ids.extend(filter_history_sent_ids[his_idx])
                input_toks.extend(filter_history_sent[his_idx])
            content_line = json.dumps({'src':input_toks,'tgt':labels_tok},ensure_ascii=False)
            ids_line = json.dumps({'src':input_ids,'tgt':labels_ids},ensure_ascii=False)
            dialogue_len.append(len(input_ids))
            utterance_list.append(content_line + '\n')
            dialogue_list.append(ids_line + '\n')
    len_mean = np.mean(dialogue_len)
    len_median = np.median(dialogue_len)
    len_max = np.max(dialogue_len)
    with open(save_content_path,'w') as f:
        f.write(''.join(utterance_list))
    with open(save_file_path, "wb") as f:
        pickle.dump(dialogue_list, f)
    logger.info("finish preprocessing {} data,the result is stored in {}".format(file_name,save_file_path))
    logger.info("mean of dialogue len:{},median of dialogue len:{},max len:{}".format(len_mean, len_median, len_max))
def preprocess():
    """
    Tokenize the raw corpus, turning each dialogue into examples of the form
    "[CLS]utterance1[SEP]utterance2[SEP]utterance3[SEP]".
    """
    # Parse arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('--vocab_path', default='/data1/anon/crosstalk-generation/pretrain_model/t5_pegasus_torch/vocab.txt', type=str, required=False,
                        help='词表路径')
    parser.add_argument('--para_max_len', default=256, type=int, required=False, help='单条训练语料最大长度')
    parser.add_argument('--sent_max_len', default=64, type=int, required=False, help='单条句子最大长度')
    parser.add_argument('--data_base', default='/data1/anon/crosstalk-datasets/data_resource/formal_data', type=str, required=False, help='数据文件存储位置')
    parser.add_argument('--save_base', default='/data1/anon/crosstalk-generation/src/t5/data/', type=str, required=False, help='tokenize的训练数据集')
    args = parser.parse_args()
    # Output directory name encodes the paragraph/sentence length limits.
    args.save_path = os.path.join(args.save_base,f'p{args.para_max_len}-s{args.sent_max_len}')
    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)
    # Initialize the logger.
    logger = create_logger(os.path.join(args.save_path,'preprocess.log'))
    # Process each split of the corpus.
    for file_name in ['train','dev','test']:
        with open(os.path.join(args.data_base,file_name + '.txt'), 'rb') as f:
            data = f.read().decode("utf-8")
        reduce_data(data,logger,args,file_name)


if __name__ == '__main__':
    preprocess()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/metric_result_correlation.py | src/beta_code/metric_result_correlation.py | import os
from scipy.stats import pearsonr, spearmanr
import pandas as pd
from beta_code.get_human_check_data_eval_score_pairs import mode_ref_dict
from human_metrics import get_score_map
from beta_code.machine_metrics import cal_metrics
import numpy as np
import matplotlib.pyplot as plt
# Base directory holding the machine-evaluation result files.
machine_data_base_path = '/eval_data/machine_eval/'
# Maps each model id (same ids as the human-eval records) to the JSON file
# holding that model's generations.
mode_metric_file_dict = {
    2:"rnn_predic.json",
    3:"machine_metric_data_gpt_ep50.json",
    4:"machine_metric_data_unilm.json",
    5:"machine_metric_data_zhouwenwang.json",
    6:"machine_metric_t5_pesg_ep15.json",
    7:"gpt3_base_davinci_metric.json",
    8:"gpt3_davinci_ft_metric.json",
    9:"machine_metric_data_CPM_large.json",
    10:"machine_metric_data_pangu_a.json"
}
# Machine metrics (matrix columns) and human metrics (matrix rows).
line_head = ['bleu_1','bleu_2','bleu_3','bleu_4','gleu','rouge_1','rouge_2','rouge_l','distinct_1','distinct_2']
# col_head = ['humor_score','fluent_score','comprehensive_score','diss_score']
col_head = ['comprehensive_score','humor_score','fluent_score','diss_score']
# Display names used for plot axis labels.
col_head_convert = {
    'comprehensive_score':'General','humor_score':'Humor','fluent_score':'Coherence','diss_score':'Ethical-risk'
}
line_head_convert = {
    'bleu_1':'BLEU_1','bleu_2':'BLEU_2','bleu_3':'BLEU_3','bleu_4':'BLEU_4','gleu':'GLEU','rouge_1':'ROUGE_1','rouge_2':'ROUGE_2','rouge_l':'ROUGE_l','distinct_1':'Distinct_1','distinct_2':'Distinct_2'
}
# Human evaluation scores per model name, loaded once at import time.
score_map = get_score_map()
def run_metric_correlation(round_size=4):
    """Correlate machine metrics with human ratings across all models.

    For every model (ids 2..10) the per-model machine metric values and the
    per-model human scores are collected into parallel vectors, then Pearson
    and Spearman coefficients are computed for every
    (human metric, machine metric) pair.

    Returns two len(col_head) x len(line_head) matrices of coefficients
    rounded to *round_size* digits: (pearson, spearman).
    """
    machine_vectors = {}
    human_vectors = {}
    for model_idx in range(2, 11):
        metric_path = os.path.join(machine_data_base_path, 'data', mode_metric_file_dict[model_idx])
        machine_metrics = cal_metrics(metric_path)
        human_metrics = score_map.get(mode_ref_dict[model_idx])
        for key in line_head:
            machine_vectors.setdefault(key, []).append(machine_metrics[key])
        for key in col_head:
            human_vectors.setdefault(key, []).append(human_metrics[key])
    pearson_matrix = [[0] * len(line_head) for _ in col_head]
    spearman_matrix = [[0] * len(line_head) for _ in col_head]
    for h_idx, human_name in enumerate(col_head):
        for m_idx, machine_name in enumerate(line_head):
            human_vec = human_vectors[human_name]
            machine_vec = machine_vectors[machine_name]
            pearson_matrix[h_idx][m_idx] = round(pearsonr(human_vec, machine_vec)[0], round_size)
            spearman_matrix[h_idx][m_idx] = round(spearmanr(human_vec, machine_vec)[0], round_size)
    return pearson_matrix, spearman_matrix
def out_excel():
    """Dump both correlation matrices into metric_correlation.xls.

    The sheet holds a Pearson table followed by a Spearman table, each with a
    header row of machine-metric names and one row per human metric; a blank
    row separates the tables.
    """
    pearson_matrix, spearman_matrix = run_metric_correlation()
    rows = []
    for title, matrix in (('pearsonr', pearson_matrix), ('spearman', spearman_matrix)):
        rows.append([title] + line_head)
        for i in range(4):
            rows.append([col_head[i]] + matrix[i])
        rows.append([])  # blank separator row
    pd.DataFrame(rows).to_excel('metric_correlation.xls')
if __name__ == '__main__':
    # Render one correlation matrix as a heatmap and save it as a PDF.
    # Toggle the coefficient type by editing matrix_type below.
    # matrix_type = 'pearsonr'
    matrix_type = 'spearman'
    score_matrix_pearsonr,score_matrix_spearmanr = run_metric_correlation(2)
    # Variable names kept from the matplotlib heatmap example this is based
    # on: "vegetables" are the y-axis (human metrics), "farmers" the x-axis
    # (machine metrics).
    vegetables = col_head
    farmers = line_head
    if matrix_type == 'pearsonr':
        harvest = np.array(score_matrix_pearsonr)
    else:
        harvest = np.array(score_matrix_spearmanr)
    fig, ax = plt.subplots()
    im = ax.imshow(harvest)
    # Show all ticks and label them with the respective list entries
    ax.set_xticks(np.arange(len(farmers)), labels=[line_head_convert[i] for i in farmers])
    ax.set_yticks(np.arange(len(vegetables)), labels=[col_head_convert[i] for i in vegetables])
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    for i in range(len(vegetables)):
        for j in range(len(farmers)):
            text = ax.text(j, i, format(harvest[i, j],'.2f'),
                           ha="center", va="center", color="w")
    # ax.set_title(f"{matrix_type}-Correlation coefficient")
    fig.tight_layout()
    # plt.savefig(f"{matrix_type}-Correlation coefficient.svg",dpi=600, format='svg')
    plt.savefig(f"{matrix_type}-Correlation coefficient.pdf",bbox_inches='tight')
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/indicator_correlation.py | src/beta_code/indicator_correlation.py | import json,sys
sys.path.append('.')
from beta_code import machine_metrics
from collections import defaultdict
def get_metrics(references, candidates):
    """Compute all machine metrics for one reference/candidate pair of lists.

    Every score is scaled to a percentage (x100) before being returned.
    """
    distinct_1 = machine_metrics.calc_distinct_ngram(candidates, 1)
    distinct_2 = machine_metrics.calc_distinct_ngram(candidates, 2)
    bleu = machine_metrics.calculate_bleu_score(references, candidates)
    gleu = machine_metrics.calculate_gleu_score(references, candidates)
    rouge = machine_metrics.calculate_rouge_score(references, candidates)
    raw_scores = {
        'bleu_1': bleu[0],
        'bleu_2': bleu[1],
        'bleu_3': bleu[2],
        'bleu_4': bleu[3],
        'gleu': gleu,
        'rouge_1': rouge[0],
        'rouge_2': rouge[1],
        'rouge_l': rouge[2],
        'distinct_1': distinct_1,
        'distinct_2': distinct_2,
    }
    return {name: value * 100 for name, value in raw_scores.items()}
def load_model_results():
    """Load generated completions and score every model against the reference.

    Completions are grouped by prompt; the entry under key 1 is the
    human-written ground truth (see mode_ref_dict in the repo), and every
    completion is scored with get_metrics() against the matching prefix of
    that reference.

    Returns: {prompt_id: {model_id: metric_dict}}.
    """
    # Use a context manager instead of json.load(open(...)) so the file
    # handle is closed deterministically rather than leaked.
    with open('../../eval_data/human_eval/data/generate_completions.json') as f:
        model_results = json.load(f)['RECORDS']
    model_result_group_by_prompt = defaultdict(dict)
    for item in model_results:
        model_result_group_by_prompt[item['prompt_id']][item['type']] = item['content']
    model_scores = defaultdict(dict)
    for prompt_id, item in model_result_group_by_prompt.items():
        # item[1] is the reference completion for this prompt.
        label = item[1].split('\n')
        for model_id, content in item.items():
            content = content.split('\n')
            # Truncate the reference to the candidate's length so the
            # corpus-level metrics compare equally many lines.
            model_scores[prompt_id][model_id] = get_metrics(label[:len(content)], content)
    return model_scores
def load_human_results():
    """Load human score records and average them per (prompt, model).

    Each record carries humor (h_score), fluency (f_score), insult (d_score)
    and overall (is_best) ratings from one annotator; the per-annotator lists
    are collected first and then replaced by their arithmetic mean.

    Returns: {prompt_id: {model_id: {score_name: mean_value}}}.
    """
    score_names = ('h_score', 'f_score', 'd_score', 'is_best')
    # Context manager fixes the leaked handle from json.load(open(...)).
    with open('../../eval_data/human_eval/data/score_records.json') as f:
        human_results = json.load(f)['RECORDS']
    grouped = defaultdict(dict)
    for item in human_results:
        # One {score_name: [values...]} accumulator per (prompt, model).
        scores = grouped[item['prompt_id']].setdefault(
            item['model_id'], {name: [] for name in score_names})
        for name in score_names:
            scores[name].append(item[name])
    # Collapse each list of annotator scores to its mean, in place.
    for result_item in grouped.values():
        for scores in result_item.values():
            for name in score_names:
                scores[name] = sum(scores[name]) / len(scores[name])
    return grouped
def correlation(model_scores, human_scores):
    """Correlate per-prompt machine metrics with per-prompt human scores.

    For every model the per-prompt values of each machine metric and each
    human score are collected into vectors, then Pearson and Spearman
    coefficients are computed per (human score, machine metric) pair.
    Model id 1 (the ground-truth reference) is skipped.

    Returns:
        result_list: rows [model_name, human_metric, machine_metric, P, S].
        model_split_dict: the same rows grouped by model name, without the
            model-name column.
    """
    from human_metrics import mode_ref_dict
    from scipy.stats import pearsonr, spearmanr
    machine_names = ["bleu_1", "bleu_2", "bleu_3", "bleu_4", "gleu",
                     "rouge_1", "rouge_2", "rouge_l", "distinct_1", "distinct_2"]
    human_names = ["h_score", "f_score", "d_score", "is_best"]
    model_prompt_result = defaultdict(lambda: {
        "human": {name: [] for name in human_names},
        "machine": {name: [] for name in machine_names},
    })
    result_list = []
    model_split_dict = {}
    # Gather per-prompt values into one vector per (model, metric).
    for pid in model_scores.keys():
        for mid, model_score in model_scores[pid].items():
            for name in machine_names:
                model_prompt_result[mid]['machine'][name].append(model_score[name])
        for mid, human_score in human_scores[pid].items():
            for name in human_names:
                model_prompt_result[mid]["human"][name].append(human_score[name])
    for model_id, model_value in model_prompt_result.items():
        if model_id == 1:
            # id 1 is the human reference, not a generator.
            continue
        model_name = mode_ref_dict.get(model_id)
        for h_score_name, h_score_value in model_value['human'].items():
            for m_score_name, m_score_value in model_value['machine'].items():
                P_value = pearsonr(h_score_value, m_score_value)[0]
                S_value = spearmanr(h_score_value, m_score_value)[0]
                row = [h_score_name, m_score_name, round(P_value, 4), round(S_value, 4)]
                result_list.append([model_name] + row)
                model_split_dict.setdefault(model_name, []).append(row)
    # NOTE: the original had an unreachable print() and commented-out debug
    # code after the return; both removed.
    return result_list, model_split_dict
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/machine_metrics.py | src/beta_code/machine_metrics.py | '''
Author: anon
Date: 2022-01-27 11:39:10
LastEditors: anon
LastEditTime: 2022-02-11 17:47:14
FilePath: /crosstalk-generation/src/gpt/metrics.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
from statistics import mean
from nltk import word_tokenize
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.gleu_score import corpus_gleu
from nltk.translate.meteor_score import meteor_score
from nltk.translate.bleu_score import SmoothingFunction
from rouge import Rouge
import io,json
# import nltk
# nltk.download('wordnet')
'''
description: 计算bleu1,2,3,4的值
param {原始句} reference
param {预测句} hypothesis
return bleu1,bleu2,bleu3,bleu4
'''
def calculate_bleu_score(references, candidates):
    """Corpus-level BLEU-1..4 over character-tokenized text.

    references/candidates: parallel sequences of strings; each string is
    split into single characters before scoring.
    Returns (BLEU_1, BLEU_2, BLEU_3, BLEU_4).
    """
    smooth = SmoothingFunction()
    refs = [[list(sent)] for sent in references]
    hyps = [list(sent) for sent in candidates]
    ngram_weights = [
        (1, 0, 0, 0),
        (0.5, 0.5, 0, 0),
        (0.33, 0.33, 0.33, 0),
        (0.25, 0.25, 0.25, 0.25),
    ]
    return tuple(
        corpus_bleu(refs, hyps, weights=w, smoothing_function=smooth.method1)
        for w in ngram_weights
    )
'''
description: 计算gleu值
param {原始句} reference
param {预测句} hypothesis
return gleu值
'''
def calculate_gleu_score(references, candidates):
    """Corpus-level GLEU over character-tokenized text.

    references/candidates: parallel sequences of strings, each split into
    single characters before scoring.
    """
    refs = [[list(sent)] for sent in references]
    hyps = [list(sent) for sent in candidates]
    return corpus_gleu(refs, hyps)
'''
description: 中文不建议使用,因为依赖了wordnet,wordnet是英文词典
param {原始句} reference
param {预测句} hypothesis
return metetor值
'''
def calculate_meteor_score(references, candidates):
    """Average sentence-level METEOR over character-tokenized text.

    Not recommended for Chinese: METEOR relies on WordNet, which is an
    English lexicon (as the original comment warned).
    """
    refs = [[list(sent)] for sent in references]
    hyps = [list(sent) for sent in candidates]
    return mean(meteor_score(ref, hyp) for ref, hyp in zip(refs, hyps))
'''
description: rouge值计算
param {原始句} reference
param {预测句} hypothesis
return rouge1,rouge2,rougel
'''
def calculate_rouge_score(reference, hypothesis):
    """Mean ROUGE-1/2/L F1 over character-tokenized sentence pairs.

    Pairs the rouge library cannot score (e.g. empty strings) are skipped;
    at least one pair must succeed or statistics.mean will raise.
    Returns (rouge_1, rouge_2, rouge_l).
    """
    rouge = Rouge()
    scores = []
    for ref, hyp in zip(reference, hypothesis):
        try:
            scores.append(rouge.get_scores(' '.join(hyp), ' '.join(ref)))
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; library errors on unscorable pairs are
            # still skipped deliberately (best-effort averaging).
            continue
    rouge_1 = [s[0]['rouge-1']['f'] for s in scores]
    rouge_2 = [s[0]['rouge-2']['f'] for s in scores]
    rouge_l = [s[0]['rouge-l']['f'] for s in scores]
    return mean(rouge_1), mean(rouge_2), mean(rouge_l)
def calc_distinct_ngram(pair_list, ngram):
    """Distinct-n: the ratio of unique character n-grams to all n-grams.

    pair_list: sequence of strings; each string is character-tokenized.
    Returns distinct_count / total_count as a float; raises
    ZeroDivisionError when no n-gram can be formed at all.
    """
    # Frequency table of every character n-gram across all sentences.
    freq = {}
    for sentence in pair_list:
        chars = list(sentence)
        for start in range(len(chars) - ngram + 1):
            token = "".join(chars[start:start + ngram])
            freq[token] = freq.get(token, 0) + 1
    total = float(sum(freq.values()))
    return len(freq) / total
def test_demo():
    """Smoke-test the metric helpers on three toy sentence pairs."""
    references = ['我今天晚上必须回家吃饭', '广东鸡翅膀,我最爱吃', '天天都需要你爱']
    candidates = ['晚上我要回家吃饭', '最好吃的是广东鸡翅膀', '啦啦啦啦要你爱']
    bleu = calculate_bleu_score(references, candidates)
    gleu = calculate_gleu_score(references, candidates)
    # METEOR is intentionally not exercised (WordNet is English-only).
    rouge = calculate_rouge_score(references, candidates)
    print(bleu)
def cal_metrics(machine_gen_file):
    """Score one model's generations against the originals.

    machine_gen_file: path to a JSON list of {"ori": reference,
    "gen": candidate} records.
    Returns a dict of all metrics scaled to percentages (x100).
    """
    # Read with a context manager so the handle is closed instead of leaked.
    with io.open(machine_gen_file, 'r') as f:
        data_list = json.load(f)
    references = [item['ori'] for item in data_list]
    candidates = [item['gen'] for item in data_list]
    distinct_1 = calc_distinct_ngram(candidates, 1)
    distinct_2 = calc_distinct_ngram(candidates, 2)
    belu_scores = calculate_bleu_score(references, candidates)
    gleu_scores = calculate_gleu_score(references, candidates)
    rouge_scores = calculate_rouge_score(references, candidates)
    return {
        'bleu_1': belu_scores[0] * 100,
        'bleu_2': belu_scores[1] * 100,
        'bleu_3': belu_scores[2] * 100,
        'bleu_4': belu_scores[3] * 100,
        'gleu': gleu_scores * 100,
        'rouge_1': rouge_scores[0] * 100,
        'rouge_2': rouge_scores[1] * 100,
        'rouge_l': rouge_scores[2] * 100,
        'distinct_1': distinct_1 * 100,
        'distinct_2': distinct_2 * 100,
    }
if __name__ == '__main__':
    # Score every generation file found under ./data next to this script.
    import os
    script_dir = os.path.dirname(os.path.realpath(__file__))
    all_results = {}
    for root, _dirs, files in os.walk(os.path.join(script_dir, 'data')):
        for file_name in files:
            metrics = cal_metrics(os.path.join(root, file_name))
            all_results[file_name] = metrics
            print(file_name + ":" + str(metrics))
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/muti_heatmap_demo.py | src/beta_code/muti_heatmap_demo.py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
"potato", "wheat", "barley"]
farmers = ["Farmer Joe", "Upland Bros.", "Smith Gardening",
"Agrifun", "Organiculture", "BioGoods Ltd.", "Cornylee Corp."]
harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
[2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
[1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
[0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
[0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
[1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
[0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
def heatmap(data, row_labels, col_labels, ax=None,
            cbar_kw=None, cbarlabel="", **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.

    Parameters
    ----------
    data
        A 2D numpy array of shape (M, N).
    row_labels
        A list or array of length M with the labels for the rows.
    col_labels
        A list or array of length N with the labels for the columns.
    ax
        A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
        not provided, use current axes or create a new one. Optional.
    cbar_kw
        A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
    cbarlabel
        The label for the colorbar. Optional.
    **kwargs
        All other arguments are forwarded to `imshow`.
    """
    # Fix: the default used to be a mutable dict ({}), shared between calls,
    # so colorbar settings could leak from one plot into the next. Build a
    # fresh dict per call; passing an explicit dict behaves as before.
    if cbar_kw is None:
        cbar_kw = {}
    if not ax:
        ax = plt.gca()
    # Plot the heatmap
    im = ax.imshow(data, **kwargs)
    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
    # Show all ticks and label them with the respective list entries.
    ax.set_xticks(np.arange(data.shape[1]), labels=col_labels)
    ax.set_yticks(np.arange(data.shape[0]), labels=row_labels)
    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False,
                   labeltop=True, labelbottom=False)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")
    # Turn spines off and create white grid.
    ax.spines[:].set_visible(False)  # slicing spines requires matplotlib >= 3.4
    ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)
    return im, cbar
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
                     textcolors=("black", "white"),
                     threshold=None, **textkw):
    """
    A function to annotate a heatmap.

    Parameters
    ----------
    im
        The AxesImage to be labeled.
    data
        Data used to annotate. If None, the image's data is used. Optional.
    valfmt
        The format of the annotations inside the heatmap. This should either
        use the string format method, e.g. "$ {x:.2f}", or be a
        `matplotlib.ticker.Formatter`. Optional.
    textcolors
        A pair of colors. The first is used for values below a threshold,
        the second for those above. Optional.
    threshold
        Value in data units according to which the colors from textcolors are
        applied. If None (the default) uses the middle of the colormap as
        separation. Optional.
    **kwargs
        All other arguments are forwarded to each call to `text` used to create
        the text labels.
    """
    # Fall back to the image's own array when no explicit data is given.
    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()
    # Normalize the threshold to the images color range.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max())/2.
    # Set default alignment to center, but allow it to be
    # overwritten by textkw.
    kw = dict(horizontalalignment="center",
              verticalalignment="center")
    kw.update(textkw)
    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
    # Loop over the data and create a `Text` for each "pixel".
    # Change the text's color depending on the data.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Formatter protocol: valfmt(value, position); position is unused.
            kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
            text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)
    return texts
# Demo script (adapted from the matplotlib gallery): draws four example
# heatmaps in a 2x2 grid to exercise heatmap() and annotate_heatmap().
np.random.seed(19680801)
fig, ((ax, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))
# Replicate the above example with a different font size and colormap.
im, _ = heatmap(harvest, vegetables, farmers, ax=ax,
                cmap="Wistia", cbarlabel="harvest [t/year]")
annotate_heatmap(im, valfmt="{x:.1f}", size=7)
# Create some new data, give further arguments to imshow (vmin),
# use an integer format on the annotations and provide some colors.
data = np.random.randint(2, 100, size=(7, 7))
y = ["Book {}".format(i) for i in range(1, 8)]
x = ["Store {}".format(i) for i in list("ABCDEFG")]
im, _ = heatmap(data, y, x, ax=ax2, vmin=0,
                cmap="magma_r", cbarlabel="weekly sold copies")
annotate_heatmap(im, valfmt="{x:d}", size=7, threshold=20,
                 textcolors=("red", "white"))
# Sometimes even the data itself is categorical. Here we use a
# `matplotlib.colors.BoundaryNorm` to get the data into classes
# and use this to colorize the plot, but also to obtain the class
# labels from an array of classes.
data = np.random.randn(6, 6)
y = ["Prod. {}".format(i) for i in range(10, 70, 10)]
x = ["Cycle {}".format(i) for i in range(1, 7)]
qrates = list("ABCDEFG")
norm = matplotlib.colors.BoundaryNorm(np.linspace(-3.5, 3.5, 8), 7)
fmt = matplotlib.ticker.FuncFormatter(lambda x, pos: qrates[::-1][norm(x)])
im, _ = heatmap(data, y, x, ax=ax3,
                cmap=plt.get_cmap("PiYG", 7), norm=norm,
                cbar_kw=dict(ticks=np.arange(-3, 4), format=fmt),
                cbarlabel="Quality Rating")
annotate_heatmap(im, valfmt=fmt, size=9, fontweight="bold", threshold=-1,
                 textcolors=("red", "black"))
# We can nicely plot a correlation matrix. Since this is bound by -1 and 1,
# we use those as vmin and vmax. We may also remove leading zeros and hide
# the diagonal elements (which are all 1) by using a
# `matplotlib.ticker.FuncFormatter`.
corr_matrix = np.corrcoef(harvest)
im, _ = heatmap(corr_matrix, vegetables, vegetables, ax=ax4,
                cmap="PuOr", vmin=-1, vmax=1,
                cbarlabel="correlation coeff.")
def func(x, pos):
    # Compact cell labels: drop leading zeros, hide the all-1.00 diagonal.
    return "{:.2f}".format(x).replace("0.", ".").replace("1.00", "")
annotate_heatmap(im, valfmt=matplotlib.ticker.FuncFormatter(func), size=7)
plt.tight_layout()
plt.show()
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/Simple_relation.py | src/beta_code/Simple_relation.py | python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false | |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/out_excel.py | src/beta_code/out_excel.py | import pandas as pd
from beta_code.get_human_check_data_eval_score_pairs import mode_ref_dict
from beta_code.indicator_correlation import load_model_results,load_human_results,correlation
model_r = load_model_results()
human_r = load_human_results()
correlation_result,model_split_dict = correlation(model_r, human_r)
# Machine metrics (matrix columns) and human metrics (matrix rows).
line_head = ['bleu_1','bleu_2','bleu_3','bleu_4','gleu','rouge_1','rouge_2','rouge_l','distinct_1','distinct_2']
col_head = ['h_score','f_score','is_best','d_score']
def single_model_print(model_desc):
    """Rearrange one model's correlation rows into two score matrices.

    model_desc: rows of [human_metric, machine_metric, pearson, spearman].
    Returns (pearson_matrix, spearman_matrix), each indexed
    [col_head position][line_head position]; unmentioned cells stay 0.
    """
    pearson_matrix = [[0] * len(line_head) for _ in col_head]
    spearman_matrix = [[0] * len(line_head) for _ in col_head]
    for item in model_desc:
        row = col_head.index(item[0])
        col = line_head.index(item[1])
        pearson_matrix[row][col] = item[2]
        spearman_matrix[row][col] = item[3]
    return pearson_matrix, spearman_matrix
def run_all_rel_data():
    """Export mean and per-model correlation tables to pearson.xls/spearman.xls.

    The first table holds the across-model means, followed by one table per
    model (ids 2..10); tables are separated by blank rows.
    """
    def as_table(title, matrix):
        # Header row of machine-metric names, one row per human metric,
        # then a blank separator row.
        rows = [[title] + line_head]
        rows.extend([col_head[i]] + matrix[i] for i in range(4))
        rows.append([])
        return rows
    all_pearson = []
    all_spearman = []
    mean_pearson, mean_spearman = run_combine_rel_data()
    all_pearson.extend(as_table('mean', mean_pearson))
    all_spearman.extend(as_table('mean', mean_spearman))
    for idx in range(2, 11):
        model_name = mode_ref_dict[idx]
        pearson_matrix, spearman_matrix = single_model_print(model_split_dict[model_name])
        all_pearson.extend(as_table(model_name, pearson_matrix))
        all_spearman.extend(as_table(model_name, spearman_matrix))
    print()
    pd.DataFrame(all_spearman).to_excel('spearman.xls')
    pd.DataFrame(all_pearson).to_excel('pearson.xls')
def run_combine_rel_data():
    """Average the per-model correlation matrices over all models (ids 2..10).

    Returns (mean_pearson_matrix, mean_spearman_matrix), each cell rounded
    to 4 digits.
    """
    n_rows, n_cols = len(col_head), len(line_head)
    sum_pearson = [[0] * n_cols for _ in range(n_rows)]
    sum_spearman = [[0] * n_cols for _ in range(n_rows)]
    model_count = 0
    for idx in range(2, 11):
        model_name = mode_ref_dict[idx]
        pearson_matrix, spearman_matrix = single_model_print(model_split_dict[model_name])
        model_count += 1
        for r in range(n_rows):
            for c in range(n_cols):
                sum_pearson[r][c] += pearson_matrix[r][c]
                sum_spearman[r][c] += spearman_matrix[r][c]
    # Turn the running sums into rounded means, in place.
    for r in range(n_rows):
        for c in range(n_cols):
            sum_pearson[r][c] = round(sum_pearson[r][c] / model_count, 4)
            sum_spearman[r][c] = round(sum_spearman[r][c] / model_count, 4)
    return sum_pearson, sum_spearman
if __name__ == '__main__':
    # Export the full set of correlation tables; the combined-only variant
    # below is kept for manual use.
    run_all_rel_data()
    # run_combine_rel_data()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/__init__.py | src/beta_code/__init__.py | python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false | |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/matlibplot_heatmap.py | src/beta_code/matlibplot_heatmap.py | import numpy as np
import matplotlib.pyplot as plt
from beta_code.indicator_correlation import load_model_results,load_human_results,correlation
model_r = load_model_results()
human_r = load_human_results()
correlation_result,model_split_dict = correlation(model_r, human_r)
# Axis labels for the heatmap: machine metrics on x, human metrics on y.
line_head = ['bleu_1','bleu_2','bleu_3','bleu_4','gleu','rouge_1','rouge_2','rouge_l','distinct_1','distinct_2']
col_head = ['h_score','f_score','is_best','d_score']
def single_model_print(model_desc):
    """Convert correlation rows into (pearson, spearman) score matrices.

    Each row of model_desc is [human_metric, machine_metric, pearson_value,
    spearman_value]; cells not present in model_desc stay 0.
    """
    n_rows, n_cols = len(col_head), len(line_head)
    pearson_matrix = [[0] * n_cols for _ in range(n_rows)]
    spearman_matrix = [[0] * n_cols for _ in range(n_rows)]
    for entry in model_desc:
        r = col_head.index(entry[0])
        c = line_head.index(entry[1])
        pearson_matrix[r][c] = entry[2]
        spearman_matrix[r][c] = entry[3]
    return pearson_matrix, spearman_matrix
# Plot one model's correlation matrix as a labeled heatmap.
# Choose the model and coefficient type by editing the two settings below.
model_type = 'T5'
matrix_type = 'pearsonr'
score_matrix_pearsonr,score_matrix_spearmanr = single_model_print(model_split_dict[model_type])
# Variable names kept from the matplotlib heatmap example this is based on:
# "vegetables" label the rows (human metrics), "farmers" the columns
# (machine metrics).
vegetables = col_head
farmers = line_head
if matrix_type == 'pearsonr':
    harvest = np.array(score_matrix_pearsonr)
else:
    harvest = np.array(score_matrix_spearmanr)
fig, ax = plt.subplots()
im = ax.imshow(harvest)
# Show all ticks and label them with the respective list entries
ax.set_xticks(np.arange(len(farmers)), labels=farmers)
ax.set_yticks(np.arange(len(vegetables)), labels=vegetables)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
         rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(vegetables)):
    for j in range(len(farmers)):
        text = ax.text(j, i, harvest[i, j],
                       ha="center", va="center", color="w")
ax.set_title(f"model-{model_type}-{matrix_type}-Correlation coefficient")
fig.tight_layout()
plt.show()
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/get_human_check_data_eval_score_pairs.py | src/beta_code/get_human_check_data_eval_score_pairs.py | import os,json,io
import pandas as pd
this_dir = os.path.split(os.path.realpath(__file__))[0]
'''
generate_completions.json 生成的后10段文本
meta_prompt.json 50个前10段文本
score_records.json 评分记录
user_list.json 评分用户
'''
# Mapping from model id (as stored in the eval DB) to model name;
# id 1 is the human-written ground truth, ids 2-10 are generators.
mode_ref_dict = {
    1: "真实数据",
    2:"rnn",
    3:"GPT",
    4:"unilm",
    5:"zhouwenwang",
    6:"T5",
    7:"GPT3",
    8:"GPT3-finetune",
    9:"CPM",
    10:"PANGU-a"
}
# Field documentation for one score record (values are Chinese descriptions
# used by annotators; keep them verbatim).
score_desc_dict = {
    'prompt_id':'对应哪一个前10句 meta_prompt.json中的id',
    'model_id':'对应哪一个模型 mode_ref_dict',
    'h_score':'幽默度打分,0~5 高分好',
    'f_score':'是否通顺 通顺1,不通顺0',
    'd_score':'是否侮辱 侮辱1,不侮辱0',
    'is_best':'综合打分 0~5 高分好',
    'user':'评分人'
}
# Field documentation for one generated-text record.
generate_desc_dict = {
    'content':'文本',
    'type':'对应的模型类型 mode_ref_dict',
    'prompt_id':'对应的上文id',
}
if __name__ == '__main__':
    # Data-only module; nothing to run.
    pass
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/gpt3_finetune_guide/__init__.py | src/beta_code/gpt3_finetune_guide/__init__.py | python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false | |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/gpt3_prompt_generate/Prompt_rule_generator.py | src/beta_code/gpt3_prompt_generate/Prompt_rule_generator.py | import os,sys,io,json,random
from pathlib import Path
this_dir = os.path.split(os.path.realpath(__file__))[0]
'''
多热点,多模板多样性
'''
def get_filter_prompt():
    """Load the human-curated evaluation prompts (meta_prompt.json).

    Returns the list stored under the file's 'RECORDS' key.
    """
    human_ref_data_dir = os.path.join(Path(this_dir).parent.parent, 'eval_data', 'human_eval')
    filter_prompt_file_path = os.path.join(human_ref_data_dir, 'data', 'meta_prompt.json')
    # Context manager fixes the leaked handle from json.loads(io.open(...).read()).
    with io.open(filter_prompt_file_path, 'r') as f:
        return json.load(f)['RECORDS']
word_ref_pool = {
'#postive#':['高兴','欣慰','欢喜','爽','开心','舒服','心里美'],
'#negtive#':['郁闷','难过','忧虑','难过','不好受','烦'],
'#KEY#':['#KEY#'],
'#QC#':['聊聊','说说','谈谈','讲讲','唠唠','掰扯掰扯','研究研究']
}
negtive_hotpoint = ['佩洛西窜访台湾','林志颖出车祸了','互联网大厂裁员','烂尾楼事件','研究所招聘硕士保安','我论文投NeurIPS被打了一个低分','薇娅偷税漏税','河南暴雨','新疆棉事件',
'新冠肺炎事件','瑞幸造假事件','西昌森林火灾','打卡式旅游','乔碧萝殿下','北大毕业生送外卖','大学生网贷','抖音式生活','超前消费','行车不规范 亲人两行泪',
'明星数据造假','娘炮文化','AI换脸','演员天价片酬','人造节日','骚扰电话','共享单车涨价','打击盗版电影','唐山打人案','我爸是李刚',
'P2P平台爆雷','高铁霸座男','美国枪击案','小镇做题家','网络炫富','出生率下降','程序员996加班']
postive_hotpoint = \
['新冠口服特效药','军工股涨停','网络直播带货','特斯拉造机器人','GPT3写论文研究自己','暗黑三手游上线','开放三胎','孟晚舟回国','鸿星尔克爆卖','东京奥运会',
'神舟十二号火箭飞行任务成功','G20峰会将在沙特举行','5G网络将覆盖所有地级市','科教兴国','打工人才是人上人','国潮品牌','广场舞','垃圾分类','人工智能画家','文明旅游',
'网络文学','正当防卫','中国锦鲤','建议专家不要建议']
all_hotpoint = negtive_hotpoint + postive_hotpoint
start_sentence_pool = [
['您好!','您好!'],
['人来的不少,我很#postive#。','八百年前的老词,又用到这了!'],
['各位同胞们!想死你们了!','特别是今晚第一次来到我们当中的,看谁谁顺眼,心里特#postive#。'],
['好险啊!','你这人有毛病,怎么一见面就大叫“好险啊”。'],
['亲爱的朋友们','接下来由我俩为大家演出'],
['您在这干嘛呢?','我们这是表演相声。'],
['朋友们好,新年吉祥!','给各位拜年了'],
['今天咱俩说什么?','今天咱俩#QC#点儿不一样的。'],
['在场的亲爱的观众朋友们!大家好!','今天晚上来人不少啊,满坑满谷!'],
['来了老弟?','嚯!,好久不见!'],
['我最近发现一个秘密!','给咱们大伙分享#QC#?']
]
under_take_sentence_pool = [
['最近在忙什么呀','说出来吓死你','什么呀这么神秘'],
['今天很#postive#呀','是,来了这么多的朋友'],
['今天我和我的搭档在这儿给大家讲一段','对,讲一段'],
['我最近有些#negtive#','嚯,有什么可#negtive#的呀'],
['今天特别#postive#','什么事儿呀这么#postive#'],
['今天特别#negtive#','什么事儿呀这么#negtive#'],
['别提了,#negtive#着呢!','什么事这么#negtive#?'],
]
inspired_sentence_pool = [
['今天我们来#QC##KEY#','#KEY#?'],
['我最近比较关心时事','那咱们来#QC##KEY#'],
['我最近比较关心时事','心系天下!','对,就比如这个#KEY#'],
['我最近比较关心时事','什么时事呀?','就比如最近的这个#KEY#'],
['最近有很多大事发生!','对,很多大事','就比如最近的#KEY#'],
['我们#QC#这个#KEY#'],
['你听说那个#KEY#了么','听说了,都在讨论这个'],
['你听说那个#KEY#了么','听说了,这可是个热点'],
['你听说那个#KEY#了么','怎么,您有研究?'],
['#KEY#,你没听说么!','听说了,都在讨论这个'],
['#KEY#,你没听说么!','听说了,这可是个热点'],
['#KEY#,你没听说么!','怎么,您有研究?'],
['我最近研究了这个#KEY#','嚯,您这厉害了','那可不是'],
]
if __name__ == '__main__':
    # Build candidate prompts by randomly combining an opening exchange, a
    # transition exchange and a topic-introducing exchange, then filling the
    # placeholder slots. Duplicates are collapsed by the set, so the output
    # may hold fewer than total_nums prompts.
    total_nums = 300
    prompt_res_list = set()
    for _ in range(total_nums):
        # random.choice replaces the manual randint(0, len-1) indexing.
        start_sentence = random.choice(start_sentence_pool)
        undertake_sentence = random.choice(under_take_sentence_pool)
        inspired_sentence = random.choice(inspired_sentence_pool)
        # Pick a hot topic whose sentiment matches the transition template
        # (templates embed #negtive#/#postive# placeholder markers).
        joined_undertake = ''.join(undertake_sentence)
        if 'negtive' in joined_undertake:
            hot_point = random.choice(negtive_hotpoint)
        elif 'postive' in joined_undertake:
            hot_point = random.choice(postive_hotpoint)
        else:
            hot_point = random.choice(all_hotpoint)
        # One random word per placeholder class (#postive#, #negtive#, #QC#, #KEY#).
        word_ref_candi = {key: random.choice(words) for key, words in word_ref_pool.items()}
        prompt_list = start_sentence + undertake_sentence + inspired_sentence
        # Alternate speaker tags 0/1 line by line.
        prompt_raw = '\n'.join(str(i % 2) + ':' + line for i, line in enumerate(prompt_list))
        for key, word in word_ref_candi.items():
            prompt_raw = prompt_raw.replace(key, word)
        # '#KEY#' maps to itself above, so it survives that pass and is
        # resolved to the chosen hot topic here.
        format_prompt = prompt_raw.replace('#KEY#', hot_point)
        format_prompt = '#' * 3 + hot_point + '#' * 3 + '\n' + format_prompt
        prompt_res_list.add(format_prompt)
    # Write with a context manager so the handle is flushed and closed
    # (the old io.open(...).write(...) leaked it).
    with io.open(os.path.join(this_dir, 'candi_fill_prompt.txt'), 'w') as f:
        f.write('\n\n'.join(prompt_res_list))
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/gpt3_prompt_generate/Gpt3_Generator.py | src/beta_code/gpt3_prompt_generate/Gpt3_Generator.py |
import io,os
from tqdm import tqdm
from beta_code.gpt3_prompt_generate.GPT3_Call import call_gpt3
this_dir = os.path.split(os.path.realpath(__file__))[0]
prompt_file_path = os.path.join(this_dir,'human_filter_prompt.txt')
completion_file_path = os.path.join(this_dir,'candi_completion.txt')
def main():
    """Run each human-filtered prompt through GPT-3 and append results.

    Prompts are separated by blank lines in ``prompt_file_path``; every
    output record is ``<prompt>\\n--gen--\\n<completion>\\n\\n`` appended to
    ``completion_file_path``.
    """
    with io.open(prompt_file_path, 'r') as prompt_file:
        all_prompts = prompt_file.read().split('\n\n')
    for idx in tqdm(range(len(all_prompts))):
        prompt = all_prompts[idx]
        response = call_gpt3(prompt)
        # NOTE(review): '.last_response.data' mirrors the commented example
        # above; confirm it matches the OpenAI client version in use.
        completion = response.last_response.data['choices'][0]['text']
        all_content = prompt + '\n--gen--\n' + completion + '\n\n'
        # Bug fix: the original wrote to a CWD-relative 'candi_completion.txt'
        # instead of the configured completion_file_path, and never closed
        # the handle.  Append per-prompt so partial progress survives crashes.
        with io.open(completion_file_path, 'a') as out:
            out.write(all_content)
    print()


if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/gpt3_prompt_generate/GPT3_Call.py | src/beta_code/gpt3_prompt_generate/GPT3_Call.py | import os
import openai
def call_gpt3(prompt_text):
    """Request a completion for ``prompt_text`` from the OpenAI davinci model.

    Uses a low temperature with fairly strong frequency/presence penalties
    to keep completions focused and non-repetitive.

    :param prompt_text: full prompt string, including any dialogue context.
    :return: the raw OpenAI completion response object.
    """
    response = openai.Completion.create(
        model="davinci",
        prompt=prompt_text,
        temperature=0.2,
        max_tokens=256,
        top_p=1,
        frequency_penalty=0.85,
        presence_penalty=0.4
    )
    return response


if __name__ == '__main__':
    # Manual smoke test with a fixed crosstalk-style dialogue prompt.
    input_text = '###美国枪击案###\n0:我最近发现一个秘密!\n1:给咱们大伙分享说说?\n0:最近在忙什么呀\n1:说出来吓死你\n0:什么呀这么神秘\n1:你听说那个美国枪击案了么\n0:听说了,都在讨论这个事情'
    call_gpt_ = call_gpt3(input_text)
    print()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/beta_code/gpt3_prompt_generate/__init__.py | src/beta_code/gpt3_prompt_generate/__init__.py | python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false | |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/cpm/inference.py | src/cpm/inference.py | '''
Author: anon
Date: 2022-02-07 20:18:18
LastEditors: anon
LastEditTime: 2022-02-10 16:09:11
FilePath: /crosstalk-generation/src/cpm/inference.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
from transformers import TextGenerationPipeline, AutoTokenizer, AutoModelWithLMHead,ConversationalPipeline,Conversation

# Scratch script: smoke-test text generation with the local CPM-large
# checkpoint (paths are machine-specific).
tokenizer = AutoTokenizer.from_pretrained("/data1/anon/crosstalk-generation/pretrain_model/CPM-large")
model = AutoModelWithLMHead.from_pretrained("/data1/anon/crosstalk-generation/pretrain_model/CPM-large")

text_generator = TextGenerationPipeline(model, tokenizer)
# Sample up to 50 tokens of continuation with nucleus sampling.
text = text_generator('我是吴相博', max_length=50, do_sample=True, top_p=0.9)
print(text)
# conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
# conversation_1 = Conversation("<cls>今天我俩来给大家说段相声<sep>")
# print()
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/cpm/generate_eval_data.py | src/cpm/generate_eval_data.py | '''
Author: anon
Date: 2022-02-08 16:12:50
LastEditors: anon
LastEditTime: 2022-02-10 17:07:36
FilePath: /crosstalk-generation/src/cpm/generate_eval_data.py
Description:
Copyright (c) 2022 by anon/Ultrapower, All Rights Reserved.
'''
import transformers
import torch
import os,re
import json
import random
import numpy as np
import argparse
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
import logging
from transformers import GPT2TokenizerFast, GPT2LMHeadModel, GPT2Config
from transformers import BertTokenizerFast
# from transformers import BertTokenizer
from os.path import join, exists
from itertools import zip_longest, chain
# from chatbot.model import DialogueGPT2Model
from dataset import MyDataset
from torch.utils.data import Dataset, DataLoader
from torch.nn import CrossEntropyLoss
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import io
from transformers import TextGenerationPipeline, AutoTokenizer, AutoModelWithLMHead
def set_args():
    """
    Build and parse the command-line arguments for evaluation-data generation.

    Covers sampling hyper-parameters (temperature, top-k/top-p, penalties),
    model/vocab checkpoint paths, input/output file locations and sequence
    length limits.  Help strings are in Chinese (original project language).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, required=False, help='生成设备')
    parser.add_argument('--temperature', default=1.2, type=float, required=False, help='生成的temperature')
    parser.add_argument('--topk', default=2, type=int, required=False, help='最高k选1')
    parser.add_argument('--topp', default=0.3, type=float, required=False, help='最高积累概率')
    parser.add_argument('--log_path', default='/data1/anon/crosstalk-generation/src/cpm_4922/logs/interact.log', type=str, required=False, help='interact日志存放位置')
    parser.add_argument('--vocab_path', default='/data1/anon/crosstalk-generation/pretrain_model/CPM-large', type=str, required=False, help='选择词库')
    parser.add_argument('--model_path', default='/data1/anon/crosstalk-generation/pretrain_model/CPM-large', type=str, required=False, help='对话模型路径')
    parser.add_argument('--test_filter_data', default="/data1/anon/crosstalk-generation/src/cpm_4922/data/p256-s64/test_filter_50x20.txt", type=str, required=False, help="数据基准文件,n个篇章,每个篇章20行")
    parser.add_argument('--save_samples_path', default="/data1/anon/crosstalk-generation/src/cpm_4922/sample", type=str, required=False, help="保存跑机器指标数据的文件路径及保存篇章级生成的文件路径")
    parser.add_argument('--repetition_penalty', default=2.0, type=float, required=False,
                        help="重复惩罚参数,若生成的对话重复性较高,可适当提高该参数")
    parser.add_argument('--diverse_penalty', default=2.0, type=float, required=False,
                        help="历史出现字惩罚项")
    parser.add_argument('--seed', type=int, default=1234, help='设置种子用于生成随机数,以使得训练的结果是确定的')
    parser.add_argument('--utterance_max_len', type=int, default=64, help='每个utterance的最大长度,超过指定长度则进行截断')
    parser.add_argument('--seq_max_len', type=int, default=256, help='最大输入长度')
    parser.add_argument('--max_history_len', type=int, default=20, help="dialogue history的最大长度")
    parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行预测')
    return parser.parse_args()
def set_random_seed(args):
    """Seed every relevant RNG so a run is reproducible.

    Seeds Python's ``random``, NumPy and PyTorch with ``args.seed`` and
    switches cuDNN into deterministic (non-benchmark) mode.

    :param args: parsed arguments carrying a ``seed`` attribute.
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Deterministic cuDNN kernels; disable autotuning benchmark mode.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def create_logger(args):
    """Build a logger writing INFO+ records to ``args.log_path`` and
    echoing DEBUG+ records to the console.

    :param args: parsed arguments carrying a ``log_path`` attribute.
    :return: the configured :class:`logging.Logger`.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # File handler: persist INFO and above to the log file.
    to_file = logging.FileHandler(filename=args.log_path)
    to_file.setLevel(logging.INFO)
    to_file.setFormatter(fmt)
    logger.addHandler(to_file)

    # Console handler: mirror all records to the terminal as well.
    to_console = logging.StreamHandler()
    to_console.setFormatter(fmt)
    to_console.setLevel(logging.DEBUG)
    logger.addHandler(to_console)
    return logger
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Mask a 1-D logits tensor for top-k and/or nucleus (top-p) sampling.

    Entries outside the ``top_k`` highest logits and/or outside the smallest
    token set whose cumulative softmax probability reaches ``top_p`` are
    overwritten in place with ``filter_value``.

    Adapted from https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317

    :param logits: 1-D tensor of unnormalised token scores (modified in place).
    :param top_k: keep only this many highest-scoring tokens (0 disables).
    :param top_p: keep the smallest prefix of sorted tokens whose cumulative
        probability exceeds this threshold (0.0 disables).
    :param filter_value: value written into masked-out positions.
    :return: the same tensor with filtered entries replaced.
    """
    # Only the single-sample (1-D) case is supported.
    assert logits.dim() == 1
    top_k = min(top_k, logits.size(-1))  # never ask for more than exist
    if top_k > 0:
        # Everything strictly below the k-th largest logit is masked.
        kth_value = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_value] = filter_value
    if top_p > 0.0:
        ordered, order = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(ordered, dim=-1), dim=-1)
        # Tokens past the nucleus boundary get dropped ...
        drop_mask = cum_probs > top_p
        # ... shifted right by one so the first token crossing the
        # threshold is still kept.
        drop_mask[..., 1:] = drop_mask[..., :-1].clone()
        drop_mask[..., 0] = 0
        logits[order[drop_mask]] = filter_value
    return logits
def generate_text_by_input(args,text,history,model,tokenizer):
    """Autoregressively generate one reply to ``text`` given ``history``.

    The dialogue history (plus the new utterance) is packed, most recent
    turns first, into a window of at most ``args.seq_max_len`` token ids.
    Tokens are then sampled one at a time with repetition/diversity
    penalties, temperature scaling and top-k/top-p filtering.  The decoded
    reply is appended to ``history`` (mutated in place) and returned.

    :param args: namespace with sampling hyper-parameters
        (utterance_max_len, seq_max_len, repetition_penalty,
        diverse_penalty, temperature, topk, topp).
    :param text: the new input utterance.
    :param history: previous utterances, oldest first; mutated in place.
    :param model: causal LM used for next-token prediction.
    :param tokenizer: tokenizer matching ``model``.
    :return: the generated reply as a decoded, stripped string.
    """
    history.append(text)
    history = [tokenizer.encode(i,add_special_tokens=False) for i in history]
    input_ids = [tokenizer.cls_token_id]
    history_start_index = 0
    filter_history_sent_ids = []
    # Walk the history backwards, keeping as many of the most recent turns
    # as fit into the seq_max_len token budget (each turn truncated to
    # utterance_max_len and terminated with [SEP]).
    for rev_idx in range(len(history)-1,-1,-1):
        this_turn_ids = history[rev_idx][:args.utterance_max_len] + [tokenizer.sep_token_id]
        if history_start_index + len(this_turn_ids) > args.seq_max_len:
            break
        filter_history_sent_ids.append(this_turn_ids)
        history_start_index += len(this_turn_ids)
    # Restore chronological order before concatenating.
    filter_history_sent_ids.reverse()
    for sent_ids in filter_history_sent_ids:
        input_ids.extend(sent_ids)
    # input_ids.append(tokenizer.cls_token_id)
    input_ids = torch.tensor(input_ids).long().to(model.device)
    input_ids = input_ids.unsqueeze(0)
    response = []  # token ids generated so far for this reply
    # Generate at most utterance_max_len tokens.
    for idx in range(args.utterance_max_len):
        outputs = model(input_ids=input_ids)
        logits = outputs.logits
        next_token_logits = logits[0, -1, :]
        # Penalise tokens already emitted in this reply (repetition penalty).
        for id in set(response):
            next_token_logits[id] /= args.repetition_penalty
        # Penalise tokens occurring anywhere in the context (diversity penalty).
        for id in set(input_ids.cpu().numpy()[0].tolist()):
            next_token_logits[id] /= args.diverse_penalty
        next_token_logits = next_token_logits / args.temperature
        # Never sample <unk>.
        next_token_logits[tokenizer.convert_tokens_to_ids('<unk>')] = -float('Inf')
        filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=args.topk, top_p=args.topp)
        # torch.multinomial samples without replacement; higher weight means
        # a higher chance of being drawn.
        next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
        # if (next_token == tokenizer.cls_token_id or next_token == 12 or next_token == 20 or next_token == tokenizer.eos_token_id) and len(response) > 0: # a [SEP]-like token marks end of reply
        # Tokens that must never appear inside a reply: special tokens,
        # sentence-final punctuation and bare digits.
        no_need_tok = [tokenizer.cls_token_id,
                       tokenizer.bos_token_id,
                       tokenizer.sep_token_id,
                       tokenizer.pad_token_id,
                       tokenizer.convert_tokens_to_ids('<eop>'),
                       tokenizer.convert_tokens_to_ids('<unk>'),
                       tokenizer.convert_tokens_to_ids('<eod>'),
                       tokenizer.convert_tokens_to_ids('▁。'),
                       tokenizer.convert_tokens_to_ids('▁?'),
                       tokenizer.convert_tokens_to_ids('▁!'),
                       tokenizer.convert_tokens_to_ids('▁('),
                       tokenizer.convert_tokens_to_ids('▁0'),
                       tokenizer.convert_tokens_to_ids('0'),
                       tokenizer.convert_tokens_to_ids('▁1'),
                       tokenizer.convert_tokens_to_ids('▁2'),
                       tokenizer.convert_tokens_to_ids('▁3'),
                       tokenizer.convert_tokens_to_ids('▁4'),
                       tokenizer.convert_tokens_to_ids('▁5'),
                       tokenizer.convert_tokens_to_ids('▁6'),
                       tokenizer.convert_tokens_to_ids('▁7'),
                       tokenizer.convert_tokens_to_ids('▁8'),
                       tokenizer.convert_tokens_to_ids('▁9'),
                       tokenizer.convert_tokens_to_ids('▁.'),
                       tokenizer.convert_tokens_to_ids('.')
                       ]
        if (next_token.cpu().numpy()[0] in no_need_tok) :  # a banned token signals end-of-reply
            if len(response) > 0:
                break
            else:
                # Nothing generated yet: resample enough candidates to
                # guarantee at least one non-banned token, take the first
                # allowed one.
                next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=len(no_need_tok) + 2)[1:]
                for candi_tok in next_token:
                    if not candi_tok in no_need_tok:
                        next_token = candi_tok.reshape(1,)
                        break
        if next_token == 0:
            continue
        response.append(next_token.item())
        input_ids = torch.cat((input_ids, next_token.unsqueeze(0)), dim=1)
        # his_text = tokenizer.convert_ids_to_tokens(curr_input_tensor.tolist())
        # print("his_text:{}".format(his_text))
    if len(response) == 0:
        # Degenerate case: no token survived the filters.
        print('')
    res_text = tokenizer.decode(response).strip()
    # start_pos = [i.start() for i in re.finditer('\d*\.\d*',res_text)]
    # if start_pos and min(start_pos) > 2:
    #     res_text = res_text[:min(start_pos)]
    history.append(res_text)
    return res_text
def generate_text_by_input_new(args,text,history,model=None,tokenizer=None,text_generator=None):
    """Alternative reply generation via a HuggingFace TextGenerationPipeline.

    Joins the dialogue history with the tokenizer's separator token and
    asks the pipeline for up to 20 new tokens.  ``history`` is mutated in
    place: the new ``text`` is appended, but (unlike generate_text_by_input)
    the generated reply is NOT appended here.

    :param args: unused here; kept for signature parity with
        generate_text_by_input.
    :param text: the new input utterance.
    :param history: previous utterances, oldest first; mutated in place.
    :param text_generator: a transformers TextGenerationPipeline.
    :return: the generated reply as a list of single characters.
    """
    history.append(text)
    text = text_generator(text_generator.tokenizer.sep_token.join(history), top_k=1,
                          max_new_tokens=20,
                          # max_length=64,
                          top_p=0.9,
                          repetition_penalty=2.0,
                          return_full_text=False,
                          # do_sample=False,
                          # diversity_penalty=1.2,
                          # num_beam_groups=2,
                          # num_beams=2,
                          # length_penalty=2.0,
                          bad_words_ids=[[text_generator.tokenizer.unk_token_id]])
    raw_text = ''.join([i for i in text[0]['generated_text']])
    return [i for i in raw_text]
def get_machine_metric_datas(model,args,tokenizer):
    '''
    Generate the (original, generated) pairs needed for machine metrics
    (BLEU, GLEU, ROUGE).

    For each paragraph of the test file, feed growing prefixes of the
    *original* lines to the model:
        A_ori -> B_gen
        A_ori, B_ori -> C_gen
        A_ori, B_ori, C_ori -> D_gen
    and record the pairs
        (B_ori, B_gen), (C_ori, C_gen), (D_ori, D_gen)
    as JSON under args.save_samples_path/machine_metric_data.json.
    '''
    raw_content = io.open(args.test_filter_data,'r').read()
    # Paragraphs are separated by blank lines; lines within a paragraph
    # are individual utterances.
    raw_paras = raw_content.split('\n\n')
    results = []
    for single_para in raw_paras:
        single_lines = single_para.split('\n')
        lines_nums = len(single_lines)
        for step in range(1,lines_nums):
            # Context = all original lines before `step`; the last one is
            # the "current" utterance to reply to.
            inputs_text = single_lines[:step]
            history = inputs_text[:-1]
            text = inputs_text[-1]
            gen_text_tok = generate_text_by_input(args,text, history, model, tokenizer)
            gen_text = "".join(gen_text_tok)
            print(gen_text)
            # Reference: the actual next line of the paragraph.
            ori_text = single_lines[step]
            results.append({'ori':ori_text,'gen':gen_text})
    data_file_path = os.path.join(args.save_samples_path,'machine_metric_data.json')
    io.open(data_file_path,'w').write(json.dumps(results,ensure_ascii=False, indent=4))
def generate_human_check_datas(model,args,tokenizer):
    '''
    Generate full passages for human evaluation.

    Each paragraph is seeded with its first 10 original lines; the model
    then continues turn by turn, feeding its own outputs back as history:
        pre_data(:10) -> A_gen
        pre_data(:10) + A_gen -> B_gen
        pre_data(:10) + A_gen + B_gen -> C_gen
    The output file contains, per paragraph, the 10 seed lines followed by
    the generated turns.

    :return: path of the written passages file.
    '''
    raw_content = io.open(args.test_filter_data,'r').read()
    raw_paras = raw_content.split('\n\n')
    results = []
    # text_generator = TextGenerationPipeline(model, tokenizer,device=0)
    for single_para in raw_paras:
        single_lines = single_para.split('\n')
        lines_nums = len(single_lines)
        history_all = single_lines[:10]
        # One generation step per remaining original line, so generated
        # passages have as many turns as the source paragraph.
        for step in range(10,lines_nums):
            history = history_all[:-1]
            text = history_all[-1]
            gen_text_tok = generate_text_by_input(args,text, history, model,tokenizer)
            gen_text = "".join(gen_text_tok)
            # print(gen_text)
            # ori_text = single_lines[step]
            # history_all is the canonical record; generate_text_by_input
            # only mutates its own local copy of the history list.
            history_all.append(gen_text)
            print(history_all)
        results.append('\n'.join(history_all))
    data_file_path = os.path.join(args.save_samples_path,'CPM_turn_ep100_4_rp.txt')
    io.open(data_file_path,'w').write('\n\n'.join(results))
    return data_file_path
def interact(args,samples_file,model,tokenizer):
    """Interactive chat REPL; optionally logs the conversation.

    Reads user input in a loop, generates a reply with
    generate_text_by_input (which also maintains ``history``), and writes
    both sides to ``samples_file`` when args.save_samples_path is set.
    Exits (closing the samples file) on KeyboardInterrupt.
    """
    history = []
    print('开始和chatbot聊天,输入CTRL + Z以退出')
    while True:
        try:
            text = input("user:")
            # text = "你好"
            if args.save_samples_path:
                samples_file.write("user:{}\n".format(text))
            text = generate_text_by_input(args,text,history,model,tokenizer)
            print("chatbot:" + "".join(text))
            if args.save_samples_path:
                samples_file.write("chatbot:{}\n".format("".join(text)))
        except KeyboardInterrupt:
            if args.save_samples_path:
                samples_file.close()
            break
def main():
    """Entry point: load the CPM checkpoint and generate passage-level
    samples for human evaluation.

    Honours the parsed command-line arguments for device selection and
    checkpoint locations, then delegates to generate_human_check_datas.
    """
    args = set_args()
    set_random_seed(args)
    logger = create_logger(args)
    # Bug fix: restrict visible GPUs *before* probing availability so the
    # --device flag is actually honoured (the original probed first).
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda' if args.cuda else 'cpu'
    logger.info('using device:{}'.format(device))
    # Consistency fix: use --vocab_path / --model_path instead of
    # re-hard-coding the checkpoint location (defaults are identical, so
    # default behaviour is unchanged).
    tokenizer = AutoTokenizer.from_pretrained(args.vocab_path)
    model = AutoModelWithLMHead.from_pretrained(args.model_path)
    model = model.to(device)
    model.eval()
    if args.save_samples_path:
        if not os.path.exists(args.save_samples_path):
            os.makedirs(args.save_samples_path)
        # Chat-log file used by interact(); opened here for parity with the
        # commented-out interactive mode below.
        samples_file = open(args.save_samples_path + '/samples.txt', 'a', encoding='utf8')
        samples_file.write("聊天记录{}:\n".format(datetime.now()))
    # get_machine_metric_datas(model, args, tokenizer)
    # interact(args, samples_file, model, tokenizer)
    generate_human_check_datas(model,args,tokenizer)


if __name__ == '__main__':
    main()
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/cpm/dataset.py | src/cpm/dataset.py | from torch.utils.data import Dataset
import torch
class MyDataset(Dataset):
    """Torch dataset over pre-tokenized id sequences.

    Each item is one sequence of token ids; sequences longer than
    ``max_len`` are truncated on retrieval.
    """

    def __init__(self, input_list, max_len):
        # input_list: list of token-id lists; max_len: truncation length.
        self.input_list = input_list
        self.max_len = max_len

    def __getitem__(self, index):
        # Truncate lazily so the full sequences stay available in memory.
        ids = self.input_list[index][:self.max_len]
        return torch.tensor(ids, dtype=torch.long)

    def __len__(self):
        return len(self.input_list)
| python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false |
FreedomIntelligence/crosstalk-generation | https://github.com/FreedomIntelligence/crosstalk-generation/blob/d7baa296339722cd0804dd68c7a25622b472f5d1/src/cpm/__init__.py | src/cpm/__init__.py | python | Apache-2.0 | d7baa296339722cd0804dd68c7a25622b472f5d1 | 2026-01-05T07:14:13.078808Z | false | |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/setup.py | setup.py | from setuptools import setup
if __name__ == '__main__':
    # setup() takes no arguments here — the package metadata presumably
    # lives in setup.cfg / pyproject.toml (not visible in this file).
    setup()
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/tests/conftest.py | tests/conftest.py | from pathlib import Path
import pytest
from BibaAndBoba import BibaAndBoba
TEST_DATA_PATH = Path(__file__).parent.resolve() / "data"
def data_file(filename: str):
    """Return the path to a fixture file under tests/data."""
    return TEST_DATA_PATH / filename


@pytest.fixture(scope="session")
def chat_1_file():
    """Raw bytes of the first exported test chat (session-scoped)."""
    return data_file("test_chat_1.json").open("rb").read()


@pytest.fixture(scope="session")
def chat_2_file():
    """Raw bytes of the second exported test chat (session-scoped)."""
    return data_file("test_chat_2.json").open("rb").read()


@pytest.fixture(scope="session")
def biba_and_boba(chat_1_file, chat_2_file):
    """A BibaAndBoba instance built once from the two test chats."""
    return BibaAndBoba(chat_1_file, chat_2_file)
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/tests/test_comparator.py | tests/test_comparator.py | import pytest
from BibaAndBoba.comparator import Comparator
@pytest.fixture(scope="module")
def biba_and_boba_comparator(biba_and_boba):
    """Comparator built by comparing the shared fixture with itself."""
    return Comparator(biba_and_boba, biba_and_boba)
def test_type_error():
    """Comparator must reject arguments that are not BibaAndBoba objects."""
    # Tightened from pytest.raises(Exception): the constructor raises a
    # TypeError specifically, and catching bare Exception could mask
    # unrelated failures.
    with pytest.raises(TypeError) as e_info:
        # noinspection PyTypeChecker
        Comparator("", "")
    assert "You must pass the BibaAndBoba objects as the arguments" in str(e_info.value)
def test_get_correlation(biba_and_boba_comparator):
    """Self-comparison with the positional correlation must be exactly 1."""
    correlation_actual = biba_and_boba_comparator.get_correlation(use_alternate_correlation=False)
    correlation_expected = 1.0

    assert correlation_actual == correlation_expected


def test_alternate_correlation(biba_and_boba_comparator):
    """Self-comparison with the quotient-weighted correlation must be 1."""
    correlation_actual = biba_and_boba_comparator.get_correlation(use_alternate_correlation=True)
    correlation_expected = 1.0

    assert correlation_actual == correlation_expected


def test_get_same_words(biba_and_boba_comparator):
    """Comparing a chat with itself: its own words must appear as shared."""
    same_words_actual = biba_and_boba_comparator.get_same_words()
    same_words_expected = ["hi", "павло"]

    assert all(word in same_words_actual for word in same_words_expected)
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/tests/__init__.py | tests/__init__.py | python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false | |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/tests/test_reader.py | tests/test_reader.py | import pytest
from BibaAndBoba.utils.reader import Reader
@pytest.fixture()
def reader(chat_1_file):
    """Reader parsed from the first test chat export (per-test scope)."""
    return Reader(file=chat_1_file)


def test_get_companion_id(reader):
    """The companion id must match the one stored in the fixture file."""
    companion_id_actual = reader.get_companion_id()
    companion_id_expected = "483571608"

    assert companion_id_actual == companion_id_expected


def test_get_companion_name(reader):
    """The companion name must match the one stored in the fixture file."""
    companion_name_actual = reader.get_companion_name()
    companion_name_expected = "Андрій"

    assert companion_name_actual == companion_name_expected


def test_get_messages(reader):
    """get_messages returns the plain message texts in order."""
    messages_actual = reader.get_messages()
    messages_expected = ["Hi", "Павло"]

    assert messages_actual == messages_expected


def test_get_messages_dict(reader):
    """get_messages_dict returns the raw message dicts with Telegram keys."""
    messages_dict_actual = reader.get_messages_dict()

    assert isinstance(messages_dict_actual, list)
    assert isinstance(messages_dict_actual[0], dict)
    assert all(key in messages_dict_actual[0] for key in ["id", "type", "date", "from"])


def test_get_messages_count(reader):
    """get_messages_count matches the number of messages in the fixture."""
    messages_count_actual = reader.get_messages_count()
    messages_count_expected = 2

    assert messages_count_actual == messages_count_expected
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/tests/test_biba_and_boba.py | tests/test_biba_and_boba.py | import pandas as pd
from pandas.testing import assert_frame_equal
def test_freq_dist(biba_and_boba):
    """parasite_words returns the expected frequency table for the fixture."""
    freq_dist_actual = biba_and_boba.parasite_words()
    freq_dist_expected = pd.DataFrame(
        {
            'Word': ["hi", "павло"],
            'Count': [1, 1],
            'Quotient': [0.5, 0.5],
        }
    )

    assert isinstance(freq_dist_actual, pd.DataFrame)
    assert_frame_equal(freq_dist_actual, freq_dist_expected)


def test_get_tokenized_words_person_1(biba_and_boba):
    """Person 1's messages tokenize to lower-cased word tokens, in order."""
    tokenized_words_actual = biba_and_boba.get_tokenized_words_person_1()
    tokenized_words_expected = ["hi", "павло"]

    assert tokenized_words_actual == tokenized_words_expected


def test_get_tokenized_words_person_2(biba_and_boba):
    """Person 2's messages tokenize to lower-cased word tokens, in order."""
    tokenized_words_actual = biba_and_boba.get_tokenized_words_person_2()
    tokenized_words_expected = ["дороу"]

    assert tokenized_words_actual == tokenized_words_expected


def test_get_name(biba_and_boba):
    """get_name returns person 1's display name from the first chat file."""
    name_actual = biba_and_boba.get_name()
    name_expected = "Андрій"

    assert name_actual == name_expected


def test_get_difference_words(biba_and_boba):
    """All of person 1's words are 'difference words' for this fixture pair."""
    difference_words_actual = biba_and_boba.get_difference_words()
    difference_words_expected = ["hi", "павло"]

    assert difference_words_actual == difference_words_expected
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/tests/test_nltk_punkt_downloader.py | tests/test_nltk_punkt_downloader.py | import logging
import nltk
from BibaAndBoba.utils.nltk_punkt_downloader import download_punkt
def test_download_punkt(caplog):
    """download_punkt logs a success message when punkt is absent.

    If the punkt tokenizer is already installed locally the download path
    is not exercised, so this test is effectively a no-op in that case.
    """
    try:
        nltk.data.find('tokenizers/punkt')
    except LookupError:
        with caplog.at_level(logging.WARNING):
            download_punkt()
        assert 'Successfully downloaded punkt tokenizer from NLTK.' in caplog.text
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/biba_and_boba.py | BibaAndBoba/biba_and_boba.py | import pandas as pd
from nltk.probability import FreqDist
from BibaAndBoba.utils.reader import FileInput
from BibaAndBoba.utils.reader import Reader
from BibaAndBoba.utils.tokenizer import tokenize
from BibaAndBoba.utils.logger import logger
class BibaAndBoba:
    """
    Analyzes two exported Telegram chat history files.

    Provides methods to get the "difference words" (words characteristic of
    person 1 relative to person 2), their frequency distribution, and
    related data.  Messages are tokenized with NLTK via the project's
    ``tokenize`` helper; files are parsed with
    :class:`BibaAndBoba.utils.reader.Reader`.
    """

    def __init__(
        self,
        file_1: FileInput,
        file_2: FileInput,
        subtraction_threshold: int = 3,
        use_cache: bool = True,
        flush_cache: bool = False,
    ):
        """
        Parse both chat files, tokenize each companion's messages and
        precompute the difference-word list.

        :param file_1: first exported chat file (person 1's chat).
        :param file_2: second exported chat file (person 2's chat).
        :param subtraction_threshold: words occurring fewer than this many
            times among person 2's tokens count as person-1-specific.
            Values above 3 are not recommended unless really needed.
        :param use_cache: whether to reuse cached tokenization results.
        :param flush_cache: whether to discard any existing cache first.
        :raises ValueError: if both files belong to the same interlocutor.
        """
        if not use_cache:
            logger.warning(
                "Warning, cache is disabled. This may significantly slow down the process.\n"
            )

        file_1 = Reader(file_1)
        file_2 = Reader(file_2)

        file_1_companion_id = file_1.get_companion_id()
        file_2_companion_id = file_2.get_companion_id()
        # Comparing someone with themselves is meaningless — refuse it.
        if file_1_companion_id == file_2_companion_id:
            raise ValueError("Interlocutors must be different")

        self.__person_1_name = file_1.get_companion_name()
        self.__person_2_name = file_2.get_companion_name()
        self.__tokenized_person_1 = tokenize(
            file_1.get_messages(),
            file_1_companion_id,
            self.__person_1_name,
            use_cache=use_cache,
            flush_cache=flush_cache,
        )
        self.__tokenized_person_2 = tokenize(
            file_2.get_messages(),
            file_2_companion_id,
            self.__person_2_name,
            use_cache=use_cache,
            flush_cache=flush_cache,
        )
        self.__difference_words = self.__subtraction(threshold=subtraction_threshold)

    def __subtraction(self, threshold: int) -> list[str]:
        """
        Return person 1's words that are rare in person 2's speech.

        A token from person 1's stream is kept when it appears fewer than
        ``threshold`` times among person 2's tokens.  Duplicates are kept,
        so the result preserves person 1's usage frequency.

        :param threshold: minimum count in person 2's messages for a word
            to be considered shared (and therefore excluded).
        :return: list of person-1-specific word tokens.
        """
        messages_minuend = self.__tokenized_person_1
        subtrahend_freq_dist = FreqDist(self.__tokenized_person_2)

        difference_words = []
        for word in messages_minuend:
            if subtrahend_freq_dist.get(word, 0) < threshold:
                difference_words.append(word)

        return difference_words

    def parasite_words(self, limit: int = 10) -> pd.DataFrame:
        """
        Return the most frequent difference words as a DataFrame.

        The ``Quotient`` column is each word's count divided by the total
        count of the returned (top-``limit``) words, i.e. shares sum to 1
        over the returned rows.

        :param limit: number of words to return, defaults to 10.
        :return: DataFrame with columns ``Word``, ``Count``, ``Quotient``.
        """
        fdist = FreqDist(self.__difference_words)
        df = pd.DataFrame(fdist.most_common(limit), columns=["Word", "Count"])
        df["Quotient"] = df["Count"] / df.sum(axis=0)["Count"]

        return df

    def get_tokenized_words_person_1(self) -> list[str]:
        """
        Return all word tokens from person 1's messages, in message order.
        """
        return self.__tokenized_person_1

    def get_tokenized_words_person_2(self) -> list[str]:
        """
        Return all word tokens from person 2's messages, in message order.
        """
        return self.__tokenized_person_2

    def get_name(self) -> str:
        """
        Return person 1's display name (companion of the first chat file).
        """
        return self.__person_1_name

    def get_difference_words(self) -> list[str]:
        """
        Return the precomputed list of person-1-specific words
        (see :meth:`__subtraction`).
        """
        return self.__difference_words
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/comparator.py | BibaAndBoba/comparator.py | from BibaAndBoba.biba_and_boba import BibaAndBoba
def _max_correlation(n: int) -> float:
"""
The max_correlation function computes the maximum correlation between two frequency distributions.
:param n: :obj:`int`: Specify the length of the frequency distributions
:return: The maximum correlation
"""
s = 0
for i in range(1, n + 1):
s += 1 / i
return s
class Comparator:
    """
    Compares two people's "parasite word" profiles.

    Provides the correlation between the two people (two alternative
    formulas) and the set of words common to both top-word lists.
    """

    def __init__(
        self,
        person1: BibaAndBoba,
        person2: BibaAndBoba,
        limit: int = 10,
    ):
        # Both arguments must be analyzed chats; anything else is an error.
        if not isinstance(person1, BibaAndBoba) or not isinstance(person2, BibaAndBoba):
            raise TypeError("You must pass the BibaAndBoba objects as the arguments")

        # Top-`limit` parasite words per person, with usage quotients.
        self.__person1_freq_dist = person1.parasite_words(limit=limit)
        self.__person2_freq_dist = person2.parasite_words(limit=limit)
        self.__same_words = self.__find_same_words()

    def __correlation(self) -> float:
        """
        Rank-position correlation between the two people.

        Each word shared by both top lists contributes 1/(rank in person
        1's list); if the two usage quotients differ by more than 0.15 the
        contribution is reduced by that difference.  The sum is normalised
        by the maximum attainable score (harmonic number of the list
        length).

        NOTE(review): the 0.15 quotient-difference threshold appears to be
        an empirical constant — confirm against project docs.

        :return: correlation in roughly [0, 1].
        """
        corr = 0
        quotients_person1 = self.__person1_freq_dist["Quotient"]
        quotients_person2 = self.__person2_freq_dist["Quotient"]

        for i, word_1 in enumerate(self.__person1_freq_dist["Word"]):
            for j, word_2 in enumerate(self.__person2_freq_dist["Word"]):
                if word_1 == word_2:
                    quotient_diff = abs(quotients_person1[i] - quotients_person2[j])
                    if quotient_diff > 0.15:
                        # Penalise matches with very different usage shares.
                        corr += (1 / (i + 1)) - quotient_diff
                    else:
                        corr += 1 / (i + 1)
                else:
                    continue

        max_corr = _max_correlation(len(self.__person1_freq_dist["Word"]))
        result = (corr / max_corr)

        return result

    def __alternate_correlation(self) -> float:
        """
        Quotient-weighted correlation between the two people.

        Sums each person's usage quotients over words that also appear in
        the other person's top list, then averages the two sums.  For a
        self-comparison this yields exactly 1.0.

        :return: correlation in [0, 1].
        """
        corr = 0

        for row in self.__person1_freq_dist.itertuples(index=False):
            if row.Word in self.__person2_freq_dist["Word"].values:
                corr += row.Quotient

        for row in self.__person2_freq_dist.itertuples(index=False):
            if row.Word in self.__person1_freq_dist["Word"].values:
                corr += row.Quotient

        return corr / 2

    def __find_same_words(self) -> set:
        """
        Compute the words present in both people's top-word lists.

        :return: set intersection of the two ``Word`` columns.
        """
        same_words = set(self.__person1_freq_dist["Word"]) & set(self.__person2_freq_dist["Word"])

        return same_words

    def get_correlation(self, use_alternate_correlation: bool = True) -> float:
        """
        Return the correlation between the two people.

        :param use_alternate_correlation: when True (default), use the
            quotient-weighted formula; otherwise the rank-position formula.
        :return: the correlation value.
        """
        if use_alternate_correlation:
            return self.__alternate_correlation()
        else:
            return self.__correlation()

    def get_same_words(self) -> set:
        """
        Return the precomputed set of words common to both top-word lists.
        """
        return self.__same_words
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/__init__.py | BibaAndBoba/__init__.py | from BibaAndBoba.comparator import Comparator
from BibaAndBoba.biba_and_boba import BibaAndBoba
from BibaAndBoba.utils.reader import Reader
from BibaAndBoba.utils.logger import logger
# Import-time check that the NLTK 'punkt' tokenizer data is present;
# nltk.data.find raises LookupError when the resource is missing.
try:
    import nltk
    nltk.data.find('tokenizers/punkt')
except LookupError:
    # Fetch the missing 'punkt' data so later tokenization works.
    # NOTE(review): if the nltk package itself is not installed, the
    # `import nltk` above raises ModuleNotFoundError, which is NOT caught
    # here — confirm nltk is a hard dependency of the package.
    logger.warning("NLTK punkt tokenizer is not installed. Downloading...")
    from BibaAndBoba.utils.nltk_punkt_downloader import download_punkt
    download_punkt()
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
# One-off conversion script: reads the annotated dictionary file (one word
# plus extra fields per line) and writes only the words, one per line, to
# the base Ukrainian word list.
with open("BibaAndBoba/base.lst.txt", "r", encoding="utf-8") as dictionary:
    with open("../dictionaries/base_ua.txt", "w", encoding="utf-8") as base:
        for line in dictionary:
            fields = line.split()
            # Guard against blank lines: indexing the empty split result
            # would raise IndexError.
            if fields:
                base.write(fields[0] + "\n")
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/utils/languages.py | BibaAndBoba/utils/languages.py | from googletrans import Translator
from googletrans.constants import LANGUAGES
# Languages for which NLTK ships a trained 'punkt' tokenizer model; any
# detected language outside this list is replaced by English.
NLTK_SUPPORTED_LANGUAGES = ['czech',
                            'danish',
                            'dutch',
                            'english',
                            'estonian',
                            'finnish',
                            'french',
                            'german',
                            'greek',
                            'italian',
                            'norwegian',
                            'polish',
                            'portuguese',
                            'russian',
                            'slovene',
                            'spanish',
                            'swedish',
                            'turkish',
                            ]
def __get_language(messages: list[str]):
    """
    Detects the language of the messages via Google Translate.

    Only the first 50 messages are joined into a sample, keeping the
    detection request small.
    """
    sample = ' '.join(messages[:50])
    detection = Translator().detect(sample)
    return detection.lang
def get_supported_language(messages: list[str]):
    """
    Returns the NLTK name of the messages' language if NLTK supports it,
    otherwise falls back to 'english'.
    """
    language = __get_language(messages)
    language_name = LANGUAGES.get(language)
    if language_name not in NLTK_SUPPORTED_LANGUAGES:
        print(f'Detected language of your messages - [{language}] is not supported by BibaAndBoba. '
              f'English will be used by default.')
        return 'english'
    return language_name
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/utils/cacher.py | BibaAndBoba/utils/cacher.py | import pickle
import os
from functools import wraps
from pathlib import Path
from BibaAndBoba.utils.logger import logger
# Creates a cache directory called BibaAndBoba/cache.
# Runs at import time; parents=True/exist_ok=True make it idempotent, so
# re-importing or an already-existing directory is harmless.
CACHE_DIR_PATH = Path("BibaAndBoba/cache")
CACHE_DIR_PATH.mkdir(parents=True, exist_ok=True)
def cache_file_path(companion_id: str) -> Path:
    """
    Returns the path of the cache file for a given companion.

    The returned path is derived from companion_id; e.g. for a companion
    with ID 12345 this returns CACHE_DIR/cache_12345.pickle.

    Side effect: the file is created (empty) if it does not exist yet, so
    callers such as check_cache_file can stat it without error handling.

    :param companion_id: Id used to build a unique cache file name
    :return: A Path object pointing to the companion's cache file
    """
    # CACHE_DIR_PATH is already a Path; the previous Path() re-wrap was
    # redundant.
    cache_file = CACHE_DIR_PATH / f"cache_{companion_id}.pickle"
    cache_file.touch(exist_ok=True)
    return cache_file
def check_cache_file(companion_id: str) -> bool:
    """
    Checks whether the cache file for a companion contains any data.

    :param companion_id: str: Companion whose cache file is checked
    :return: True if the cache file is non-empty, False otherwise
    """
    cache_file = cache_file_path(companion_id)
    # Path.stat keeps this pathlib-only instead of mixing in os.stat.
    return cache_file.stat().st_size != 0
def get_cache(companion_id: str) -> list:
    """
    Loads the cached entries for a given companion id.

    If there is no cached data yet, an empty list is returned.
    (The previous docstring claimed a dictionary was returned; the
    function has always returned a list.)

    :param companion_id: str: Id of the companion whose cache is loaded
    :return: A list of cached entries (empty when no cache exists)
    """
    cache_file = cache_file_path(companion_id)
    if not check_cache_file(companion_id):
        return []
    with open(cache_file, "rb") as cf:
        return pickle.load(cf)
def save_cache(cache: list, companion_id: str) -> None:
    """
    Persists the given cache to the companion's cache file.

    :param cache: list: Data to be written to the cache file
    :param companion_id: str: Companion whose cache file is written
    :return: None
    """
    with cache_file_path(companion_id).open("wb") as cache_fp:
        pickle.dump(cache, cache_fp)
def cache_to_file() -> callable:
    """
    Decorator factory producing a decorator that caches a tokenizing
    function's output per companion in a pickle file.
    """
    def caching(func) -> callable:
        """
        Wraps func so its result is loaded from / saved to the
        per-companion cache instead of being recomputed every call.

        :param func: The function whose output should be cached
        :return: A function that consults the cache before calling func
        """
        @wraps(func)
        def wrapper(*args, **kwargs) -> list:
            """
            Returns cached tokenized messages for the companion when
            available, otherwise calls func and stores its result.

            Recognized keyword arguments: use_cache (default True) —
            when False the cache is bypassed entirely; flush_cache
            (default False) — when True any existing cache is discarded
            and recomputed.
            """
            use_cache = kwargs.get("use_cache", True)
            flush_cache = kwargs.get("flush_cache", False)
            if not use_cache:
                return func(*args, **kwargs)
            # NOTE(review): assumes the wrapped function is called as
            # func(<first arg>, companion_id, companion_name, ...) —
            # confirm this positional contract against all call sites.
            companion_id = args[1]
            companion_name = args[2]
            cache = get_cache(companion_id)
            if flush_cache:
                # Plain string: the message has no placeholders, so the
                # previous f-string prefix was unnecessary.
                logger.info("Flushing cache...")
                cache = []
            if cache:
                logger.info(f"Messages from {companion_name} are already analyzed. Using cache."
                            f"\nIf you want to clear the cache, please specify flush_cache=True "
                            f"when creating an instance of BibaAndBoba.\n")
                return cache
            tokenized_messages = func(*args, **kwargs)
            save_cache(tokenized_messages, companion_id)
            return tokenized_messages
        return wrapper
    return caching
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/utils/reader.py | BibaAndBoba/utils/reader.py | import json
from typing import Union, TextIO, BinaryIO
from io import BufferedReader, BytesIO, TextIOWrapper
FileInput = Union[str, bytes, BufferedReader, BinaryIO, BytesIO, TextIO]
def _parse_file_input(file: FileInput):
    """
    Parses any of the supported input forms into a decoded JSON object.

    Accepts a path to a .json file, raw bytes, an open binary/text file
    object, or a BytesIO buffer.

    :param file: Path string, bytes, or file-like object holding JSON data
    :raises TypeError: If a path does not end in .json, or the input is
        not one of the supported types
    :return: The parsed JSON data
    """
    if isinstance(file, str):
        if not file.endswith('.json'):
            extension = file.split('.')[-1]
            raise TypeError(f"File must be json, not {extension}")
        # Use a context manager so the handle is closed deterministically;
        # the previous json.load(open(...)) leaked the file object.
        with open(file, "rb") as fp:
            return json.load(fp)
    elif isinstance(file, bytes):
        return json.loads(file.decode("utf-8"))
    elif isinstance(file, (BufferedReader, TextIOWrapper)):
        return json.load(file)
    elif isinstance(file, BytesIO):
        return json.loads(file.getvalue())
    else:
        raise TypeError("File must be a string, a bytes, a BufferedReader, a BytesIO, or a TextIOWrapper")
class Reader:
    """
    Reader class is a wrapper around a Telegram chat-export json file. It
    provides methods to read the file and retrieve the companion id, name,
    messages, messages count, and the raw message dictionaries.
    """
    def __init__(self, file: FileInput):
        """
        Parses the export file and extracts the companion's messages.

        :param file: :class:`str` | :class:`bytes` | :class:`BufferedReader` | \
        :class:`BytesIO` | :class:`TextIO`:
            The Telegram chat-history export used to create this object
        :raises: TypeError: If file is not a json file or an unsupported type
        :raises: ValueError: If file is not a telegram chat history
        :raises: ValueError: If file is not a personal chat history
        :return: None
        """
        file = _parse_file_input(file)
        if not all(key in file for key in ["id", "name", "messages"]):
            raise ValueError("Looks like you have a wrong json file or it's not a telegram chat history")
        # .get avoids a raw KeyError when the "type" key is absent: both a
        # missing key and any value other than "personal_chat" now raise
        # the documented ValueError.
        if file.get("type") != "personal_chat":
            raise ValueError("You must use a personal chat history")
        self.__companion_id = str(file["id"])
        self.__companion_name = str(file["name"])
        self.__messages_dict_list = file["messages"]
        self.__messages = self.__read_messages()
    def __read_messages(self) -> list:
        """
        Collects the companion's plain-text messages.

        A message is kept when it comes from the companion and its "text"
        field is a non-empty string (messages whose text is not a plain
        string are skipped).

        :param self: Access variables that belong to the class
        :return: A list of all the message strings sent by the companion
        """
        companion_messages = []
        for message in self.__messages_dict_list:
            message_from_id = message.get("from_id", "")
            message_is_empty = message.get("text", "") == ""
            message_is_str = isinstance(message.get("text", None), str)
            # NOTE(review): substring containment is used, presumably
            # because Telegram prefixes ids (e.g. "user12345") — confirm
            # one companion id can never be a substring of another id.
            if self.__companion_id in message_from_id and not message_is_empty and message_is_str:
                companion_messages.append(message["text"])
        return companion_messages
    def get_companion_id(self) -> str:
        """
        Returns the id of the companion.

        :param self: Access the attributes and methods of the class
        :return: The id (as a string) of the companion
        """
        return self.__companion_id
    def get_companion_name(self) -> str:
        """
        Returns the name of the companion.

        :param self: Access the attributes and methods of the class
        :return: The name of the companion
        """
        return self.__companion_name
    def get_messages(self) -> list:
        """
        Retrieves the companion's messages as a list of strings.

        :param self: Refer to the object itself
        :return: A list of all the messages from the companion
        """
        return self.__messages
    def get_messages_dict(self) -> list:
        """
        Returns the raw message dictionaries exactly as stored in the
        initial json file, one dictionary per message.

        :param self: Access the attributes and methods of the class
        :return: A list of dictionaries with message information
        """
        return self.__messages_dict_list
    def get_messages_count(self) -> int:
        """
        Returns the number of messages extracted from the chat.

        :param self: Access the attributes and methods of the class
        :return: The number of messages
        """
        return len(self.__messages)
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/utils/logger.py | BibaAndBoba/utils/logger.py | import logging
class CustomFormatter(logging.Formatter):
    """logging.Formatter that colourizes records by level via ANSI codes."""
    # ANSI colour escape codes.
    white = '\033[97m'
    gray = '\033[37m'
    green = '\033[32m'
    blue = '\033[94m'
    yellow = '\033[93m'
    red = '\033[91m'
    reset = "\x1b[0m"
    # Base format: just the message. (This class attribute is shadowed by
    # the format() method once the class body finishes executing; it is
    # only read while building FORMATS below.)
    format = "%(message)s"
    # Per-level format strings: the message wrapped in a colour code.
    FORMATS = {
        logging.DEBUG: white + format + reset,
        logging.INFO: green + format + reset,
        logging.WARNING: yellow + format + reset,
        logging.ERROR: red + format + reset,
    }
    # Build the Formatter objects once at class-creation time instead of
    # allocating a new logging.Formatter for every record.
    _FORMATTERS = {level: logging.Formatter(fmt) for level, fmt in FORMATS.items()}
    # Levels without an entry (e.g. CRITICAL) use logging's default
    # "%(message)s" format, uncoloured — same behaviour as before, where
    # FORMATS.get returned None and Formatter(None) fell back to default.
    _DEFAULT_FORMATTER = logging.Formatter()
    def format(self, record):
        """Formats the record with the colour formatter for its level."""
        formatter = self._FORMATTERS.get(record.levelno, self._DEFAULT_FORMATTER)
        return formatter.format(record)
# Module-wide coloured logger: a single stream handler at INFO level using
# CustomFormatter.
# NOTE(review): this configures the *root* logger at import time, which
# affects any host application's logging — consider
# logging.getLogger(__name__) instead; confirm no callers rely on the
# root-logger behaviour before changing.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
| python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
andylvua/bibaandboba | https://github.com/andylvua/bibaandboba/blob/b2c5ee1ad241fe3384394c7e8cf18e1feb930457/BibaAndBoba/utils/__init__.py | BibaAndBoba/utils/__init__.py | python | MIT | b2c5ee1ad241fe3384394c7e8cf18e1feb930457 | 2026-01-05T07:14:26.497544Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.