Shuu12121's picture
Upload ModernBERT model
99a2a0e verified
metadata
tags:
  - sentence-transformers
  - sentence-similarity
  - feature-extraction
  - dense
  - generated_from_trainer
  - dataset_size:799680
  - loss:MultipleNegativesRankingLoss
base_model: Shuu12121/CodeModernBERT-Owl-v1
widget:
  - source_sentence: |-
      Disconnects the pool.

      Does everything that +clear+ does, except if the pool is closed
      this method does nothing but +clear+ would raise PoolClosedError.

      @since 2.1.0
      @api private
    sentences:
      - |-
        def disconnect!(options = nil)
                do_clear(options)
              rescue Error::PoolClosedError
                # The "disconnected" state is between closed and paused.
                # When we are trying to disconnect the pool, permit the pool to be
                # already closed.
              end
      - "func TestNamedTupleWithEscapedColumns(t *testing.T) {\n\tTestProtocols(t, func(t *testing.T, protocol clickhouse.Protocol) {\n\t\tconn, err := GetNativeConnection(t, protocol, nil, nil, nil)\n\t\tctx := context.Background()\n\t\trequire.NoError(t, err)\n\t\t// https://github.com/ClickHouse/ClickHouse/pull/36544\n\t\tif !CheckMinServerServerVersion(conn, 22, 5, 0) {\n\t\t\tt.Skip(fmt.Errorf(\"unsupported clickhouse version\"))\n\t\t\treturn\n\t\t}\n\t\tconst ddl = \"CREATE TABLE test_tuple (Col1 Tuple(`56` String, `a22\\\\`` Int64)) Engine MergeTree() ORDER BY tuple()\"\n\t\tdefer func() {\n\t\t\tconn.Exec(ctx, \"DROP TABLE IF EXISTS test_tuple\")\n\t\t}()\n\t\trequire.NoError(t, conn.Exec(ctx, ddl))\n\t\tbatch, err := conn.PrepareBatch(ctx, \"INSERT INTO test_tuple\")\n\t\trequire.NoError(t, err)\n\t\tvar (\n\t\t\tcol1Data = map[string]any{\"56\": \"A\", \"a22`\": int64(1)}\n\t\t)\n\t\trequire.NoError(t, batch.Append(col1Data))\n\t\trequire.Equal(t, 1, batch.Rows())\n\t\trequire.NoError(t, batch.Send())\n\t\tvar col1 map[string]any\n\t\trequire.NoError(t, conn.QueryRow(ctx, \"SELECT * FROM test_tuple\").Scan(&col1))\n\t\tassert.Equal(t, col1Data, col1)\n\t})\n}"
      - |-
        def parse_region(url)
                    parts = URI.parse(url).host.split('.')
                    parts.each_with_index do |part, index|
                      if part == 'sqs'
                        # assume region is the part right after the 'sqs' part
                        return parts[index + 1]
                      end
                    end
                    nil # no region found
                  end
  - source_sentence: |-
      Cancel a running workflow by sync job ID.

              This will search for workflows with IDs matching the pattern sync-{sync_job_id}-*
              and cancel them. The workflow will catch the CancelledError and update the
              sync job status to CANCELLED.

              Args:
                  sync_job_id: The sync job ID to cancel

              Returns:
                  True if a workflow was found and cancelled, False otherwise
    sentences:
      - |-
        async def cancel_sync_job_workflow(self, sync_job_id: str) -> bool:
                """
                """
                try:
                    client = await temporal_client.get_client()

                    # List workflows to find the one matching our sync job
                    # Note: In production, you might want to store the workflow ID
                    # when starting it for direct lookup
                    workflows = []
                    async for workflow in client.list_workflows(
                        query=f'WorkflowId STARTS_WITH "sync-{sync_job_id}-"'
                    ):
                        workflows.append(workflow)

                    if not workflows:
                        logger.warning(f"No running workflow found for sync job {sync_job_id}")
                        return False

                    # Cancel the workflow(s)
                    for workflow in workflows:
                        handle = client.get_workflow_handle(workflow.id)
                        await handle.cancel()
                        logger.info(
                            f"Successfully cancelled workflow {workflow.id} for sync job {sync_job_id}"
                        )

                    return True

                except Exception as e:
                    logger.error(f"Failed to cancel workflow for sync job {sync_job_id}: {e}")
                    raise
      - |-
        def __init__(name, account):
                """
                
                """
      - |-
        renderRows = async (
          table,
          viewname,
          { columns, layout },
          extra,
          rows,
          state
        ) => {
          //console.log(columns);
          //console.log(layout);
          if (!columns || !layout) return "View not yet built";

          const fields = table.getFields();

          const role = extra.req.user ? extra.req.user.role_id : 100;
          var views = {};
          const getView = async (name, relation) => {
            if (views[name]) return views[name];
            const view_select = parse_view_select(name, relation);
            const view = View.findOne({ name: view_select.viewname });
            if (!view) return false;
            if (view.table_id === table.id) view.table = table;
            else view.table = Table.findOne({ id: view.table_id });
            view.view_select = view_select;
            views[name] = view;
            return view;
          };
          await set_load_actions_join_fieldviews({
            table,
            layout,
            fields,
            req: extra.req,
            res: extra.res,
          });

          const owner_field = await table.owner_fieldname();
          const subviewExtra = { ...extra };
          if (extra.req?.generate_email) {
            // no mjml markup for for nested subviews, only for the top view
            subviewExtra.req = { ...extra.req, isSubView: true };
          }
          return await asyncMap(rows, async (row) => {
            await eachView(layout, async (segment) => {
              // do all the parsing with data here? make a factory
              const view = await getView(segment.view, segment.relation);
              if (!view)
                throw new InvalidConfiguration(
                  `View ${viewname} incorrectly configured: cannot find view ${segment.view}`
                );
              view.check_viewtemplate();
              if (view.viewtemplateObj.renderRows && view.view_select.type === "Own") {
                segment.contents = (
                  await view.viewtemplateObj.renderRows(
                    view.table,
                    view.name,
                    view.configuration,
                    subviewExtra,
                    [row],
                    state
                  )
                )[0];
              } else {
                let state1 = {};
                const pk_name = table.pk_name;
                const get_row_val = (k) => {
                  //handle expanded joinfields
                  if (row[k] === null) return null;
                  if (row[k]?.id === null) return null;
                  return row[k]?.id || row[k];
                };
                const get_user_id = () => (extra.req.user ? extra.req.user.id : 0);
                if (view.view_select.type === "RelationPath" && view.table_id) {
                  const targetTbl = Table.findOne({ id: view.table_id });
                  const relation = new Relation(
                    segment.relation,
                    targetTbl.name,
                    displayType(await view.get_state_fields())
                  );
                  state1 = pathToState(
                    relation,
                    relation.isFixedRelation() ? get_user_id : get_row_val
                  );
                } else {
                  switch (view.view_select.type) {
                    case "Own":
                      state1 = { [pk_name]: get_row_val(pk_name) };
                      break;
                    case "Independent":
                      state1 = {};
                      break;
                    case "ChildList":
                    case "OneToOneShow":
                      state1 = {
                        [view.view_select.through
                          ? `${view.view_select.throughTable}.${view.view_select.through}.${view.view_select.table_name}.${view.view_select.field_name}`
                          : view.view_select.field_name]: get_row_val(pk_name),
                      };
                      break;
                    case "ParentShow":
                      //todo set by pk name of parent tablr
                      state1 = {
                        id: get_row_val(view.view_select.field_name),
                      };
                      break;
                  }
                }
                const extra_state = segment.extra_state_fml
                  ? eval_expression(
                      segment.extra_state_fml,
                      {
                        ...dollarizeObject(state),
                        session_id: getSessionId(extra.req),
                        ...row,
                      },
                      extra.req.user,
                      `Extra state formula for view ${view.name}`
                    )
                  : {};
                const { id, ...outerState } = state;
                //console.log(segment);
                if (segment.state === "local") {
                  const state2 = { ...state1, ...extra_state };
                  const qs = stateToQueryString(state2, true);
                  if (
                    view.name === viewname &&
                    JSON.stringify(state) === JSON.stringify(state2)
                  )
                    throw new InvalidConfiguration(
                      `View ${view.name} embeds itself with same state; inifinite loop detected`
                    );
                  segment.contents = div(
                    {
                      class: "d-inline",
                      "data-sc-embed-viewname": view.name,
                      "data-sc-local-state": `/view/${view.name}${qs}`,
                    },
                    await view.run(state2, subviewExtra, view.isRemoteTable())
                  );
                } else {
                  const state2 = { ...outerState, ...state1, ...extra_state };
                  const qs = stateToQueryString(state2, true);

                  if (
                    view.name === viewname &&
                    JSON.stringify(state) === JSON.stringify(state2)
                  )
                    throw new InvalidConfiguration(
                      `View ${view.name} embeds itself with same state; inifinite loop detected`
                    );
                  segment.contents = div(
                    {
                      class: "d-inline",
                      "data-sc-embed-viewname": view.name,
                      "data-sc-view-source": `/view/${view.name}${qs}`,
                    },
                    await view.run(state2, subviewExtra, view.isRemoteTable())
                  );
                }
              }
            });
            const user_id = extra.req.user ? extra.req.user.id : null;

            const is_owner =
              table.ownership_formula && user_id && role > table.min_role_read
                ? await table.is_owner(extra.req.user, row)
                : owner_field && user_id && row[owner_field] === user_id;

            return render(
              row,
              fields,
              layout,
              viewname,
              table,
              role,
              extra.req,
              is_owner,
              state,
              extra
            );
          });
        }
  - source_sentence: >-
      AddFlags adds flags related to NodeLifecycleController for controller
      manager to the specified FlagSet.
    sentences:
      - "func (o *NodeLifecycleControllerOptions) AddFlags(fs *pflag.FlagSet) {\n\tif o == nil {\n\t\treturn\n\t}\n\n\tfs.DurationVar(&o.NodeStartupGracePeriod.Duration, \"node-startup-grace-period\", o.NodeStartupGracePeriod.Duration,\n\t\t\"Amount of time which we allow starting Node to be unresponsive before marking it unhealthy.\")\n\tfs.DurationVar(&o.NodeMonitorGracePeriod.Duration, \"node-monitor-grace-period\", o.NodeMonitorGracePeriod.Duration,\n\t\t\"Amount of time which we allow running Node to be unresponsive before marking it unhealthy. \"+\n\t\t\t\"Must be N times more than kubelet's nodeStatusUpdateFrequency, \"+\n\t\t\t\"where N means number of retries allowed for kubelet to post node status. \"+\n\t\t\t\"This value should also be greater than the sum of HTTP2_PING_TIMEOUT_SECONDS and HTTP2_READ_IDLE_TIMEOUT_SECONDS\")\n\tfs.Float32Var(&o.NodeEvictionRate, \"node-eviction-rate\", 0.1, \"Number of nodes per second on which pods are deleted in case of node failure when a zone is healthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters.\")\n\tfs.Float32Var(&o.SecondaryNodeEvictionRate, \"secondary-node-eviction-rate\", 0.01, \"Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.\")\n\tfs.Int32Var(&o.LargeClusterSizeThreshold, \"large-cluster-size-threshold\", 50, fmt.Sprintf(\"Number of nodes from which %s treats the cluster as large for the eviction logic purposes. --secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller. 
Notice: If nodes reside in multiple zones, this threshold will be considered as zone node size threshold for each zone to determine node eviction rate independently.\", names.NodeLifecycleController))\n\tfs.Float32Var(&o.UnhealthyZoneThreshold, \"unhealthy-zone-threshold\", 0.55, \"Fraction of Nodes in a zone which needs to be not Ready (minimum 3) for zone to be treated as unhealthy. \")\n}"
      - "func (v Value) IsNull() bool {\n\treturn v.Val == nil || v.Typ == querypb.Type_NULL_TYPE\n}"
      - |-
        public function response(array $errors)
            {
                if ($this->ajax() || $this->wantsJson()) {
                    return new JsonResponse($errors, 422);
                }

                return $this->redirector->to($this->getRedirectUrl())
                                                ->withInput($this->except($this->dontFlash))
                                                ->withErrors($errors, $this->errorBag);
            }
  - source_sentence: >-
      Count of all the processing errors in this task and its subtasks.


      Generated from protobuf field <code>int32 total_processing_error_count =
      21;</code>

      @return int
    sentences:
      - |-
        fn add_helper(&self, msg: SignedMessage) -> Result<(), Error> {
                let from = msg.from();
                let cur_ts = self.cur_tipset.lock().clone();
                add_helper(
                    self.api.as_ref(),
                    self.bls_sig_cache.as_ref(),
                    self.pending.as_ref(),
                    msg,
                    self.get_state_sequence(&from, &cur_ts)?,
                )
            }
      - |-
        public function getTotalProcessingErrorCount()
            {
                return $this->total_processing_error_count;
            }
      - "def datetime_utc_to_local(dt):\n\t\"\"\"\n\t\n\t\"\"\"\n\tdt = dt.replace(tzinfo=dateutil.tz.tzutc())\n\tdt = dt.astimezone(dateutil.tz.tzlocal())\n\treturn dt.replace(tzinfo=None)"
  - source_sentence: >-
      Computes the absolute value of each element retrieved from a strided input
      array `x` via a callback function and assigns each result to an element in
      a strided output array `y`.


      @param {NonNegativeInteger} N - number of indexed elements

      @param {Collection} x - input array/collection

      @param {integer} strideX - `x` stride length

      @param {NonNegativeInteger} offsetX - starting `x` index

      @param {Collection} y - destination array/collection

      @param {integer} strideY - `y` stride length

      @param {NonNegativeInteger} offsetY - starting `y` index

      @param {Callback} clbk - callback

      @param {*} [thisArg] - callback execution context

      @returns {Collection} `y`


      @example

      function accessor( v ) {
          return v * 2.0;
      }


      var x = [ 1.0, -2.0, 3.0, -4.0, 5.0 ];

      var y = [ 0.0, 0.0, 0.0, 0.0, 0.0 ];


      absBy( x.length, x, 1, 0, y, 1, 0, accessor );


      console.log( y );

      // => [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
    sentences:
      - |-
        public ArrayList<Skyline> findSkyline(int start, int end) {
                // Base case: only one building, return its skyline.
                if (start == end) {
                    ArrayList<Skyline> list = new ArrayList<>();
                    list.add(new Skyline(building[start].left, building[start].height));
                    list.add(new Skyline(building[end].right, 0)); // Add the end of the building
                    return list;
                }

                int mid = (start + end) / 2;

                ArrayList<Skyline> sky1 = this.findSkyline(start, mid); // Find the skyline of the left half
                ArrayList<Skyline> sky2 = this.findSkyline(mid + 1, end); // Find the skyline of the right half
                return this.mergeSkyline(sky1, sky2); // Merge the two skylines
            }
      - |-
        def get_supported_systems_info
                  request(
                    :expects    => 200,
                    :idempotent => true,
                    :method     => 'GET',
                    :parser     => Fog::ToHashDocument.new,
                    :path       => 'supportedSystemsInfo'
                  )
                end
      - "function absBy( N, x, strideX, offsetX, y, strideY, offsetY, clbk, thisArg ) {\n\treturn mapBy( N, x, strideX, offsetX, y, strideY, offsetY, abs, clbk, thisArg ); // eslint-disable-line max-len\n}"
pipeline_tag: sentence-similarity
library_name: sentence-transformers

SentenceTransformer based on Shuu12121/CodeModernBERT-Owl-v1

This is a sentence-transformers model finetuned from Shuu12121/CodeModernBERT-Owl-v1. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

Model Details

Model Description

  • Model Type: Sentence Transformer
  • Base model: Shuu12121/CodeModernBERT-Owl-v1
  • Maximum Sequence Length: 1024 tokens
  • Output Dimensionality: 768 dimensions
  • Similarity Function: Cosine Similarity

Model Sources

Full Model Architecture

SentenceTransformer(
  (0): Transformer({'max_seq_length': 1024, 'do_lower_case': False, 'architecture': 'ModernBertModel'})
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)

Usage

Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

pip install -U sentence-transformers

Then you can load this model and run inference.

from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("sentence_transformers_model_id")
# Run inference
sentences = [
    'Computes the absolute value of each element retrieved from a strided input array `x` via a callback function and assigns each result to an element in a strided output array `y`.\n\n@param {NonNegativeInteger} N - number of indexed elements\n@param {Collection} x - input array/collection\n@param {integer} strideX - `x` stride length\n@param {NonNegativeInteger} offsetX - starting `x` index\n@param {Collection} y - destination array/collection\n@param {integer} strideY - `y` stride length\n@param {NonNegativeInteger} offsetY - starting `y` index\n@param {Callback} clbk - callback\n@param {*} [thisArg] - callback execution context\n@returns {Collection} `y`\n\n@example\nfunction accessor( v ) {\n    return v * 2.0;\n}\n\nvar x = [ 1.0, -2.0, 3.0, -4.0, 5.0 ];\nvar y = [ 0.0, 0.0, 0.0, 0.0, 0.0 ];\n\nabsBy( x.length, x, 1, 0, y, 1, 0, accessor );\n\nconsole.log( y );\n// => [ 2.0, 4.0, 6.0, 8.0, 10.0 ]',
    'function absBy( N, x, strideX, offsetX, y, strideY, offsetY, clbk, thisArg ) {\n\treturn mapBy( N, x, strideX, offsetX, y, strideY, offsetY, abs, clbk, thisArg ); // eslint-disable-line max-len\n}',
    'public ArrayList<Skyline> findSkyline(int start, int end) {\n        // Base case: only one building, return its skyline.\n        if (start == end) {\n            ArrayList<Skyline> list = new ArrayList<>();\n            list.add(new Skyline(building[start].left, building[start].height));\n            list.add(new Skyline(building[end].right, 0)); // Add the end of the building\n            return list;\n        }\n\n        int mid = (start + end) / 2;\n\n        ArrayList<Skyline> sky1 = this.findSkyline(start, mid); // Find the skyline of the left half\n        ArrayList<Skyline> sky2 = this.findSkyline(mid + 1, end); // Find the skyline of the right half\n        return this.mergeSkyline(sky1, sky2); // Merge the two skylines\n    }',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities)
# tensor([[1.0000, 0.8429, 0.0136],
#         [0.8429, 1.0000, 0.1084],
#         [0.0136, 0.1084, 1.0000]])

Training Details

Training Dataset

Unnamed Dataset

  • Size: 799,680 training samples
  • Columns: sentence_0, sentence_1, and label
  • Approximate statistics based on the first 1000 samples:
    sentence_0 sentence_1 label
    type string string float
    details
    • min: 8 tokens
    • mean: 72.08 tokens
    • max: 1024 tokens
    • min: 13 tokens
    • mean: 165.78 tokens
    • max: 1024 tokens
    • min: 1.0
    • mean: 1.0
    • max: 1.0
  • Samples:
    sentence_0 sentence_1 label
    Set the column title

    @param column - column number (first column is: 0)
    @param title - new column title
    setHeader = function(column, newValue) {
    const obj = this;

    if (obj.headers[column]) {
    const oldValue = obj.headers[column].textContent;
    const onchangeheaderOldValue = (obj.options.columns && obj.options.columns[column] && obj.options.columns[column].title)
    Elsewhere this is known as a "Weak Value Map". Whereas a std JS WeakMap
    is weak on its keys, this map is weak on its values. It does not retain these
    values strongly. If a given value disappears, then the entries for it
    disappear from every weak-value-map that holds it as a value.

    Just as a WeakMap only allows gc-able values as keys, a weak-value-map
    only allows gc-able values as values.

    Unlike a WeakMap, a weak-value-map unavoidably exposes the non-determinism of
    gc to its clients. Thus, both the ability to create one, as well as each
    created one, must be treated as dangerous capabilities that must be closely
    held. A program with access to these can read side channels through gc that do
    *not* rely on the ability to measure duration. This is a separate, and bad,
    timing-independent side channel.

    This non-determinism also enables code to escape deterministic replay. In a
    blockchain context, this could cause validators to differ from each other,
    preventing consensus, and thus preventing ...
    makeFinalizingMap = (finalizer, opts) => {
    const { weakValues = false } = opts
    Creates a function that memoizes the result of func. If resolver is
    provided, it determines the cache key for storing the result based on the
    arguments provided to the memoized function. By default, the first argument
    provided to the memoized function is used as the map cache key. The func
    is invoked with the this binding of the memoized function.

    Note: The cache is exposed as the cache property on the memoized
    function. Its creation may be customized by replacing the _.memoize.Cache
    constructor with one whose instances implement the
    Map
    method interface of delete, get, has, and set.

    @static
    @memberOf _
    @since 0.1.0
    @category Function
    @param {Function} func The function to have its output memoized.
    @param {Function} [resolver] The function to resolve the cache key.
    @returns {Function} Returns the new memoized function.
    @example

    var object = { 'a': 1, 'b': 2 };
    var othe...
    function memoize(func, resolver) {
    if (typeof func != 'function'
  • Loss: MultipleNegativesRankingLoss with these parameters:
    {
        "scale": 20.0,
        "similarity_fct": "cos_sim"
    }
    

Training Hyperparameters

Non-Default Hyperparameters

  • per_device_train_batch_size: 120
  • per_device_eval_batch_size: 120
  • fp16: True
  • multi_dataset_batch_sampler: round_robin

All Hyperparameters

Click to expand
  • overwrite_output_dir: False
  • do_predict: False
  • eval_strategy: no
  • prediction_loss_only: True
  • per_device_train_batch_size: 120
  • per_device_eval_batch_size: 120
  • per_gpu_train_batch_size: None
  • per_gpu_eval_batch_size: None
  • gradient_accumulation_steps: 1
  • eval_accumulation_steps: None
  • torch_empty_cache_steps: None
  • learning_rate: 5e-05
  • weight_decay: 0.0
  • adam_beta1: 0.9
  • adam_beta2: 0.999
  • adam_epsilon: 1e-08
  • max_grad_norm: 1
  • num_train_epochs: 3
  • max_steps: -1
  • lr_scheduler_type: linear
  • lr_scheduler_kwargs: {}
  • warmup_ratio: 0.0
  • warmup_steps: 0
  • log_level: passive
  • log_level_replica: warning
  • log_on_each_node: True
  • logging_nan_inf_filter: True
  • save_safetensors: True
  • save_on_each_node: False
  • save_only_model: False
  • restore_callback_states_from_checkpoint: False
  • no_cuda: False
  • use_cpu: False
  • use_mps_device: False
  • seed: 42
  • data_seed: None
  • jit_mode_eval: False
  • use_ipex: False
  • bf16: False
  • fp16: True
  • fp16_opt_level: O1
  • half_precision_backend: auto
  • bf16_full_eval: False
  • fp16_full_eval: False
  • tf32: None
  • local_rank: 0
  • ddp_backend: None
  • tpu_num_cores: None
  • tpu_metrics_debug: False
  • debug: []
  • dataloader_drop_last: False
  • dataloader_num_workers: 0
  • dataloader_prefetch_factor: None
  • past_index: -1
  • disable_tqdm: False
  • remove_unused_columns: True
  • label_names: None
  • load_best_model_at_end: False
  • ignore_data_skip: False
  • fsdp: []
  • fsdp_min_num_params: 0
  • fsdp_config: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
  • fsdp_transformer_layer_cls_to_wrap: None
  • accelerator_config: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
  • deepspeed: None
  • label_smoothing_factor: 0.0
  • optim: adamw_torch
  • optim_args: None
  • adafactor: False
  • group_by_length: False
  • length_column_name: length
  • ddp_find_unused_parameters: None
  • ddp_bucket_cap_mb: None
  • ddp_broadcast_buffers: False
  • dataloader_pin_memory: True
  • dataloader_persistent_workers: False
  • skip_memory_metrics: True
  • use_legacy_prediction_loop: False
  • push_to_hub: False
  • resume_from_checkpoint: None
  • hub_model_id: None
  • hub_strategy: every_save
  • hub_private_repo: None
  • hub_always_push: False
  • hub_revision: None
  • gradient_checkpointing: False
  • gradient_checkpointing_kwargs: None
  • include_inputs_for_metrics: False
  • include_for_metrics: []
  • eval_do_concat_batches: True
  • fp16_backend: auto
  • push_to_hub_model_id: None
  • push_to_hub_organization: None
  • mp_parameters:
  • auto_find_batch_size: False
  • full_determinism: False
  • torchdynamo: None
  • ray_scope: last
  • ddp_timeout: 1800
  • torch_compile: False
  • torch_compile_backend: None
  • torch_compile_mode: None
  • include_tokens_per_second: False
  • include_num_input_tokens_seen: False
  • neftune_noise_alpha: None
  • optim_target_modules: None
  • batch_eval_metrics: False
  • eval_on_start: False
  • use_liger_kernel: False
  • liger_kernel_config: None
  • eval_use_gather_object: False
  • average_tokens_across_devices: False
  • prompts: None
  • batch_sampler: batch_sampler
  • multi_dataset_batch_sampler: round_robin
  • router_mapping: {}
  • learning_rate_mapping: {}

Training Logs

Epoch Step Training Loss
0.0750 500 0.2167
0.1501 1000 0.1158
0.2251 1500 0.1081
0.3001 2000 0.1079
0.3752 2500 0.0994
0.4502 3000 0.0941
0.5252 3500 0.0873
0.6002 4000 0.0967
0.6753 4500 0.0863
0.7503 5000 0.0829
0.8253 5500 0.0821
0.9004 6000 0.0821
0.9754 6500 0.0794
1.0504 7000 0.0418
1.1255 7500 0.0237
1.2005 8000 0.0233
1.2755 8500 0.0231
1.3505 9000 0.0248
1.4256 9500 0.0245
1.5006 10000 0.0237
1.5756 10500 0.025
1.6507 11000 0.0232
1.7257 11500 0.0231
1.8007 12000 0.0218
1.8758 12500 0.0233
1.9508 13000 0.0221
2.0258 13500 0.0177
2.1008 14000 0.0072
2.1759 14500 0.0066
2.2509 15000 0.0068
2.3259 15500 0.0069
2.4010 16000 0.0062
2.4760 16500 0.0068
2.5510 17000 0.0064
2.6261 17500 0.0061
2.7011 18000 0.0062
2.7761 18500 0.0058
2.8511 19000 0.0057
2.9262 19500 0.0058

Framework Versions

  • Python: 3.10.12
  • Sentence Transformers: 5.0.0
  • Transformers: 4.53.1
  • PyTorch: 2.7.0+cu128
  • Accelerate: 1.7.0
  • Datasets: 3.6.0
  • Tokenizers: 0.21.2

Citation

BibTeX

Sentence Transformers

@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}

MultipleNegativesRankingLoss

@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}