---
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- dense
- generated_from_trainer
- dataset_size:277492
- loss:CachedMultipleNegativesRankingLoss
base_model: benjamintli/modernbert-code-v3-hard-negatives
widget:
- source_sentence: '// Uint is a helper routine that allocates a new uint value to
store v and
// returns a pointer to it. This is useful when assigning optional parameters.'
sentences:
- "func (c *Animation) GetCurrentTimeWithParams(v *AnimationGetCurrentTimeParams)\
\ (float64, error) {\n\tresp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(),\
\ &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: \"Animation.getCurrentTime\"\
, Params: v})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar chromeData struct\
\ {\n\t\tResult struct {\n\t\t\tCurrentTime float64\n\t\t}\n\t}\n\n\tif resp ==\
\ nil {\n\t\treturn 0, &gcdmessage.ChromeEmptyResponseErr{}\n\t}\n\n\t// test\
\ if error first\n\tcerr := &gcdmessage.ChromeErrorResponse{}\n\tjson.Unmarshal(resp.Data,\
\ cerr)\n\tif cerr != nil && cerr.Error != nil {\n\t\treturn 0, &gcdmessage.ChromeRequestErr{Resp:\
\ cerr}\n\t}\n\n\tif err := json.Unmarshal(resp.Data, &chromeData); err != nil\
\ {\n\t\treturn 0, err\n\t}\n\n\treturn chromeData.Result.CurrentTime, nil\n}"
- "func Uint(v uint) *uint {\n\tp := new(uint)\n\t*p = v\n\treturn p\n}"
- "def after_init_app(self, app: FlaskUnchained):\n \"\"\"\n Configure\
\ the JSON encoder for Flask to be able to serialize Enums,\n LocalProxy\
\ objects, and SQLAlchemy models.\n \"\"\"\n self.set_json_encoder(app)\n\
\ app.before_first_request(self.register_model_resources)"
- source_sentence: 'Returns a template for the parent of this template.
@throws ValidationException if the template has no parent.'
sentences:
- "func BodyContainsOr(values ...string) ResponseCondition {\n\treturn func(res\
\ *http.Response) error {\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err\
\ != nil {\n\t\t\treturn fmt.Errorf(\"failed to read response body: %s\", err)\n\
\t\t}\n\n\t\tfor _, value := range values {\n\t\t\tif strings.Contains(string(body),\
\ value) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"could\
\ not find '%v' in body '%s'\", values, string(body))\n\t}\n}"
- "protected function after_update($result) {\n global $DB;\n\n if\
\ (!$result) {\n $this->beforeupdate = null;\n return;\n\
\ }\n\n // The parent ID has changed, we need to fix all the paths\
\ of the children.\n if ($this->beforeupdate->get('parentid') != $this->get('parentid'))\
\ {\n $beforepath = $this->beforeupdate->get('path') . $this->get('id')\
\ . '/';\n\n $like = $DB->sql_like('path', '?');\n $likesearch\
\ = $DB->sql_like_escape($beforepath) . '%';\n\n $table = '{' . self::TABLE\
\ . '}';\n $sql = \"UPDATE $table SET path = REPLACE(path, ?, ?) WHERE\
\ \" . $like;\n $DB->execute($sql, array(\n $beforepath,\n\
\ $this->get('path') . $this->get('id') . '/',\n \
\ $likesearch\n ));\n\n // Resolving sortorder holes left\
\ after changing parent.\n $table = '{' . self::TABLE . '}';\n \
\ $sql = \"UPDATE $table SET sortorder = sortorder -1 \"\n \
\ . \" WHERE competencyframeworkid = ? AND parentid = ? AND sortorder\
\ > ?\";\n $DB->execute($sql, array($this->get('competencyframeworkid'),\n\
\ $this->beforeupdate->get('parentid'),\n\
\ $this->beforeupdate->get('sortorder')\n\
\ ));\n }\n\n $this->beforeupdate\
\ = null;\n }"
- "public PathTemplate parentTemplate() {\n int i = segments.size();\n Segment\
\ seg = segments.get(--i);\n if (seg.kind() == SegmentKind.END_BINDING) {\n\
\ while (i > 0 && segments.get(--i).kind() != SegmentKind.BINDING) {}\n \
\ }\n if (i == 0) {\n throw new ValidationException(\"template does\
\ not have a parent\");\n }\n return new PathTemplate(segments.subList(0,\
\ i), urlEncoding);\n }"
- source_sentence: 'Build a potentially nested fieldgroup
@param mixed $valueOrGroup Value of item, or title of group
@param string|array $titleOrOptions Title of item, or options in grouip
@return ArrayData Data for this item'
sentences:
- "protected function getFieldOption($valueOrGroup, $titleOrOptions)\n {\n \
\ // Return flat option\n if (!is_array($titleOrOptions)) {\n \
\ return parent::getFieldOption($valueOrGroup, $titleOrOptions);\n \
\ }\n\n // Build children from options list\n $options = new\
\ ArrayList();\n foreach ($titleOrOptions as $childValue => $childTitle)\
\ {\n $options->push($this->getFieldOption($childValue, $childTitle));\n\
\ }\n\n return new ArrayData(array(\n 'Title' => $valueOrGroup,\n\
\ 'Options' => $options\n ));\n }"
- "public static function minify($content, array $options = [])\n {\n \
\ $min = preg_replace(['/[\\n\\r]/', '/\\>[^\\S ]+/s', '/[^\\S ]+\\', '<', '\\\\1'], trim($content));\n $min = str_replace(['>\
\ <'], ['><'], $min);\n \n if (ArrayHelper::getValue($options, 'comments',\
\ false)) {\n $min = preg_replace('//Uis', '', $min);\n\
\ }\n \n return $min;\n }"
- "private function loadXInclude(XInclude $xinclude, $filePath){\n //load\
\ DOMDocument\n $xml = new DOMDocument();\n $loadSuccess = $xml->load($filePath);\n\
\ $node = $xml->documentElement;\n if($loadSuccess && !is_null($node)){\n\
\ //parse the href content\n $parser = new ParserFactory($xml);\n\
\ $parser->loadContainerStatic($node, $xinclude->getBody());\n \
\ }else{\n throw new XIncludeException('Cannot load the XInclude\
\ DOM XML', $xinclude);\n }\n }"
- source_sentence: "Check for new unread messages and send them to the custom api\n\
\n @param client_id: ID of client user"
sentences:
- "public function getLatMap()\n {\n if (null === $this->latMap) {\n \
\ $this->latMap = $this->getTransliterationMap(Settings::ALPHABET_LAT);\n\
\ }\n\n return $this->latMap;\n }"
- "def check_new_messages(client_id):\n \"\"\"Check for new unread messages and\
\ send them to the custom api\n\n @param client_id: ID of client user\n \
\ \"\"\"\n # Return if driver is not defined or if whatsapp is not logged in.\n\
\ # Stop the timer as well\n if client_id not in drivers or not drivers[client_id]\
\ or not drivers[client_id].is_logged_in():\n timers[client_id].stop()\n\
\ return\n\n # Acquire a lock on thread\n if not acquire_semaphore(client_id,\
\ True):\n return\n\n try:\n # Get all unread messages\n \
\ res = drivers[client_id].get_unread()\n # Mark all of them as seen\n\
\ for message_group in res:\n message_group.chat.send_seen()\n\
\ # Release thread lock\n release_semaphore(client_id)\n \
\ # If we have new messages, do something with it\n if res:\n \
\ print(res)\n except:\n pass\n finally:\n # Release lock\
\ anyway, safekeeping\n release_semaphore(client_id)"
- "def get_uppermost_library_root_state(self):\n \"\"\"Find state_copy of\
\ uppermost LibraryState\n\n Method checks if there is a parent library\
\ root state and assigns it to be the current library root state till\n \
\ there is no further parent library root state.\n \"\"\"\n\n library_root_state\
\ = self.get_next_upper_library_root_state()\n parent_library_root_state\
\ = library_root_state\n # initial a library root state has to be found\
\ and if there is no further parent root state\n # parent_library_root_state\
\ and library_root_state are no more identical\n while parent_library_root_state\
\ and library_root_state is parent_library_root_state:\n if library_root_state:\n\
\ parent_library_root_state = library_root_state.parent.get_next_upper_library_root_state()\n\
\n if parent_library_root_state:\n library_root_state\
\ = parent_library_root_state\n\n return library_root_state"
- source_sentence: If MultiTenantMiddleware is used, filter queryset by request.site_id
sentences:
- "def reduce_ticks(ax, which, maxticks=3):\n \"\"\"Given a pyplot axis, resamples\
\ its `which`-axis ticks such that are at most\n `maxticks` left.\n\n Parameters\n\
\ ----------\n ax : axis\n The axis to adjust.\n which : {'x'\
\ | 'y'}\n Which axis to adjust.\n maxticks : {3, int}\n Maximum\
\ number of ticks to use.\n\n Returns\n -------\n array\n An array\
\ of the selected ticks.\n \"\"\"\n ticks = getattr(ax, 'get_{}ticks'.format(which))()\n\
\ if len(ticks) > maxticks:\n # make sure the left/right value is not\
\ at the edge\n minax, maxax = getattr(ax, 'get_{}lim'.format(which))()\n\
\ dw = abs(maxax-minax)/10.\n start_idx, end_idx = 0, len(ticks)\n\
\ if ticks[0] < minax + dw:\n start_idx += 1\n if ticks[-1]\
\ > maxax - dw:\n end_idx -= 1\n # get reduction factor\n \
\ fac = int(len(ticks) / maxticks)\n ticks = ticks[start_idx:end_idx:fac]\n\
\ return ticks"
- "function (isPublic, name, data, ttl, published_at, coreid) {\n var rawFn\
\ = function (msg) {\n try {\n msg.setMaxAge(parseInt((ttl\
\ && (ttl >= 0)) ? ttl : 60));\n if (published_at) {\n \
\ msg.setTimestamp(moment(published_at).toDate());\n \
\ }\n }\n catch (ex) {\n logger.error(\"\
onCoreHeard - \" + ex);\n }\n return msg;\n };\n\n\
\ var msgName = (isPublic) ? \"PublicEvent\" : \"PrivateEvent\";\n \
\ var userID = (this.userID || \"\").toLowerCase() + \"/\";\n name =\
\ (name) ? name.toString() : name;\n if (name && name.indexOf && (name.indexOf(userID)\
\ == 0)) {\n name = name.substring(userID.length);\n }\n\n \
\ data = (data) ? data.toString() : data;\n this.sendNONTypeMessage(msgName,\
\ { event_name: name, _raw: rawFn }, data);\n }"
- "def get_queryset(self):\n '''\n If MultiTenantMiddleware is used,\
\ filter queryset by request.site_id\n '''\n queryset = super(PageList,\
\ self).get_queryset()\n if hasattr(self.request, 'site_id'):\n \
\ queryset = queryset.filter(site_id=self.request.site_id)\n return\
\ queryset"
datasets:
- benjamintli/code-retrieval-hard-negatives-llm-verified-merged
- benjamintli/code-retrieval-combined-v2
pipeline_tag: sentence-similarity
library_name: sentence-transformers
metrics:
- cosine_accuracy@1
- cosine_accuracy@3
- cosine_accuracy@5
- cosine_accuracy@10
- cosine_precision@1
- cosine_precision@3
- cosine_precision@5
- cosine_precision@10
- cosine_recall@1
- cosine_recall@3
- cosine_recall@5
- cosine_recall@10
- cosine_ndcg@10
- cosine_mrr@10
- cosine_map@100
model-index:
- name: SentenceTransformer based on benjamintli/modernbert-code-v3-hard-negatives
results:
- task:
type: information-retrieval
name: Information Retrieval
dataset:
name: eval
type: eval
metrics:
- type: cosine_accuracy@1
value: 0.8943333333333333
name: Cosine Accuracy@1
- type: cosine_accuracy@3
value: 0.943
name: Cosine Accuracy@3
- type: cosine_accuracy@5
value: 0.963
name: Cosine Accuracy@5
- type: cosine_accuracy@10
value: 0.976
name: Cosine Accuracy@10
- type: cosine_precision@1
value: 0.8943333333333333
name: Cosine Precision@1
- type: cosine_precision@3
value: 0.31433333333333335
name: Cosine Precision@3
- type: cosine_precision@5
value: 0.1926
name: Cosine Precision@5
- type: cosine_precision@10
value: 0.0976
name: Cosine Precision@10
- type: cosine_recall@1
value: 0.8943333333333333
name: Cosine Recall@1
- type: cosine_recall@3
value: 0.943
name: Cosine Recall@3
- type: cosine_recall@5
value: 0.963
name: Cosine Recall@5
- type: cosine_recall@10
value: 0.976
name: Cosine Recall@10
- type: cosine_ndcg@10
value: 0.9359015737200269
name: Cosine Ndcg@10
- type: cosine_mrr@10
value: 0.9229293650793654
name: Cosine Mrr@10
- type: cosine_map@100
value: 0.9239732035430454
name: Cosine Map@100
---
# SentenceTransformer based on benjamintli/modernbert-code-v3-hard-negatives
This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [benjamintli/modernbert-code-v3-hard-negatives](https://huggingface.co/benjamintli/modernbert-code-v3-hard-negatives) on the [code-retrieval-hard-negatives-llm-verified-merged](https://huggingface.co/datasets/benjamintli/code-retrieval-hard-negatives-llm-verified-merged) dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
## Model Details
### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [benjamintli/modernbert-code-v3-hard-negatives](https://huggingface.co/benjamintli/modernbert-code-v3-hard-negatives)
- **Maximum Sequence Length:** 1024 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
- **Training Dataset:**
- [code-retrieval-hard-negatives-llm-verified-merged](https://huggingface.co/datasets/benjamintli/code-retrieval-hard-negatives-llm-verified-merged)
### Model Sources
- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/huggingface/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
### Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 1024, 'do_lower_case': False, 'architecture': 'OptimizedModule'})
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)
```
## Usage
### Direct Usage (Sentence Transformers)
First install the Sentence Transformers library:
```bash
pip install -U sentence-transformers
```
Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer
# Download from the 🤗 Hub
model = SentenceTransformer("benjamintli/modernbert-code-v4-hard-negatives")
# Run inference
queries = [
"If MultiTenantMiddleware is used, filter queryset by request.site_id",
]
documents = [
"def get_queryset(self):\n '''\n If MultiTenantMiddleware is used, filter queryset by request.site_id\n '''\n queryset = super(PageList, self).get_queryset()\n if hasattr(self.request, 'site_id'):\n queryset = queryset.filter(site_id=self.request.site_id)\n return queryset",
'def reduce_ticks(ax, which, maxticks=3):\n """Given a pyplot axis, resamples its `which`-axis ticks such that are at most\n `maxticks` left.\n\n Parameters\n ----------\n ax : axis\n The axis to adjust.\n which : {\'x\' | \'y\'}\n Which axis to adjust.\n maxticks : {3, int}\n Maximum number of ticks to use.\n\n Returns\n -------\n array\n An array of the selected ticks.\n """\n ticks = getattr(ax, \'get_{}ticks\'.format(which))()\n if len(ticks) > maxticks:\n # make sure the left/right value is not at the edge\n minax, maxax = getattr(ax, \'get_{}lim\'.format(which))()\n dw = abs(maxax-minax)/10.\n start_idx, end_idx = 0, len(ticks)\n if ticks[0] < minax + dw:\n start_idx += 1\n if ticks[-1] > maxax - dw:\n end_idx -= 1\n # get reduction factor\n fac = int(len(ticks) / maxticks)\n ticks = ticks[start_idx:end_idx:fac]\n return ticks',
'function (isPublic, name, data, ttl, published_at, coreid) {\n var rawFn = function (msg) {\n try {\n msg.setMaxAge(parseInt((ttl && (ttl >= 0)) ? ttl : 60));\n if (published_at) {\n msg.setTimestamp(moment(published_at).toDate());\n }\n }\n catch (ex) {\n logger.error("onCoreHeard - " + ex);\n }\n return msg;\n };\n\n var msgName = (isPublic) ? "PublicEvent" : "PrivateEvent";\n var userID = (this.userID || "").toLowerCase() + "/";\n name = (name) ? name.toString() : name;\n if (name && name.indexOf && (name.indexOf(userID) == 0)) {\n name = name.substring(userID.length);\n }\n\n data = (data) ? data.toString() : data;\n this.sendNONTypeMessage(msgName, { event_name: name, _raw: rawFn }, data);\n }',
]
query_embeddings = model.encode_query(queries)
document_embeddings = model.encode_document(documents)
print(query_embeddings.shape, document_embeddings.shape)
# [1, 768] [3, 768]
# Get the similarity scores for the embeddings
similarities = model.similarity(query_embeddings, document_embeddings)
print(similarities)
# tensor([[ 0.8836, -0.0275, 0.0176]])
```
## Evaluation
### Metrics
#### Information Retrieval
* Dataset: `eval`
* Evaluated with [InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)
| Metric | Value |
|:--------------------|:-----------|
| cosine_accuracy@1 | 0.8943 |
| cosine_accuracy@3 | 0.943 |
| cosine_accuracy@5 | 0.963 |
| cosine_accuracy@10 | 0.976 |
| cosine_precision@1 | 0.8943 |
| cosine_precision@3 | 0.3143 |
| cosine_precision@5 | 0.1926 |
| cosine_precision@10 | 0.0976 |
| cosine_recall@1 | 0.8943 |
| cosine_recall@3 | 0.943 |
| cosine_recall@5 | 0.963 |
| cosine_recall@10 | 0.976 |
| **cosine_ndcg@10** | **0.9359** |
| cosine_mrr@10 | 0.9229 |
| cosine_map@100 | 0.924 |
## Training Details
### Training Dataset
#### code-retrieval-hard-negatives-llm-verified-merged
* Dataset: [code-retrieval-hard-negatives-llm-verified-merged](https://huggingface.co/datasets/benjamintli/code-retrieval-hard-negatives-llm-verified-merged) at [459ec4b](https://huggingface.co/datasets/benjamintli/code-retrieval-hard-negatives-llm-verified-merged/tree/459ec4ba07a32a325d73b065fdd40cb017cb9aea)
* Size: 277,492 training samples
* Columns: query, positive, negative_0, negative_1, negative_2, negative_3, negative_4, and negative_5
* Approximate statistics based on the first 1000 samples:
| | query | positive | negative_0 | negative_1 | negative_2 | negative_3 | negative_4 | negative_5 |
|:--------|:-------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|
| type | string | string | string | string | string | string | string | string |
| details |
A valid parentheses sequence is a non-empty string where each character is either '(' or ')', which satisfies the following constraint:
You can find a way to repeat erasing adjacent pairs of parentheses '()' until it becomes empty.
For example, '(())' and '()((()()))' are valid parentheses sequences, but ')()(' and '(()' are not.
Mike has a valid parentheses sequence. He really likes everything about his sequence, except the fact that it is quite long. So Mike has recently decided that he will replace his parentheses sequence with a new one in the near future. But not every valid parentheses sequence will satisfy him. To help you understand his requirements we'll introduce the pseudocode of function F(S):
FUNCTION F( S - a valid parentheses sequence )
BEGIN
balance = 0
max_balance = 0
FOR index FROM 1 TO LENGTH(S)
BEGIN
if S[index] == '(' then balance = balance + 1
if S[index] == ')' then balance = balance - 1
max_balance = max( max_balance, balance )
END
... | try:
for i in range(int(input())):
s=input()
balance=0
max_balance=0
for i in s:
if i=='(':balance+=1
else:
balance-=1
max_balance=max(max_balance,balance)
print('('*max_balance,')'*max_balance,sep="")
except Exception as e:
print(e)
| t=int(input())
for tt in range(t):
a,b,p=map(int,input().split())
s=input()
n=len(s)
cost = [0]*n
cost[-1] = 0
typ = ''
i=n-2
while i>=0:
if s[i]==typ:
cost[i] = cost[i+1]
else:
typ = s[i]
cost[i] = cost[i+1] + (a if typ=='A' else b)
i-=1
i=0
while cost[i] > p:
i+=1
print(i+1) | test=int(input())
for i in range(test):
s=input()
b=len(s)
list1=[]
for j in range(len(s)):
if s[j]=='.':
list1.append(j)
for i in list1:
if b-i-1 in list1 :
if i!=b-i-1 and ((s[i] and s[b-i-1]) != 'a' ):
s=s[:i]+'a'+s[i+1:b-i-1]+'a'+s[b-i:]
else:
s=s[:i]+'a'+s[i+1:]
else:
s=s[:i]+s[b-i-1]+s[i+1:]
if s==s[::-1]:
print(s)
else:
print(-1)
| from collections import Counter
def solve(A,B):
a = Counter(A)
b = Counter(B)
ans = 0
for i in a:
if i in b:
ans += min(a[i],b[i])
return ans
t = int(input())
for _ in range(t):
A = input()
B = input()
print(solve(A,B)) | l=list(map(int,input()))
t=-1
x=-1
y=-1
for i in range(len(l)):
s=l[i]
a=i+1
b=i+1
for j in range(i+1,len(l)):
if l[i] s=s+l[j]
b=j+1
else:
break
if s>t:
t=s
x=a
y=b
print(t,end=":")
print(x,y,sep="-") | t=eval(input())
a=[]
b=[]
top=-1
for __ in range(0,t):
x=input().split()
if(x[0]!="-1" and x[0]!="0"):
add=int(x[0])
if top!=-1 and add>a[top][0] :
b[top]+=1
else:
a.append((add,x[1]))
b.append(0)
top+=1
elif (x[0]=="-1"):
#print("%s %s" %(b[top],a[top][1]))
print((b[top]), end=' ')
print(a[top][1])
foo=a.pop()
bar=b.pop()
top-=1 | t=eval(input())
a=[]
b=[]
top=-1
for __ in range(0,t):
x=input().split()
if(x[0]!="-1" and x[0]!="0"):
add=int(x[0])
if top!=-1 and add>a[top][0] :
b[top]+=1
else:
a.append((add,x[1]))
b.append(0)
top+=1
elif (x[0]=="-1"):
#print("%s %s" %(b[top],a[top][1]))
print((b[top]), end=' ')
print(a[top][1])
foo=a.pop()
bar=b.pop()
top-=1 |
| Chef has a cubic die with 6 faces kept on an infinite plane. Each face has a distinct integer in the range [1,6] written on it, but the exact arrangement of the numbers on the faces of the die is unknown to Chef. Curiosity gets the better of Chef and he wants to find out o(1), o(2), ..., o(6), where o(i) is the number written opposite to the number i.
Chef performs the following N-1 steps to learn the exact arrangement of the numbers on the die. In the i-th step, Chef pushes the die in some direction (there are 4 possible directions), and the die rolls 90o in this direction. The picture below demonstrates a die and the result that it produced after rolling in each of the 4 directions respectively. For this die, we have o(1)=4, o(2)=5, o(3)=6, o(4)=1, o(5)=2, o(6)=3.
Chef records N numbers A1, A2, ..., AN, where Ai is the number written on the top of the die before the i-th step. However, the information on the direction in which he pushes the die each time are lost. Can you help h... | from itertools import permutations
def solve(n,a):
ans=[]
for des in desire:
check=1
for i in range(n-1):
if (a[i]==a[i+1]):
return [-1]
if a[i+1]==des[a[i]-1]:
check=0
break
if check:
ans=des
break
if ans:
return ans
return [-1]
per=permutations([1,2,3,4,5,6])
desire=[]
for p in per:
check=1
for i in range(1,7):
if p[i-1]==i:
check=0
break
if check:
doublecheck=1
for i in range(6):
if p[p[i]-1]!=i+1:
doublecheck=0
break
if doublecheck:
desire.append(p)
#print(desire)
for _ in range(int(input())):
n=int(input())
a=list(map(int,input().split( )))
print(*solve(n,a)) | def solve():
n = int(input())
lst = list(map(int,input().split()))
if sum(lst) <= n // 2:
print(n//2)
print("0 " * (n // 2))
else:
print(n//2 + (n // 2) % 2)
print("1 " * (n//2 + (n // 2) % 2))
for i in range(int(input())):
solve() | import sys
input = lambda: sys.stdin.readline().rstrip()
T = int(input())
for _ in range(T):
N = int(input())
A = [int(a) for a in input().split()]
if max(A) == min(A):
print(1)
print(*([1] * N))
elif N % 2 == 0:
print(2)
print(*([1, 2] * (N // 2)))
else:
for i in range(N):
if A[i-1] == A[i]:
print(2)
print(*(([1, 2] * N)[:i][::-1] + ([1, 2] * N)[:N-i]))
break
else:
print(3)
print(*([3] + [1, 2] * (N // 2)))
| import numpy as np
N=10**6+1
t=eval(input())
inp = ()
t1=ord('z')
#bag=[[0 for _ in xrange(t1)] for _ in xrange(N+1)]
bag=np.zeros((N+1,t1),dtype=np.int)
#print bag
while t:
t-=1
inp=input().split()
t2=ord(inp[3]) - ord('a')
t3=int(inp[1])
t4=int(inp[2]) + 1
if inp[0]=="1":
#print "enter"
bag[t3][t2]+=int(inp[2])
if inp[0]=="2":
sum=0
for i in range(t3,t4):
sum+=bag[i][t2]
print(sum)
#
# for j in range(ord('z')-ord('a')):
# for i in range(N+1):
# if bag[i][j]!=0:
# print bag[i][j] ,i,j
| # from math import log2
# N = 10000
# for i in range(1,N):
# # print(i)
# for m in range(i):
# if( (m^(m+1))==i ):
# print(i)
# print(m,m+1,bin(m)[2:])
# print()
# break
# # else:
# # print(-1)
# # print()
T = int(input())
ans = []
for _ in range(T):
N = int(input())
# x = log2(N+1)
if(N==1):
ans.append(2)
elif('0' not in bin(N)[2:]):
ans.append(N//2)
else:
ans.append(-1)
for i in ans:
print(i) | # from math import log2
# N = 10000
# for i in range(1,N):
# # print(i)
# for m in range(i):
# if( (m^(m+1))==i ):
# print(i)
# print(m,m+1,bin(m)[2:])
# print()
# break
# # else:
# # print(-1)
# # print()
T = int(input())
ans = []
for _ in range(T):
N = int(input())
# x = log2(N+1)
if(N==1):
ans.append(2)
elif('0' not in bin(N)[2:]):
ans.append(N//2)
else:
ans.append(-1)
for i in ans:
print(i) | # from math import log2
# N = 10000
# for i in range(1,N):
# # print(i)
# for m in range(i):
# if( (m^(m+1))==i ):
# print(i)
# print(m,m+1,bin(m)[2:])
# print()
# break
# # else:
# # print(-1)
# # print()
T = int(input())
ans = []
for _ in range(T):
N = int(input())
# x = log2(N+1)
if(N==1):
ans.append(2)
elif('0' not in bin(N)[2:]):
ans.append(N//2)
else:
ans.append(-1)
for i in ans:
print(i) |
| DevuLand is a very strange place. There are n villages in it. Some of the villages are occupied by dinosaurs while the remaining ones by villagers.
You are given the information of DevuLand
by an array D of size n. If D[i] is non-negative, it means that there are D[i] villagers in that village.
Otherwise, it means that are -D[i]
dinosaurs in that village.
It is also guaranteed that total number of villagers in DevuLand is equal to total number of dinosaurs.
Once dinosaurs got very hungry and started eating villagers. Frightened villagers gathered immediately and met their Sarpanch Deviji. Deviji, being a very daring and negotiable person, met to the head
of dinosaurs. Soon both parties called a truce. It was decided that the villagers will provide laddus to
the dinosaurs. So everyday, each villager will take exactly one laddu to one of the dinosaurs in such a way that no dinosaur remains hungry (note that this is possible because number of villagers is the same as the numbe... | # cook your dish here
for _ in range(int(input())):
n = int(input())
a = list(map(int, input().split()))
curr = 0
ans = 0
for x in a:
curr += x
ans += abs(curr)
print(ans) | from collections import deque
T=int(input())
def break_down(num):
count=0
while(len(num)!=1):
temp=0
for i in range(0,len(num)):
temp=temp+int(num[i])
num=str(temp)
count=count+1
return (int(num),count)
def digit_sum(num):
temp=0
for i in range(0,len(num)):
temp=temp+int(num[i])
num=temp
return (num)
while(T):
queue=deque()
count_n=0
count_d=0
T=T-1
N,d=[i for i in input().split()]
n,count_n=break_down(N)
D,count_D=break_down(d)
dic={}
if(D==1 or D==2 or D==4 or D==5 or D==7 or D==8):
mini=1
elif(D==3 or D==6):
mini=min(digit_sum(str(n+3)),digit_sum(str(n+6)),digit_sum(str(n+9)))
else:
mini=n
queue.append((int(N),0))
ele=int(N)
count=0
while(len(queue)!=0):
ele,count=queue.popleft()
if(ele==mini):
break
else:
if(len(str(ele))==1):
temp1=ele+int(d)
queue.append((temp1,count+1))... | # cook your dish here
test_cases = int(input())
for i in range(test_cases):
no_of_elements = int(input())
sequence = list(map(int, input().split()))
d1 = sequence[1] - sequence[0]
d2 = sequence[2] - sequence[1]
d3 = (sequence[3] - sequence[0])/3
d4 = (sequence[3] - sequence[1])/2
d5 = (sequence[2] - sequence[0])/2
if (d2 == d4):
d = d2
elif(d3 == d5):
d = d3
elif(d1 == d3):
d = d1
elif(d1 == d5):
d = d1
if (d == d1):
for i in range(no_of_elements):
sequence[i] = int(sequence[0] + i*d)
else:
for i in range(no_of_elements):
sequence[i] = int(sequence[-1] - ((no_of_elements - i - 1)*d))
for i in sequence:
print(i, end=" ")
print('\n')
| from collections import Counter
try:
for _ in range(int(input())):
n=int(input())
s=input()
d1=dict(Counter(s))
u,d,r,l=0,0,0,0
if 'U' in d1:
u=d1['U']
else:
u=0
if 'D' in d1:
d=d1['D']
else:
d=0
if 'R' in d1:
r=d1['R']
else:
r=0
if 'L' in d1:
l=d1['L']
else:
l=0
x=0
y=0
if l==r:
x=0
elif l>r:
x=-(l-r)
elif r>l:
x=r-l
if u==d:
y=0
elif d>u:
y=-(d-u)
elif u>d:
y=u-d
# print(x,y)
if x==0 and y==0:
print(n)
continue
print(n-(abs(x)+abs(y)))
except:
pass
| from bisect import bisect_left, insort_left
a = []
n = int(input())
for _ in range(n):
#print(a)
s, d = list(map(int, input().split()))
if len(a) == 0:
print(s, s+d - 1)
a.append((s, s + d - 1))
continue
p = bisect_left(a, (s, s + d - 1))
#print('p', p)
ok = True
if p > 0 and a[p-1][1] >= s:
ok = False
if p < len(a) and a[p][0] <= s + d - 1:
ok = False
if ok:
insort_left(a, (s, s + d - 1))
print(s, s + d - 1)
else:
ok = False
for i in range(len(a)):
if i == 0:
if a[0][0] > d:
print(1,d)
a = [(1, d)] + a
ok = True
break
else:
if a[i - 1][1] + d < a[i][0]:
print(a[i - 1][1] + 1, a[i - 1][1] + d)
insort_left(a, (a[i - 1][1] + 1, a[i - 1][1] + d))
ok = True
break
... | import fractions
for t in range(int(input())):
h,u,d = list(map(int,input().split()))
g = fractions.gcd(u,d)
if (h%g!=0):
print(-1)
else:
m = 0
n = 0
while (True):
n = (float(m)*u-h)/d
if (n>0 and int(n) == n):
break
m+=1
print(int(m+n)) | import fractions
for t in range(int(input())):
h,u,d = list(map(int,input().split()))
g = fractions.gcd(u,d)
if (h%g!=0):
print(-1)
else:
m = 0
n = 0
while (True):
n = (float(m)*u-h)/d
if (n>0 and int(n) == n):
break
m+=1
print(int(m+n)) |
* Loss: [CachedMultipleNegativesRankingLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cachedmultiplenegativesrankingloss) with these parameters:
```json
{
"scale": 20.0,
"similarity_fct": "cos_sim",
"mini_batch_size": 128,
"gather_across_devices": false,
"directions": [
"query_to_doc"
],
"partition_mode": "joint",
"hardness_mode": null,
"hardness_strength": 0.0
}
```
### Evaluation Dataset
#### code-retrieval-combined-v2
* Dataset: [code-retrieval-combined-v2](https://huggingface.co/datasets/benjamintli/code-retrieval-combined-v2) at [2b971a6](https://huggingface.co/datasets/benjamintli/code-retrieval-combined-v2/tree/2b971a6d597823ab7ff10b898ae6f3c0fdbbfa23)
* Size: 31,516 evaluation samples
* Columns: query and positive
* Approximate statistics based on the first 1000 samples:
| | query | positive |
|:--------|:-----------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|
| type | string | string |
| details | This gets the version of OpenALPR
:return: Version information | def get_version(self):
"""
This gets the version of OpenALPR
:return: Version information
"""
ptr = self._get_version_func(self.alpr_pointer)
version_number = ctypes.cast(ptr, ctypes.c_char_p).value
version_number = _convert_from_charp(version_number)
self._free_json_mem_func(ctypes.c_void_p(ptr))
return version_number |
| Remove all unnecessary comments from a lexer or parser file | public String stripUnnecessaryComments(String javaContent, AntlrOptions options) {
if (!options.isOptimizeCodeQuality()) {
return javaContent;
}
javaContent = stripMachineDependentPaths(javaContent);
if (options.isStripAllComments()) {
javaContent = stripAllComments(javaContent);
}
return javaContent;
} |
| Serialize reply to array or JSON.
@param {Object} packet
@param {String} packet.method "get", "search", "post", "put", "delete", "sub", "unsub".
@param {String} packet.resource
@param {String} packet.id
@param {*} packet.body
@param {Number} [packet.status]
@param {Number\|String} [packet.date]
@param {Object} [packet.headers]
@param {Boolean} [json] true to generate JSON instead of array.
@returns {Array\|String\|null} | function reply(packet, json) {
return _create(packet, packet.status \|\| 500, (METHODS[packet.method] \|\| '') + packet.resource, json);
} |
* Loss: [CachedMultipleNegativesRankingLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cachedmultiplenegativesrankingloss) with these parameters:
```json
{
"scale": 20.0,
"similarity_fct": "cos_sim",
"mini_batch_size": 128,
"gather_across_devices": false,
"directions": [
"query_to_doc"
],
"partition_mode": "joint",
"hardness_mode": null,
"hardness_strength": 0.0
}
```
### Training Hyperparameters
#### Non-Default Hyperparameters
- `eval_strategy`: steps
- `per_device_train_batch_size`: 1024
- `per_device_eval_batch_size`: 1024
- `num_train_epochs`: 1
- `warmup_steps`: 0.05
- `bf16`: True
- `dataloader_num_workers`: 4
- `load_best_model_at_end`: True
- `push_to_hub`: True
- `hub_model_id`: modernbert-code-v4-hard-negatives
- `batch_sampler`: no_duplicates
#### All Hyperparameters