Commit
·
0f6d058
1
Parent(s):
511c4b4
Add missing
Browse files
- .gitattributes +4 -0
- inference_java.py +12 -6
- inference_javascript.py +12 -6
- inference_python.py +12 -6
- javascript_add/tasky_commits_javascript_0_32909.jsonl +3 -0
- javascript_add/tasky_commits_javascript_329090_361999.jsonl +3 -0
- javascript_add/tasky_commits_javascript_460726_493635.jsonl +3 -0
- javascript_add/tasky_commits_javascript_65818_98727.jsonl +3 -0
.gitattributes
CHANGED
|
@@ -531,3 +531,7 @@ javascript/tasky_commits_javascript_559453_592362.jsonl filter=lfs diff=lfs merg
|
|
| 531 |
python/tasky_commits_python_427817_460726.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 532 |
python/tasky_commits_python_4311079_4343988.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 533 |
python/tasky_commits_python_5397076_5429985.jsonl filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 531 |
python/tasky_commits_python_427817_460726.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 532 |
python/tasky_commits_python_4311079_4343988.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 533 |
python/tasky_commits_python_5397076_5429985.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 534 |
+
javascript_add/tasky_commits_javascript_65818_98727.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 535 |
+
javascript_add/tasky_commits_javascript_0_32909.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 536 |
+
javascript_add/tasky_commits_javascript_329090_361999.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 537 |
+
javascript_add/tasky_commits_javascript_460726_493635.jsonl filter=lfs diff=lfs merge=lfs -text
|
inference_java.py
CHANGED
|
@@ -28,7 +28,7 @@ def parse_args():
|
|
| 28 |
required=True,
|
| 29 |
help="Ending file number to download. Valid values: 0 - 1023",
|
| 30 |
)
|
| 31 |
-
parser.add_argument("--batch_size", type=int, default=
|
| 32 |
parser.add_argument(
|
| 33 |
"--model_name",
|
| 34 |
type=str,
|
|
@@ -103,16 +103,22 @@ if __name__ == "__main__":
|
|
| 103 |
print("Exists:", tasky_commits_path)
|
| 104 |
exit()
|
| 105 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
|
| 107 |
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
|
| 108 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 109 |
model.to(device)
|
| 110 |
model.eval()
|
| 111 |
|
| 112 |
-
path = "java_add_messages.jsonl"
|
| 113 |
-
ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
|
| 114 |
-
ds = ds[range(args.start, min(args.end, len(ds)))]
|
| 115 |
-
df = pd.DataFrame(ds, index=None)
|
| 116 |
#tasky_commits_path = f"tasky_commits_javascript_{args.start}_{args.end}.jsonl"
|
| 117 |
#if os.path.exists(f"javascript/{tasky_commits_path}"):
|
| 118 |
# print("Exists:", tasky_commits_path)
|
|
@@ -127,7 +133,7 @@ if __name__ == "__main__":
|
|
| 127 |
# Write two jsonl files:
|
| 128 |
# 1) Probas for all of C4
|
| 129 |
# 2) Probas + texts for samples predicted as tasky
|
| 130 |
-
tasky_commits_path = f"tasky_commits_java_{args.start}_{args.end}.jsonl"
|
| 131 |
|
| 132 |
with open(tasky_commits_path, "w") as f:
|
| 133 |
for i in range(len(preds)):
|
|
|
|
| 28 |
required=True,
|
| 29 |
help="Ending file number to download. Valid values: 0 - 1023",
|
| 30 |
)
|
| 31 |
+
parser.add_argument("--batch_size", type=int, default=8, help="Batch size")
|
| 32 |
parser.add_argument(
|
| 33 |
"--model_name",
|
| 34 |
type=str,
|
|
|
|
| 103 |
print("Exists:", tasky_commits_path)
|
| 104 |
exit()
|
| 105 |
|
| 106 |
+
path = "java_add_messages.jsonl"
|
| 107 |
+
ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
|
| 108 |
+
if args.start > len(ds): exit()
|
| 109 |
+
ds = ds[range(args.start, min(args.end, len(ds)))]
|
| 110 |
+
df = pd.DataFrame(ds, index=None)
|
| 111 |
+
|
| 112 |
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
|
| 113 |
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
|
| 114 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 115 |
model.to(device)
|
| 116 |
model.eval()
|
| 117 |
|
| 118 |
+
#path = "java_add_messages.jsonl"
|
| 119 |
+
#ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
|
| 120 |
+
#ds = ds[range(args.start, min(args.end, len(ds)))]
|
| 121 |
+
#df = pd.DataFrame(ds, index=None)
|
| 122 |
#tasky_commits_path = f"tasky_commits_javascript_{args.start}_{args.end}.jsonl"
|
| 123 |
#if os.path.exists(f"javascript/{tasky_commits_path}"):
|
| 124 |
# print("Exists:", tasky_commits_path)
|
|
|
|
| 133 |
# Write two jsonl files:
|
| 134 |
# 1) Probas for all of C4
|
| 135 |
# 2) Probas + texts for samples predicted as tasky
|
| 136 |
+
tasky_commits_path = f"java_add/tasky_commits_java_{args.start}_{args.end}.jsonl"
|
| 137 |
|
| 138 |
with open(tasky_commits_path, "w") as f:
|
| 139 |
for i in range(len(preds)):
|
inference_javascript.py
CHANGED
|
@@ -28,7 +28,7 @@ def parse_args():
|
|
| 28 |
required=True,
|
| 29 |
help="Ending file number to download. Valid values: 0 - 1023",
|
| 30 |
)
|
| 31 |
-
parser.add_argument("--batch_size", type=int, default=
|
| 32 |
parser.add_argument(
|
| 33 |
"--model_name",
|
| 34 |
type=str,
|
|
@@ -103,16 +103,22 @@ if __name__ == "__main__":
|
|
| 103 |
print("Exists:", tasky_commits_path)
|
| 104 |
exit()
|
| 105 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
|
| 107 |
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
|
| 108 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 109 |
model.to(device)
|
| 110 |
model.eval()
|
| 111 |
|
| 112 |
-
path = "javascript_add_messages.jsonl"
|
| 113 |
-
ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
|
| 114 |
-
ds = ds[range(args.start, min(args.end, len(ds)))]
|
| 115 |
-
df = pd.DataFrame(ds, index=None)
|
| 116 |
#tasky_commits_path = f"tasky_commits_javascript_{args.start}_{args.end}.jsonl"
|
| 117 |
#if os.path.exists(f"javascript/{tasky_commits_path}"):
|
| 118 |
# print("Exists:", tasky_commits_path)
|
|
@@ -127,7 +133,7 @@ if __name__ == "__main__":
|
|
| 127 |
# Write two jsonl files:
|
| 128 |
# 1) Probas for all of C4
|
| 129 |
# 2) Probas + texts for samples predicted as tasky
|
| 130 |
-
tasky_commits_path = f"tasky_commits_javascript_{args.start}_{args.end}.jsonl"
|
| 131 |
|
| 132 |
with open(tasky_commits_path, "w") as f:
|
| 133 |
for i in range(len(preds)):
|
|
|
|
| 28 |
required=True,
|
| 29 |
help="Ending file number to download. Valid values: 0 - 1023",
|
| 30 |
)
|
| 31 |
+
parser.add_argument("--batch_size", type=int, default=8, help="Batch size")
|
| 32 |
parser.add_argument(
|
| 33 |
"--model_name",
|
| 34 |
type=str,
|
|
|
|
| 103 |
print("Exists:", tasky_commits_path)
|
| 104 |
exit()
|
| 105 |
|
| 106 |
+
path = "javascript_add_messages.jsonl"
|
| 107 |
+
ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
|
| 108 |
+
if args.start > len(ds): exit()
|
| 109 |
+
ds = ds[range(args.start, min(args.end, len(ds)))]
|
| 110 |
+
df = pd.DataFrame(ds, index=None)
|
| 111 |
+
|
| 112 |
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
|
| 113 |
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
|
| 114 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 115 |
model.to(device)
|
| 116 |
model.eval()
|
| 117 |
|
| 118 |
+
#path = "javascript_add_messages.jsonl"
|
| 119 |
+
#ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
|
| 120 |
+
#ds = ds[range(args.start, min(args.end, len(ds)))]
|
| 121 |
+
#df = pd.DataFrame(ds, index=None)
|
| 122 |
#tasky_commits_path = f"tasky_commits_javascript_{args.start}_{args.end}.jsonl"
|
| 123 |
#if os.path.exists(f"javascript/{tasky_commits_path}"):
|
| 124 |
# print("Exists:", tasky_commits_path)
|
|
|
|
| 133 |
# Write two jsonl files:
|
| 134 |
# 1) Probas for all of C4
|
| 135 |
# 2) Probas + texts for samples predicted as tasky
|
| 136 |
+
tasky_commits_path = f"javascript_add/tasky_commits_javascript_{args.start}_{args.end}.jsonl"
|
| 137 |
|
| 138 |
with open(tasky_commits_path, "w") as f:
|
| 139 |
for i in range(len(preds)):
|
inference_python.py
CHANGED
|
@@ -28,7 +28,7 @@ def parse_args():
|
|
| 28 |
required=True,
|
| 29 |
help="Ending file number to download. Valid values: 0 - 1023",
|
| 30 |
)
|
| 31 |
-
parser.add_argument("--batch_size", type=int, default=
|
| 32 |
parser.add_argument(
|
| 33 |
"--model_name",
|
| 34 |
type=str,
|
|
@@ -103,16 +103,22 @@ if __name__ == "__main__":
|
|
| 103 |
print("Exists:", tasky_commits_path)
|
| 104 |
exit()
|
| 105 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
|
| 107 |
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
|
| 108 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 109 |
model.to(device)
|
| 110 |
model.eval()
|
| 111 |
|
| 112 |
-
path = "python_add_messages.jsonl"
|
| 113 |
-
ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
|
| 114 |
-
ds = ds[range(args.start, min(args.end, len(ds)))]
|
| 115 |
-
df = pd.DataFrame(ds, index=None)
|
| 116 |
|
| 117 |
texts = df["message"].to_list()
|
| 118 |
commits = df["commit"].to_list()
|
|
@@ -123,7 +129,7 @@ if __name__ == "__main__":
|
|
| 123 |
# Write two jsonl files:
|
| 124 |
# 1) Probas for all of C4
|
| 125 |
# 2) Probas + texts for samples predicted as tasky
|
| 126 |
-
tasky_commits_path = f"tasky_commits_python_{args.start}_{args.end}.jsonl"
|
| 127 |
|
| 128 |
with open(tasky_commits_path, "w") as f:
|
| 129 |
for i in range(len(preds)):
|
|
|
|
| 28 |
required=True,
|
| 29 |
help="Ending file number to download. Valid values: 0 - 1023",
|
| 30 |
)
|
| 31 |
+
parser.add_argument("--batch_size", type=int, default=8, help="Batch size")
|
| 32 |
parser.add_argument(
|
| 33 |
"--model_name",
|
| 34 |
type=str,
|
|
|
|
| 103 |
print("Exists:", tasky_commits_path)
|
| 104 |
exit()
|
| 105 |
|
| 106 |
+
path = "python_add_messages.jsonl"
|
| 107 |
+
ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
|
| 108 |
+
if args.start > len(ds): exit()
|
| 109 |
+
ds = ds[range(args.start, min(args.end, len(ds)))]
|
| 110 |
+
df = pd.DataFrame(ds, index=None)
|
| 111 |
+
|
| 112 |
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
|
| 113 |
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
|
| 114 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 115 |
model.to(device)
|
| 116 |
model.eval()
|
| 117 |
|
| 118 |
+
#path = "python_add_messages.jsonl"
|
| 119 |
+
#ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
|
| 120 |
+
#ds = ds[range(args.start, min(args.end, len(ds)))]
|
| 121 |
+
#df = pd.DataFrame(ds, index=None)
|
| 122 |
|
| 123 |
texts = df["message"].to_list()
|
| 124 |
commits = df["commit"].to_list()
|
|
|
|
| 129 |
# Write two jsonl files:
|
| 130 |
# 1) Probas for all of C4
|
| 131 |
# 2) Probas + texts for samples predicted as tasky
|
| 132 |
+
tasky_commits_path = f"python_add/tasky_commits_python_{args.start}_{args.end}.jsonl"
|
| 133 |
|
| 134 |
with open(tasky_commits_path, "w") as f:
|
| 135 |
for i in range(len(preds)):
|
javascript_add/tasky_commits_javascript_0_32909.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ecd7febfb0ed024386ca341d95c9ba58b380add8e7a325b7788d8bee30d5482b
|
| 3 |
+
size 4892266
|
javascript_add/tasky_commits_javascript_329090_361999.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f14fa8e4462a7ff38d002940ce29754f5a693632b40d2732bd60587ac50a28e8
|
| 3 |
+
size 4929381
|
javascript_add/tasky_commits_javascript_460726_493635.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:261ca373d5221f247225d77f7136fa6430d5cdffa67f9cb3ad014198e5a6c8ea
|
| 3 |
+
size 4995585
|
javascript_add/tasky_commits_javascript_65818_98727.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:533ba4d512fc98be2f51fab4d9f9b67ac7f8cda71bed6ced0f78959a8b10e189
|
| 3 |
+
size 4946988
|