# NOTE(review): dataset-extraction artifact ("text / stringlengths / 1 / 93.6k")
# left over from the corpus this file was dumped from — not part of the program.
def code2session(self, code: str) -> bool:
    """Exchange a wx js_code for a session and remember the returned open_id.

    Returns True when the response carried a data payload, False otherwise.
    Raises APIError when the service reports a non-zero err_no.
    """
    response = self.session.get(
        WXAPI_TOKEN,
        params={"mp_id": 1, "js_code": code},
        headers={"wx-open-id": self.open_id, "Content-Type": "application/json"},
    )
    response.raise_for_status()
    payload = response.json()
    err_no = payload["err_no"]
    if err_no != 0:
        raise APIError(err_no, payload["err_tips"])
    data = payload.get("data")
    if not data:
        return False
    self.open_id = data["open_id"]
    return True
def search(self, question: str) -> bool:
    """Submit *question* to the search endpoint and cache the result items.

    Returns True when at least one result item came back.
    Raises APIError when the service reports a non-zero err_no.
    """
    response = self.session.post(
        WXAPI_SEARCH,
        headers={"wx-open-id": self.open_id},
        json={"query": question, "channel": 1},
    )
    response.raise_for_status()
    payload = response.json()
    err_no = payload["err_no"]
    if err_no != 0:
        raise APIError(err_no, payload["err_tips"])
    self.items = payload["data"]["result"]["items"]
    return bool(self.items)
def get(self, index: int = 0) -> tuple[str, str]:
    """Return (question_text, answer_text) for the cached result at *index*.

    Raises IndexError when *index* is outside the cached items
    (call search() first to populate them).
    """
    answer_block = self.items[index]["question_answer"]
    return answer_block["question_plain_text"], answer_block["answer_plain_text"]
if __name__ == "__main__":
    # Demo run: look up a single question and print the first hit.
    question = "国防是阶级斗争的产物,它伴随着()的形成而产生。"
    client = XxyWxAPI("oKtmq5YGlp26rm6eL-aRKew1ZRHs")
    client.search(question)
    question_text, answer_text = client.get(0)
    print("题 --- ", question_text)
    print("答 --- ", answer_text)
# <FILESEP>
import torch
import torch.nn as nn
import time
import bratsUtils
import numpy as np
import torch.optim as optim
import torch.nn.functional as F
import os
import dataProcessing.utils as utils
import systemsetup
class Segmenter:
def __init__(self, expConfig, trainDataLoader, valDataLoader, challengeValDataLoader):
    """Set up the segmenter: store loaders/config, resolve paths, init tracking
    state, optionally resume from a checkpoint, and move the net to the device.

    # assumes expConfig exposes .experiment and .net, and optionally
    # RESTORE_ID / RESTORE_EPOCH — TODO confirm against the experiment configs
    """
    self.expConfig = expConfig
    self.trainDataLoader = trainDataLoader
    self.valDataLoader = valDataLoader
    self.challengeValDataLoader = challengeValDataLoader
    self.experiment = expConfig.experiment
    # Checkpoint / prediction locations come from the systemsetup module;
    # load and save currently point at the same base path.
    self.checkpointsBasePathLoad = systemsetup.CHECKPOINT_BASE_PATH
    self.checkpointsBasePathSave= systemsetup.CHECKPOINT_BASE_PATH
    self.predictionsBasePath = systemsetup.PREDICTIONS_BASE_PATH
    # Bookkeeping for best-score tracking / early stopping.
    self.startFromEpoch = 0
    self.bestMeanDice = 0
    self.bestMeanDiceEpoch = 0
    self.movingAvg = 0
    self.bestMovingAvg = 0
    self.bestMovingAvgEpoch = 1e9  # sentinel: no best moving-average epoch yet
    self.EXPONENTIAL_MOVING_AVG_ALPHA = 0.95
    self.EARLY_STOPPING_AFTER_EPOCHS = 120
    # restore model if requested: resume one epoch after the restored one.
    if hasattr(expConfig, "RESTORE_ID") and hasattr(expConfig, "RESTORE_EPOCH"):
        self.startFromEpoch = self.loadFromDisk(expConfig.RESTORE_ID, expConfig.RESTORE_EPOCH) + 1
        print("Loading checkpoint with id {} at epoch {}".format(expConfig.RESTORE_ID, expConfig.RESTORE_EPOCH))
    # Run on GPU or CPU; wrap the net for multi-GPU before picking the device.
    if torch.cuda.is_available():
        print("using cuda (", torch.cuda.device_count(), "device(s))")
        if torch.cuda.device_count() > 1:
            expConfig.net = nn.DataParallel(expConfig.net)
        self.device = torch.device("cuda")
    else:
        self.device = torch.device("cpu")
        print("using cpu")
    expConfig.net = expConfig.net.to(self.device)
def validateAllCheckpoints(self):