query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Clears all the variables from the workspace of the spyder application.
def clear_all(): gl = globals().copy() for var in gl: if var[0] == '_': continue if 'func' in str(globals()[var]): continue if 'module' in str(globals()[var]): continue del globals()[var]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n self.vars = []", "def controlVariablesClear() :\n s.clearScriptAll()", "def ClearWorkspace(workspace_name=''):\n ClearWorkspaceCC(workspace_name)", "def clear(self):\n self.globalDefines = {}\n self.axiom = self.setAxiomFromString(\"\")\n self.clearProduct...
[ "0.70930827", "0.67948645", "0.6680693", "0.6621833", "0.65619415", "0.6522764", "0.6467503", "0.6458815", "0.63563365", "0.6324468", "0.63060844", "0.6302263", "0.62758696", "0.62447083", "0.6243702", "0.6192215", "0.6141518", "0.6140391", "0.6136033", "0.6091357", "0.607381...
0.64478254
9
FresnelReflection takes the smallest angle between the ray direction and the normal. Thus the flipped normal will also work.
def test_antinormal_reflection(self): n1 = 1.0 n2 = 1.5 normal = (0.0, 0.0, -1.0) angle = 0.0 ray = Ray(position=(0.0, 0.0, 0.0), direction=(0.0, 0.0, 1.0), wavelength=None) fresnel = FresnelReflection() assert np.isclose(fresnel.reflectivity(angle, n1, n2), 0.04) new_ray = fresnel.transform(ray, {"normal": normal}) assert np.allclose(flip(ray.direction), new_ray.direction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reflect(self, ray):\n normal = self.normal(ray.position)\n if normal.dot(ray.direction) > 0:\n normal = -normal\n return Ray(\n ray.direction - 2 * dot(ray.direction, normal) * normal, ray.position)", "def refract(self, ray, rho):\n normal = self.normal(ray.position)\n if normal.do...
[ "0.6952331", "0.59180486", "0.58824384", "0.57535976", "0.57386696", "0.55146694", "0.5512906", "0.5464423", "0.5416558", "0.5369161", "0.5335698", "0.52933735", "0.52365685", "0.5117482", "0.50288486", "0.49778453", "0.49726534", "0.49646103", "0.49436632", "0.49390393", "0....
0.8033125
0
Tests the API endpoint to get hashrate resale details with missing field
def test_mining_hashrate_resale_details_with_missing_field(params): client = Client(key, secret) client.mining_hashrate_resale_details.when.called_with(**params).should.throw( ParameterRequiredError )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mining_hashrate_resale_details():\n\n client = Client(key, secret)\n response = client.mining_hashrate_resale_details(123, \"user_name\")\n response.should.equal(mock_item)", "def test_retire_rate_plan(self):\n pass", "def test_validation_get_valid_resampling(self):\n self.asser...
[ "0.6968484", "0.62199587", "0.61830664", "0.6144193", "0.61011153", "0.59623325", "0.58962244", "0.58485985", "0.5811617", "0.5798121", "0.5735763", "0.5712043", "0.56655514", "0.5655179", "0.56484246", "0.5585292", "0.5584498", "0.5572597", "0.5553329", "0.55408216", "0.5536...
0.7106652
0
Tests the API endpoint to get hashrate resale details
def test_mining_hashrate_resale_details(): client = Client(key, secret) response = client.mining_hashrate_resale_details(123, "user_name") response.should.equal(mock_item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_retire_rate_plan(self):\n pass", "def test_retrieve_list_resgate_to_user_authenticated(self):\n sample_resgate(user=self.user, value=500)\n sample_resgate(user=self.user, value=200)\n\n response = self.client.get(RESGATE_URL)\n\n resgates = Resgate.objects.all().order_...
[ "0.63951224", "0.6310697", "0.6241689", "0.62342453", "0.60122466", "0.59688497", "0.5942244", "0.5845871", "0.5801503", "0.578017", "0.5748117", "0.5727169", "0.57210433", "0.5657565", "0.56426114", "0.56349385", "0.5620731", "0.56199795", "0.5606225", "0.5600434", "0.559901...
0.74108505
0
A method that returns the difference between two operands
def calc(operand_1, operand_2): return operand_1 - operand_2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtraction(a, b):\n return a - b", "def subtract(a, b):\n return a - b", "def subtract(a, b):\n return a - b", "def subtract(lhs, rhs):\n return _make.subtract(lhs, rhs)", "def subtract(*args):\n return args[0] - reduce(lambda x, y: x + y, args[1:])", "def minus(self, a, b):\n retu...
[ "0.75801355", "0.7548788", "0.7548788", "0.75324667", "0.75270516", "0.74799347", "0.7476643", "0.74425304", "0.7346755", "0.7334118", "0.7311729", "0.7311729", "0.7311729", "0.7274825", "0.71000874", "0.7097167", "0.7027035", "0.70068324", "0.69969356", "0.6982394", "0.69644...
0.74922514
6
This function should return a list of two agents that will form the team, initialized using firstIndex and secondIndex as their agent index numbers. isRed is True if the red team is being created, and will be False if the blue team is being created. As a potentially helpful development aid, this function can take additional stringvalued keyword arguments ("first" and "second" are such arguments in the case of this function), which will come from the redOpts and blueOpts commandline arguments to capture.py. For the nightly contest, however, your team will be created without any extra arguments, so you should make sure that the default behavior is what you want for the nightly contest.
def createTeam(firstIndex, secondIndex, isRed, first='OffensiveReflexAgent', second='DefensiveReflexAgent'): return [eval(first)(firstIndex), eval(second)(secondIndex)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createTeam(firstIndex, secondIndex, isRed,\n first = 'ReflexCaptureAgent', second = 'DefensiveReflexAgent'):\n return [eval(first)(firstIndex), eval(second)(secondIndex)]", "def createTeam(firstIndex, secondIndex, isRed,\n first = 'OffensiveReflexAgent', second = 'DefensiveReflex...
[ "0.8380259", "0.8160164", "0.8160164", "0.81365675", "0.8123886", "0.8115439", "0.8114686", "0.8110517", "0.8087326", "0.8084383", "0.8084383", "0.8084383", "0.80327135", "0.7990393", "0.797317", "0.7959049", "0.794438", "0.7907669", "0.7899954", "0.7830475", "0.78248924", ...
0.80977505
8
Picks among the actions with the highest Q(s,a).
def chooseAction(self, gameState): actions = gameState.getLegalActions(self.index) # You can profile your evaluation time by uncommenting these lines # start = time.time() values = [self.evaluate(gameState, a) for a in actions] # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start) maxValue = max(values) bestActions = [a for a, v in zip(actions, values) if v == maxValue] foodLeft = len(self.getFood(gameState).asList()) return random.choice(bestActions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maxQ(self,state):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n \r\n for a in self.actions:\r\n q = self.Q(state,a)\r\n #print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)", "def...
[ "0.7763381", "0.77043587", "0.76056725", "0.755616", "0.73318", "0.7316181", "0.71344316", "0.71120584", "0.6966468", "0.6956663", "0.6948445", "0.69366306", "0.6920287", "0.6893597", "0.68934804", "0.68932414", "0.6878565", "0.6877915", "0.68764764", "0.6848047", "0.6837404"...
0.0
-1
Takes a url and email, sends POST request and display body
def main(): post_url = argv[1] params = { 'email': argv[2] } query_string = parse.urlencode(params) post_data = query_string.encode("ascii") with request.urlopen(post_url, post_data) as post_response: response_text = post_response.read() print(response_text.decode("UTF-8"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self):\n return send_email(request.args)", "def send_mail():\n email_address = request.args.get('emailAddress') # get email address from the form\n response = call_sendmail_endpoint(session['access_token'], session['alias'], email_address)\n print(session)\n if response == 'SUCCESS':\...
[ "0.66343814", "0.630883", "0.6291691", "0.62672955", "0.62664545", "0.6248591", "0.62434167", "0.61401975", "0.6046999", "0.5980318", "0.5960414", "0.5922105", "0.5918039", "0.5868241", "0.5860079", "0.5837165", "0.5816196", "0.58109957", "0.5810417", "0.57976925", "0.5796052...
0.71457523
0
Return stock move name by type.
def next_move(ttype): count = db.session.query(StockMove.id).count() + 1 return str('SO/' if ttype =='sale' else 'PO/') + str(count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def typeToName(type: int) -> unicode:\n ...", "def get_item_name(sp, item_type, item_id):\n if item_type == 'playlist':\n name = sp.playlist(playlist_id=item_id, fields='name').get('name')\n elif item_type == 'album':\n name = sp.album(album_id=item_id).get('name')\n elif item_type ...
[ "0.6108094", "0.59010094", "0.5656677", "0.555516", "0.5488487", "0.547423", "0.5448434", "0.54116297", "0.5408066", "0.5407569", "0.5393827", "0.53872436", "0.53552365", "0.53311074", "0.5316032", "0.53117967", "0.5291321", "0.5275399", "0.5212203", "0.51964265", "0.51749265...
0.63632435
0
require_path defaults to True unless match_subdomains is enabled.
def _match_hostname(url, condition, require_path=None, require_no_path=False): scheme, _, other = url.partition(":") if scheme not in ( "git", # lxc-python2 "git+https", # asyncssh "http", "https", "svn", # wsgiref ): return False if condition.startswith("http://"): condition = condition[7:] hostname, _, path = condition.partition("/") if ":" in hostname: hostname = hostname.split(":", 1)[0] if "." not in other: # pragma: no cover return False # '/dev/' in http://www.reportlab.com/ other = other.lstrip("/") match_subdomains = hostname.startswith("*.") if match_subdomains: hostname = hostname[2:] subdomain, other = other.split(".", 1) if subdomain in ["www"]: logger.debug("url {} subdomain www".format(url)) return False if not other.startswith(hostname): return None if require_path is None: require_path = not match_subdomains # Require at least a suffix other = other[len(hostname) :] other = other.lstrip("/") if not other: if require_no_path: return True if require_path: logger.debug("url {} no path".format(url)) return False if path: if not other.startswith(path): logger.debug("url {} not path {}".format(url, path)) return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matches_path(cls, path):\n return path.startswith('/') or \\\n path.startswith('./') or \\\n path.startswith('../') or \\\n path.startswith('file://')", "def should_domain_substitute(path, relative_path, search_regex, used_dep_set, used_dip_set):\n relat...
[ "0.59390235", "0.5838241", "0.58182794", "0.58182794", "0.5769316", "0.5747707", "0.5747707", "0.563547", "0.5611369", "0.55556506", "0.55022365", "0.5498899", "0.54806775", "0.53935033", "0.5303024", "0.5217876", "0.51932895", "0.5184053", "0.51652503", "0.5151885", "0.51287...
0.5738598
7
Helper method to create a requests Session
def get_session(): jwt_secret = base64.urlsafe_b64decode(os.getenv('AUTH0_CLIENT_SECRET')) claims = { 'sub': 'rf|airflow-user', 'iat': datetime.utcnow(), 'exp': datetime.utcnow() + timedelta(hours=3) } encoded_jwt = jwt.encode(claims, jwt_secret, algorithm='HS256') session = requests.Session() session.headers.update({'Authorization': 'Bearer {}'.format(encoded_jwt)}) return session
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_session():\n return requests.Session()", "def create(self):\n\t\tif self._session:\n\t\t\tself.close()\n\n\t\tif not self._session:\n\t\t\tself._session = requests.Session()\n\t\t\tself._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n\t\t\tself._session.mount('https://', ra...
[ "0.8304606", "0.7970149", "0.7811028", "0.7757718", "0.75642025", "0.7562853", "0.7443917", "0.738472", "0.7361329", "0.7354772", "0.73463327", "0.73270935", "0.7302046", "0.7212053", "0.7189299", "0.70787716", "0.70699424", "0.7052273", "0.70521116", "0.7011763", "0.69860953...
0.67009187
37
Load and return the vowel training dataset. Returns (X_train, X_test, y_train, y_test) Tuple A tuple of data and target
def load_vowel(): train = _load_vowel_train() test = _load_vowel_test() return (train[0], train[1].reshape(-1, 1), test[0], test[1].reshape(-1, 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_vowel_test():\n vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1)\n X = vowel_data[:, -10:]\n y = vowel_data[:, 1].astype(int)\n return (X, y)", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_c...
[ "0.7972531", "0.6284241", "0.62223625", "0.61976546", "0.6173541", "0.6136231", "0.6088554", "0.60739297", "0.60504085", "0.60448134", "0.60420364", "0.60369146", "0.60268086", "0.60214686", "0.60152215", "0.6013476", "0.6010893", "0.5968949", "0.5953889", "0.5910949", "0.589...
0.8670741
0
Load and return the vowel testing dataset. Returns (X, y) Tuple A tuple of data and target
def _load_vowel_test(): vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1) X = vowel_data[:, -10:] y = vowel_data[:, 1].astype(int) return (X, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_vowel():\n train = _load_vowel_train()\n test = _load_vowel_test()\n return (train[0], train[1].reshape(-1, 1), test[0], test[1].reshape(-1, 1))", "def learn_vowels(self, data=None):\n #pdb.set_trace()\n if not data:\n data = self.memory\n # find acoustic prototy...
[ "0.8300088", "0.6068408", "0.5904717", "0.5585258", "0.55028343", "0.54649633", "0.54600555", "0.5418553", "0.5395617", "0.53787655", "0.53458136", "0.5342534", "0.52974266", "0.5277391", "0.526091", "0.52232075", "0.52189106", "0.5207462", "0.5204022", "0.5163287", "0.514756...
0.83285666
0
Load and return the breast cancer wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. Returns (X_train, X_test, y_train, y_test) Tuple A tuple of data and target The copy of UCI ML Breast Cancer Wisconsin (Original) dataset is
def load_breast_cancer(): bc_data_train = np.load(_BREAST_CANCER_FOLDER+'bc_data.train') bc_data_test = np.load(_BREAST_CANCER_FOLDER+'bc_data.test') bc_target_train = np.load(_BREAST_CANCER_FOLDER+'bc_target.train') bc_target_test = np.load(_BREAST_CANCER_FOLDER+'bc_target.test') for i in range(len(bc_target_test)): if bc_target_test[i] == 2: bc_target_test[i] = 0 elif bc_target_test[i] == 4: bc_target_test[i] = 1 for i in range(len(bc_target_train)): if bc_target_train[i] == 2: bc_target_train[i] = 0 elif bc_target_train[i] == 4: bc_target_train[i] = 1 return (bc_data_train, bc_target_train.reshape(-1, 1), bc_data_test, bc_target_test.reshape(-1, 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_breast_cancer():\n data = load_breast_cancer_sk()\n X = pd.DataFrame(data.data, columns=data.feature_names)\n y = pd.Series(data.target)\n y = y.map(lambda x: data[\"target_names\"][x])\n\n X.ww.init()\n y = ww.init_series(y)\n return X, y", "def load_data():\n df = pd.read_csv(\...
[ "0.7141485", "0.69764185", "0.6372257", "0.6351349", "0.6293626", "0.60629076", "0.5941613", "0.59359765", "0.5885802", "0.5858357", "0.58188754", "0.57530105", "0.5714098", "0.5703062", "0.56975883", "0.5686469", "0.5659355", "0.5605132", "0.560234", "0.55977595", "0.5596762...
0.76880604
0
add(Vector,Vector) adds two vectors
def add(first, other): if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector): return FreeCAD.Vector(first.x+other.x, first.y+other.y, first.z+other.z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vector_add(v1, v2):\n return v1[0] + v2[0], v1[1] + v2[1]", "def vector_add(a, b):\n assert(len(a) == len(b))\n\n from operator import add\n return tuple(map(add, a, b))", "def vectorAdd(a, b):\n return [a[i] + b[i] for i, j in enumerate(a)]", "def __add__(self,other):\n return Vect...
[ "0.8628645", "0.8308469", "0.793092", "0.7919341", "0.7902262", "0.78230643", "0.7809441", "0.77952737", "0.77811813", "0.7755966", "0.775354", "0.7749087", "0.77490765", "0.7742617", "0.77359337", "0.7711612", "0.7693798", "0.7677173", "0.7629188", "0.75305533", "0.7523501",...
0.8094569
2
sub(Vector,Vector) subtracts second vector from first one
def sub(first, other): if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector): return FreeCAD.Vector(first.x-other.x, first.y-other.y, first.z-other.z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vector_sub(v1,v2):\n return Vector(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z)", "def vec_sub(x, y):\r\n return [a - b for a, b in zip(x, y)]", "def vector_subtract(v1, v2):\n return v1[0] - v2[0], v1[1] - v2[1]", "def vector_substraction(a, b):\n return a[0] - b[0], a[1] - b[1]", "def vec_sub (x,...
[ "0.8428072", "0.8130589", "0.81168246", "0.80479693", "0.7928738", "0.7876339", "0.78062", "0.7775048", "0.77688444", "0.7703792", "0.7700997", "0.7661297", "0.76243055", "0.76243055", "0.76243055", "0.7616531", "0.7509781", "0.7507299", "0.7501865", "0.7447214", "0.7437817",...
0.8258668
1
scale(Vector,Float) scales (multiplies) a vector by a factor
def scale(first,scalar): if isinstance(first,FreeCAD.Vector): return FreeCAD.Vector(first.x*scalar, first.y*scalar, first.z*scalar)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale_vector(vector, scale):\n return vector[0] * scale, vector[1] * scale, vector[2] * scale", "def scale_vector(vector, f):\n f = float(f)\n return [vector[0] * f, vector[1] * f, vector[2] * f]", "def scale(s: (float, int), v: Vector) -> Vector:\n coords = list()\n res = Vector(coords)\n ...
[ "0.8370318", "0.7960726", "0.7441456", "0.7425613", "0.7421588", "0.74008507", "0.7352211", "0.7281855", "0.725304", "0.72357947", "0.7201499", "0.7179996", "0.7178041", "0.7161515", "0.713151", "0.71268904", "0.70673966", "0.6991172", "0.6965906", "0.69597846", "0.6956664", ...
0.7347928
7
lengh(Vector) gives vector length
def length(first): if isinstance(first,FreeCAD.Vector): return math.sqrt(first.x*first.x + first.y*first.y + first.z*first.z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vec_len(x):\r\n \r\n length = math.sqrt(x[0]**2 + x[1]**2)\r\n return length", "def _get_vector_size(self):\n if len(self):\n return len(self.values()[0])\n else:\n return 0", "def vector_len( vector ):\n \n if not isinstance(vector, np.ndarray ):\n ...
[ "0.76649845", "0.7522427", "0.7505321", "0.74064803", "0.73988277", "0.7376112", "0.7350648", "0.7289443", "0.7279308", "0.72593445", "0.7243668", "0.71736383", "0.70988554", "0.70317763", "0.69320846", "0.69186604", "0.69020325", "0.69020325", "0.6873299", "0.6854931", "0.68...
0.71695125
12
dist(Vector,Vector) returns the distance between both points/vectors
def dist(first, other): if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector): return length(sub(first,other))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))", "def dist(p1,p2):\n\n return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) *...
[ "0.8330045", "0.7836225", "0.7793538", "0.7769077", "0.7764226", "0.76751816", "0.7628056", "0.7599897", "0.758881", "0.75808233", "0.75630975", "0.75542706", "0.7522357", "0.7487544", "0.7484254", "0.74748534", "0.74724805", "0.7463907", "0.74554265", "0.7436666", "0.7422981...
0.77351147
5
normalized(Vector) returns a unit vector
def normalized(first): if isinstance(first,FreeCAD.Vector): l=length(first) return FreeCAD.Vector(first.x/l, first.y/l, first.z/l)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalized(self):\n try:\n m = abs(self)\n return self / m\n except ZeroDivisionError as e:\n raise Exception(\"Attempted to normalize a zero vector, return a unit vector at zero degrees\") from e\n # return Vector(1, 0)", "def normalized(self):\n ...
[ "0.84013855", "0.8248628", "0.8215351", "0.81663924", "0.81663924", "0.81663924", "0.8158663", "0.8142691", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8133052", "0.8125412", "0.81115973"...
0.7754226
39
dotproduct(Vector,Vector) returns the dot product of both vectors
def dotproduct(first, other): if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector): return (first.x*other.x + first.y*other.y + first.z*other.z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dotproduct(vec1, vec2):\n return sum((a*b) for a, b in zip(vec1, vec2))", "def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))", "def dot_product(v1, v2):\n return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]", "def dotProduct(v1, v2):\n n1 = normalize(v1)\...
[ "0.86972415", "0.8648256", "0.85929865", "0.85891086", "0.8570757", "0.8549981", "0.8507858", "0.847935", "0.84727925", "0.84720486", "0.84266394", "0.8419811", "0.8343178", "0.8338558", "0.83323467", "0.8256427", "0.82338524", "0.82235616", "0.82016593", "0.8183103", "0.8173...
0.80322015
23
crossproduct(Vector,Vector) returns the cross product of both vectors. If only one is specified, cross product is made with vertical axis, thus returning its perpendicular in XY plane
def crossproduct(first, other=FreeCAD.Vector(0,0,1)): if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector): return FreeCAD.Vector(first.y*other.z - first.z*other.y, first.z*other.x - first.x*other.z, first.x*other.y - first.y*other.x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_product(v1, v2):\n return cg3d_vector.CG3dVector(\n v1[1] * v2[2] - v2[1] * v1[2],\n v1[2] * v2[0] - v2[2] * v1[0],\n v1[0] * v2[1] - v2[0] * v1[1]\n )", "def vector_cross(x, y):\n\n if(len(x) != len(y)):\n raise ValueError(\"vector lengths differ\")\n elif(len(x...
[ "0.8131378", "0.802793", "0.79265344", "0.7779056", "0.77629125", "0.77045447", "0.76780075", "0.75904584", "0.7525282", "0.7513922", "0.7469388", "0.7426301", "0.7420362", "0.7362331", "0.73169047", "0.7263604", "0.72250664", "0.7215048", "0.72099155", "0.71860886", "0.71698...
0.72218657
17
angle(Vector,Vector) returns the angle in radians between the two vectors. If only one is given, angle is between the vector and the horizontal East direction
def angle(first, other=FreeCAD.Vector(1,0,0)): if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector): return math.acos(dotproduct(normalized(first),normalized(other)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))", "def angle(vec1, vec2):\n\n return math.acos(dotproduct(vec1, vec2) / (length(vec1) * length(vec2)))", "def angle_between_vectors(vec1, vec2):\n vec = vec1 - vec2\n vec = vec.perp...
[ "0.7984714", "0.7903248", "0.7839204", "0.7763829", "0.76707727", "0.7667527", "0.7662782", "0.76549405", "0.76480985", "0.76240003", "0.7578548", "0.75698555", "0.7558118", "0.75398844", "0.7527476", "0.75202245", "0.7511977", "0.7507649", "0.7465797", "0.7461353", "0.745387...
0.7572855
11
sets the desired capacity of the underlying ASG directly. note that this is for internal control. for scaling purposes, please use scale() instead.
def set_desired_capacity(self, new_desired_capacity): scale_out = new_desired_capacity - self.desired_capacity assert scale_out >= 0 if scale_out == 0: return CompletedFuture(False) remaining_instances = self.client.get_remaining_instances(self.resource_group, self.instance_type) futures = [] for scale_set in sorted(self.scale_sets.values(), key=lambda x: (x.priority, x.name)): if scale_set.capacity < _SCALE_SET_SIZE_LIMIT: if self.slow_scale: new_group_capacity = scale_set.capacity + 1 else: new_group_capacity = min(_SCALE_SET_SIZE_LIMIT, scale_set.capacity + scale_out, scale_set.capacity + remaining_instances) if scale_set.provisioning_state == 'Updating': logger.warn("Update of {} already in progress".format(scale_set.name)) continue if scale_set.provisioning_state == 'Failed': logger.error("{} failed provisioning. Skipping it for scaling.".format(scale_set.name)) continue scale_out -= (new_group_capacity - scale_set.capacity) remaining_instances -= (new_group_capacity - scale_set.capacity) # Update our cached version self.scale_sets[scale_set.name].capacity = new_group_capacity futures.append(self.client.update_scale_set(scale_set, new_group_capacity)) logger.info("Scaling Azure Scale Set {} to {}".format(scale_set.name, new_group_capacity)) if scale_out == 0 or remaining_instances == 0: break if remaining_instances == 0: logger.warning("Out of quota for {}!".format(self.instance_type)) if scale_out > 0: logger.error("Not enough scale sets to reach desired capacity {} for {}".format(new_desired_capacity, self)) self.desired_capacity = new_desired_capacity - scale_out logger.info("ASG: {} new_desired_capacity: {}".format(self, new_desired_capacity)) return TransformingFuture(True, AllCompletedFuture(futures))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_capacity(self, cap):\n self._capacity.type = 'value'\n self._capacity._value = float(cap) # TODO getter/setter", "def set_capacity(self, cap):\n return self.get_interaction().set_capacity(cap)", "def set_capacity(self, capacity):\r\n params = {\r\n 'AutoScalingGroupName...
[ "0.77368546", "0.770329", "0.7405971", "0.71707654", "0.67850167", "0.6660137", "0.63342935", "0.6240899", "0.6176888", "0.61686575", "0.6136855", "0.6032943", "0.599189", "0.5988754", "0.5920193", "0.5911704", "0.58976525", "0.58910733", "0.58642304", "0.58624625", "0.581370...
0.6512873
6
scale down asg by terminating the given node. returns a future indicating when the request completes.
def scale_nodes_in(self, nodes): for node in nodes: self.nodes.remove(node) return self.terminate_instances(node.instance_id for node in nodes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def stop_node(request: web.Request) -> web.Response:\n req_ctx = RequestContext.parse_obj(request)\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n return await start_long_running_task(\n request,\n _stop_dynamic_service_with_progress,\n task_contex...
[ "0.5773211", "0.5584699", "0.5190178", "0.51015365", "0.49526966", "0.4843822", "0.47758818", "0.47302267", "0.47145423", "0.47145423", "0.46995267", "0.46891746", "0.4611184", "0.45706004", "0.45680726", "0.4559255", "0.4542594", "0.45021996", "0.44928265", "0.4487056", "0.4...
0.45100704
17
A custom python read function for interfacing with nii image files.
def read_fn(file_references, mode, params=None): def _augment(img): """An image augmentation function""" return flip(img, axis=2) for f in file_references: subject_id = f[0] data_path = '../../../data/IXI_HH/1mm' # Read the image nii with sitk t1_fn = os.path.join(data_path, '{}/T1_1mm.nii.gz'.format(subject_id)) t1 = sitk.GetArrayFromImage(sitk.ReadImage(str(t1_fn))) # Normalise volume images t1 = whitening(t1) # Create a 4D image (i.e. [x, y, z, channels]) images = np.expand_dims(t1, axis=-1).astype(np.float32) if mode == tf.estimator.ModeKeys.PREDICT: yield {'features': {'x': images}} # Augment if used in training mode if mode == tf.estimator.ModeKeys.TRAIN: images = _augment(images) # Check if the reader is supposed to return training examples or full # images if params['extract_examples']: images = extract_random_example_array( image_list=images, example_size=params['example_size'], n_examples=params['n_examples']) for e in range(params['n_examples']): yield {'features': {'x': images[e].astype(np.float32)}} else: yield {'features': {'x': images}} return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_image(path, file_format='nii.gz'):\n path = path + '.' + file_format\n if file_format == 'npy':\n image = np.load(path)\n elif file_format == 'npz':\n image = np.load(path)['arr_0']\n elif file_format in ('png', 'jpg'):\n image = np.array(imageio.imread(path))\n elif fi...
[ "0.7021543", "0.69873", "0.6858336", "0.6605103", "0.65766186", "0.65725684", "0.6543384", "0.65361625", "0.6512165", "0.6454746", "0.6408091", "0.6406229", "0.63912904", "0.6366565", "0.63648134", "0.6295551", "0.62764174", "0.62492245", "0.61694163", "0.61654574", "0.614689...
0.0
-1
An image augmentation function
def _augment(img): return flip(img, axis=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def augment(self, image):\n pass", "def image_augmentation(img):\n return np.fliplr(img)", "def get_augmenter():\n\n augmenter = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Small gaussian blur with random sigma between 0 and 0.5....
[ "0.75550324", "0.7301866", "0.72235656", "0.7217813", "0.72143877", "0.7079705", "0.6807167", "0.6775062", "0.67542374", "0.6687042", "0.6650061", "0.6619291", "0.6592331", "0.6575989", "0.65661603", "0.6518033", "0.64495766", "0.6393832", "0.63352853", "0.6332074", "0.628048...
0.7171862
5
Configures the target schema in which the tweets data will be stored, creates the schema and the table if not existing yet
def init_db(conn: Connection) -> None: logger.info(f"{Fore.YELLOW}Initializing database ...{Style.RESET_ALL}") # Create specified schema if not exists if not conn.dialect.has_schema(conn, schema_name): logger.info(f"{Fore.YELLOW}Schema {schema_name} does not exist, creating it ...{Style.RESET_ALL}") conn.execute(schema.CreateSchema(schema_name)) logger.info(f"{Fore.GREEN}Schema {schema_name} successfully created !{Style.RESET_ALL}") else: logger.info(f"{Fore.GREEN}Schema {schema_name} was found, continuing database initialization " f"...{Style.RESET_ALL}") # Create tables Base.metadata.create_all(bind=conn) logger.info(f"{Fore.GREEN}Schema {schema_name} successfully configured !{Style.RESET_ALL}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_schema(self):\n models.Base.metadata.create_all(self.session.bind)", "def set_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tweets(\n id INTEGER PRIMARY KEY,\n ...
[ "0.7223083", "0.7214102", "0.6795453", "0.6649024", "0.65013885", "0.64839", "0.64839", "0.6444698", "0.6426618", "0.64096576", "0.6398911", "0.63895595", "0.6386641", "0.63824946", "0.63433653", "0.63402724", "0.63384145", "0.63329905", "0.6317877", "0.63123184", "0.6277626"...
0.0
-1
Inserts fetched tweet data to the target database table
def insert_tweets(conn: Connection, fetch_data: Iterable[Dict]) -> None: s = Session(bind=conn) meta = MetaData() meta.reflect(bind=conn) s.add_all([Tweet(**t) for t in fetch_data]) s.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fillTweetInDB(self):\n sqlInsertTweets = \"INSERT INTO tweet content VALUES %s\"\n mycursor.executemany(sqlInsertTweets,self.content)\n mydb.commit()", "def insert_into_tweets(self, infos):\n query = \"insert into tweets(tweet_id, insert_date, created_at, hashtag) values(?, ?, ?, ...
[ "0.787744", "0.7727903", "0.7385115", "0.7071366", "0.6870072", "0.6737006", "0.6506767", "0.64741445", "0.64702773", "0.63960886", "0.63523656", "0.63268894", "0.6325997", "0.62656724", "0.6228846", "0.6178256", "0.6176296", "0.6122586", "0.6111885", "0.6098994", "0.6073444"...
0.76859015
2
returns fields key value dict
def prepare_from_tx(cls, txcomment, session=None): data_dict = deepcopy(txcomment.__dict__) data_dict['block_num'] = txcomment.block_num data_dict['transaction_num'] = txcomment.transaction_num data_dict['operation_num'] = txcomment.operation_num data_dict['timestamp'] = txcomment.timestamp data_dict['type'] = txcomment.type data_dict['txcomment'] = txcomment data_dict['session'] = session or object_session(txcomment) prepared = cls._prepare_for_storage(data_dict=data_dict) return prepared
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fields(node):\r\n return dict(iter_fields(node))", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in se...
[ "0.7502895", "0.7173949", "0.69838166", "0.6890921", "0.68630266", "0.6766923", "0.67547655", "0.673777", "0.673483", "0.67211217", "0.6610475", "0.6577992", "0.65449065", "0.6493254", "0.6487551", "0.6482813", "0.6455735", "0.6454793", "0.6442877", "0.64390624", "0.6432849",...
0.0
-1
returns Post or Comment instance
def from_tx(cls, txcomment, session=None, **kwargs): if txcomment.is_comment: obj_cls = Comment cls_name = 'Comment' elif txcomment.is_post: obj_cls = Post cls_name = 'Post' else: raise ValueError('txcomment must by either post or comment') prepared = cls.prepare_from_tx(txcomment, session=session, **kwargs) logger.debug('%s.add: tx: %s prepared:%s', cls_name, txcomment, prepared) return obj_cls(**prepared)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_post(self):\n post_pk = self.kwargs.get('post_pk', 0)\n return get_object_or_404(Post, pk=post_pk)", "def _get_post(self):\n return self.get_object().content_object", "def get_post(self):\n\t\tself.post = graph.get_object(POST_ID)", "def get_post(post_pk):\n where = \"WHERE p...
[ "0.69633675", "0.66851956", "0.6488416", "0.6333677", "0.6118673", "0.6012395", "0.59859", "0.59783304", "0.5883995", "0.5803353", "0.5775314", "0.57464737", "0.5668173", "0.559327", "0.5543723", "0.5539568", "0.5487841", "0.5474023", "0.54665846", "0.54636973", "0.54636973",...
0.0
-1
returns unique Post or Comment instance
def as_unique_from_tx(cls, txcomment, session=None, **kwargs): prepared = cls.prepare_from_tx(txcomment, session=session, **kwargs) if txcomment.is_comment: obj_cls = Comment elif txcomment.is_post: obj_cls = Post else: raise ValueError('txcomment must by either post or comment') return obj_cls.as_unique(session, **prepared)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_post(self):\n post_pk = self.kwargs.get('post_pk', 0)\n return get_object_or_404(Post, pk=post_pk)", "def get_unique_instance(type_):\n global unique_object_id\n ret = None\n\n if type_ is tuple:\n ret = tuple([unique_object_id])\n\n unique_object_id += 1\n return ret...
[ "0.65143263", "0.64079964", "0.6296819", "0.6184585", "0.59477484", "0.5869962", "0.5766771", "0.57316494", "0.5705596", "0.56982195", "0.5665292", "0.56357944", "0.56308913", "0.55791014", "0.55728555", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0.55584514", "0...
0.60853547
4
categories is an ordered dictionary
def __init__(self, position, area, categories, align=TOPLEFT, step=None, edge=DEFAULT_EDGE, colour=light_grey, deselect=True, button_labels=None): self.categories = categories if step is None: self.step = Button.default_height else: self.step = step self.deselect = deselect super().__init__(position, area, self.step * len(categories), align=align, edge=edge, button_size=self.step, colour=colour) self.button_tags = {} if button_labels is None: self.button_labels = {} else: self.button_labels = button_labels self.buttons = self.gen_buttons(categories) self.all_buttons = flatten(self.buttons) self.visible_buttons = list(self.buttons.keys()) self.update_display()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def categories(self):\n\t\treturn (sorted(self.dictData.keys()))", "def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins...
[ "0.7909571", "0.7163796", "0.7039748", "0.7027904", "0.7026439", "0.6947721", "0.68675554", "0.684357", "0.68159634", "0.6771762", "0.6768741", "0.67397535", "0.6650347", "0.6645734", "0.6628202", "0.6627645", "0.662422", "0.66078293", "0.6583735", "0.64916193", "0.6485819", ...
0.0
-1
Provide the text contents to the rest of the class.
def setUp(self): self.message = "notification message"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_text(self):\n pass", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n\n self.text = text", "def text(self) -> str:", "def __init__(self):\n self.text = ''", "def get_text(self):", "d...
[ "0.7451575", "0.738682", "0.738682", "0.7353386", "0.7223362", "0.7178723", "0.7166255", "0.7111192", "0.7060302", "0.6893968", "0.6893968", "0.6893968", "0.6893968", "0.6893968", "0.6882488", "0.68126476", "0.6810179", "0.6803394", "0.6770941", "0.6770941", "0.6770941", "0...
0.0
-1
Test that exceptions are caught.
def test_invalid_webhook(self, mock_send): logging.disable(logging.CRITICAL) # Don't log to stderr during this unit test mock_send.side_effect = OSError("Some error") send_notification("invalid_webhook", self.message) mock_send.assert_called() logging.disable(logging.NOTSET) # Reset the logging
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_exception_handling(self) -> None:\n try:\n 1 / 0\n assert False, \"should have thrown a ZeroDivisionError\"\n except (ZeroDivisionError, TypeError, NameError) as err:\n assert type(err) is ZeroDivisionError\n\n # From within tests, use `pytest.raises` ...
[ "0.7859332", "0.7556377", "0.7296645", "0.71099347", "0.70711637", "0.6970611", "0.69576824", "0.6924376", "0.6830435", "0.6750002", "0.6712215", "0.67063254", "0.66900784", "0.6665721", "0.6664793", "0.6655494", "0.6646311", "0.66175795", "0.65986204", "0.65889573", "0.65824...
0.0
-1
Test that a valid message is sent to a valid webhook.
def test_valid_webhook(self, mock_send): send_notification("valid_webhook", self.message) mock_send.assert_called()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_simple_message(self):\n messaging = {\n 'sender': {'id': '1331235'},\n 'recipient': {'id': '1111111'},\n 'message': {'text': 'Hello world.'}\n }\n event = self.create_message_event(messaging)\n c = Client()\n response = c.post(self.webhoo...
[ "0.73162293", "0.72650135", "0.70089185", "0.6989183", "0.6965128", "0.6857879", "0.68053627", "0.67763484", "0.6760223", "0.6692867", "0.66774166", "0.66758436", "0.665029", "0.66362166", "0.66284573", "0.65982944", "0.65941006", "0.6593365", "0.65475965", "0.6529135", "0.65...
0.7538486
0
Provide a default report for the rest of the class.
def setUp(self): self.report = dict(title="Report 1", url="https://report1") self.data_model = dict( metrics=dict(metric_type=dict(name="type")), sources=dict( quality_time=dict( parameters=dict( status=dict( api_values={ "target met (green)": "target_met", "near target met (yellow)": "near_target_met", "target not met (red)": "target_not_met", "technical debt target met (grey)": "debt_target_met", "unknown (white)": "unknown", } ) ) ) ), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_report(self):\n raise NotImplementedError", "def report():\n pass", "def report(self, report_options=None):\n raise NotImplementedError()", "def report(self, **options):\n pass", "def init_report(self, report):\n report.text('warning', 'init_report() not imp...
[ "0.7427176", "0.7222436", "0.7195777", "0.71178955", "0.7061699", "0.6907026", "0.68175685", "0.67216134", "0.67009014", "0.6695978", "0.66680425", "0.66522896", "0.6502363", "0.64432687", "0.64130306", "0.6410636", "0.636051", "0.63515186", "0.6334159", "0.6313862", "0.63041...
0.58354574
36
Test that the text is correct.
def test_changed_status_text(self): scale = "count" metric1 = dict( type="metric_type", name="Metric", unit="units", scale=scale, recent_measurements=[ dict(count=dict(value=0, status="near_target_met")), dict(count=dict(value=42, status="target_not_met")), ], ) metric2 = dict( type="metric_type", name="Metric", unit="units", scale=scale, recent_measurements=[ dict(count=dict(value=5, status="target_met")), dict(count=dict(value=10, status="target_not_met")), ], ) metric_notification_data1 = MetricNotificationData(metric1, self.data_model, "status_changed") metric_notification_data2 = MetricNotificationData(metric2, self.data_model, "status_changed") notification = Notification( self.report, [metric_notification_data1, metric_notification_data2], "destination_uuid", {} ) text = build_notification_text(notification) self.assertEqual( "[Report 1](https://report1) has 2 metrics that are notable:\n\n" "* Metric status is red (target not met), was yellow (near target met). Value is 42 units, was 0 units.\n" "* Metric status is red (target not met), was green (target met). Value is 10 units, was 5 units.\n", text, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_text(self, text):\n pass", "def assert_text(self,actual, expected):\n actual_stripped = actual.rstrip('/')\n assert expected == actual_stripped, \"The text does not match:\\n\\tExpected : {0} \\n\\tActual : {1}\\n\".format(expected, actual_stripped)", "def test_text(self):\r\n ...
[ "0.7780006", "0.71805084", "0.7163137", "0.69507134", "0.69128454", "0.6879445", "0.6878319", "0.68413955", "0.6837145", "0.6819704", "0.6761079", "0.67541283", "0.67370874", "0.6677631", "0.6638475", "0.6612591", "0.6599315", "0.6586486", "0.6543073", "0.65285075", "0.652316...
0.0
-1
Test that the text is correct.
def test_unchanged_status_text(self): scale = "count" metric1 = dict(type="metric_type", name="Metric", unit="units", scale=scale, recent_measurements=[dict(count=dict(value=0, status="near_target_met")), dict(count=dict(value=42, status="near_target_met"))]) metric2 = dict(type="metric_type", name="Metric", unit="units", scale=scale, recent_measurements=[dict(count=dict(value=5, status="target_met")), dict(count=dict(value=10, status="target_not_met"))]) metric_notification_data1 = MetricNotificationData(metric1, self.data_model, "status_long_unchanged") metric_notification_data2 = MetricNotificationData(metric2, self.data_model, "status_long_unchanged") notification = Notification(self.report, [metric_notification_data1, metric_notification_data2], "destination_uuid", {}) text = build_notification_text(notification) self.assertEqual( "[Report 1](https://report1) has 2 metrics that are notable:\n\n" "* Metric has been yellow (near target met) for three weeks. Value: 42 units.\n" "* Metric has been red (target not met) for three weeks. Value: 10 units.\n", text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_text(self, text):\n pass", "def assert_text(self,actual, expected):\n actual_stripped = actual.rstrip('/')\n assert expected == actual_stripped, \"The text does not match:\\n\\tExpected : {0} \\n\\tActual : {1}\\n\".format(expected, actual_stripped)", "def test_text(self):\r\n ...
[ "0.7778528", "0.717947", "0.71633434", "0.6949054", "0.6913475", "0.68788403", "0.6877094", "0.68400156", "0.68368715", "0.6818046", "0.6760472", "0.6753992", "0.6737595", "0.6676952", "0.6637825", "0.6612554", "0.6598669", "0.658587", "0.65414304", "0.65287834", "0.652236", ...
0.0
-1
Test that the text is correct.
def test_unknown_text(self): metric1 = dict( type="metric_type", name="Metric", unit="units", scale="count", recent_measurements=[ dict(count=dict(value=0, status="near_target_met")), dict(count=dict(value=None, status="unknown")), ], ) metric_notification_data1 = MetricNotificationData(metric1, self.data_model, "status_changed") notification = Notification(self.report, [metric_notification_data1], "destination_uuid", {}) text = build_notification_text(notification) self.assertEqual( "[Report 1](https://report1) has 1 metric that is notable:\n\n" "* Metric status is white (unknown), was yellow (near target met). Value is ? units, was 0 units.\n", text, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_text(self, text):\n pass", "def assert_text(self,actual, expected):\n actual_stripped = actual.rstrip('/')\n assert expected == actual_stripped, \"The text does not match:\\n\\tExpected : {0} \\n\\tActual : {1}\\n\".format(expected, actual_stripped)", "def test_text(self):\r\n ...
[ "0.7778226", "0.7179298", "0.71623003", "0.69488645", "0.6911859", "0.6878704", "0.6875989", "0.6840048", "0.68342376", "0.681757", "0.6759105", "0.6752436", "0.6735727", "0.66753113", "0.6636938", "0.6611503", "0.6597898", "0.65839463", "0.65408915", "0.6528578", "0.65214723...
0.0
-1
./fasttext skipgram input dumps.txt output model dim 256 minCount 1
def preexe(): from collections import OrderedDict as odict term_index = odict() term_vec = pickle.loads(open('term_vec.pkl', 'rb').read()) with open('./dumps.txt', 'r') as f: datasets = [] for fi, line in enumerate(f): if fi > 50000: break if fi%500 == 0: print("now iter {}".format(fi)) terms = line.strip().split() for slide in range(0, len(terms) - 4, 1 ): ans = terms[slide+4] buff = [] try: [buff.append(term_vec[term]) for term in terms[slide: slide+4]] except KeyError as e: continue datasets.append( (buff, ans, terms[slide: slide+5]) ) if term_index.get(ans) is None: term_index[ans] = len(term_index) open('datasets.pkl', 'wb').write(pickle.dumps(datasets)) open('term_index.pkl', 'wb').write(pickle.dumps(term_index))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_fasttext_skipgram(self, corpus_path,\n output_path,\n **kwargs):\n print(\"Training Fasttext model using Skipgram method\")\n self.fasttext_model = fasttext.train_unsupervised(corpus_path, model='skipgram', **kwargs)\n sel...
[ "0.7335781", "0.6271761", "0.6103409", "0.5918872", "0.5912268", "0.59084326", "0.5804064", "0.57811296", "0.56691426", "0.5648485", "0.5640333", "0.5547922", "0.5508764", "0.54452556", "0.5404064", "0.5391015", "0.53892845", "0.5379182", "0.53635854", "0.535121", "0.5332329"...
0.0
-1
Function to join path and filename.
def pjoin(self, in_dir, file_name): return os.path.join(in_dir, file_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def join(path: str, fileName: str) -> str:\n if os.name == 'nt':\n return ntpath.join(path, fileName)\n else:\n return posixpath.join(path, fileName)", "def filename_path_join(path, filename):\n\n # Raise an error if filename is None\n if filename is None:\n ...
[ "0.8078713", "0.7777003", "0.77231276", "0.7715079", "0.7641831", "0.76057595", "0.7435873", "0.74198025", "0.7322059", "0.71884507", "0.71656954", "0.7080947", "0.7009298", "0.694353", "0.69216216", "0.6880216", "0.6874053", "0.6855651", "0.68301845", "0.6820693", "0.681826"...
0.7675447
4
Function that loads and sets the necessary variables.
def set_variables(self): root_dir = os.path.dirname(os.path.realpath(__file__)) self.scratch_dir = os.path.join(root_dir, 'scratch') self.input_dir = os.path.join(root_dir, 'input_data') self.web_dir = os.path.join(root_dir, 'webserver') #os.chdir(self.scratch_dir) # Input data BIOSAFE self.legal_weights = pd.read_csv( self.pjoin(self.input_dir, 'legalWeights.csv'), index_col = 0) self.links_law = pd.read_csv( self.pjoin(self.input_dir, 'linksLaw.csv'), index_col = 0) self.links_eco1 = pd.read_csv( self.pjoin(self.input_dir, 'linksEco.csv'), index_col = 0) self.lut = pd.read_excel( self.pjoin(self.input_dir, 'BIOSAFE_20190711.xlsx'), sheet_name = 'lut_RWES').fillna(method='ffill') # this lookup table (lut) has: # ecotope codes of BIOSAFE in the 1st column: oldEcotope # aggregated/translated ectotopes in 2nd column: newEcotope # Ecotopes used in Virtual River self.vr_eco = pd.read_csv( self.pjoin(self.input_dir, 'VR_ecotopes.csv')) # Aggregate BIOSAFE ecotopes into RWES ecotopes self.links_eco2 = bsf.aggregateEcotopes(self.links_eco1, self.lut) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_vars():\n\tda_vinci.base.usepackage(\"pgfkeys\")\n\tda_vinci.base.add_preamble(setup_script)", "def load_variables(cls):\n cls._variablesDict = fileops.get_json_dict(cls.get_variables_filepath())", "def init_vars(self):\n\n load_dotenv()\n self.smart_cube = True if os.environ.get(...
[ "0.6802117", "0.67515796", "0.6646807", "0.663246", "0.66175544", "0.64861125", "0.6454291", "0.64139426", "0.63909316", "0.63547635", "0.6338879", "0.62177134", "0.61795104", "0.61608815", "0.6121431", "0.6116283", "0.60934865", "0.60814786", "0.60814786", "0.60814786", "0.6...
0.5848839
33
Sets up biosafe and stores it as an object variable.
def setup_biosafe(self): # Generate dummy data in the right format species_presence = pd.DataFrame( np.random.randint(2, size=len(self.links_law)), columns=['speciesPresence'], index=self.links_law.index) ecotope_area = pd.DataFrame( np.ones(len(self.links_eco2.columns)-1) * 1e5, columns = ['area_m2'], index = self.links_eco2.columns.values[0:-1]) # Simplify ecotope tables to VR ecotopes unique_eco = np.unique( np.hstack((self.vr_eco.ecotope1.values, self.vr_eco.ecotope2.values))) links_eco3 = self.links_eco2.reindex(columns=unique_eco) ecotope_area = ecotope_area.reindex(index=unique_eco) # Run a first version of Biosafe self.bsf_model = bsf.biosafe( self.legal_weights, self.links_law, links_eco3, species_presence, ecotope_area) #PotTax = self.bsf_model.TFI() #PotAll = self.bsf_model.FI() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self):\n self.ae = None", "def __init__(self, barcamp, handler):\n self.barcamp = barcamp\n self.handler = handler\n self.app = self.handler.app\n self.config = self.handler.app.config\n self.user = self.handler.user", "def setup():\n global zb\n # Signal...
[ "0.56603616", "0.5621632", "0.55927515", "0.5504273", "0.53797036", "0.5317156", "0.5243224", "0.52396697", "0.52148014", "0.5166802", "0.51143235", "0.51060176", "0.50999767", "0.50967854", "0.50958705", "0.50950307", "0.50940466", "0.5094038", "0.50789654", "0.5071926", "0....
0.6278834
0
Calculate the total area of all ecotopes on the playing board.
def ecotope_area_sums(self, board): # clean up the input and merge into a single dataframe cols = ['geometry', 'z_reference', 'landuse', 'biosafe'] board_clean = board.loc[board.biosafe, cols] board_eco = pd.merge(board_clean, self.vr_eco, on=['z_reference', 'landuse']) # optional: output gdf to shp # gdf = board_eco.copy() # gdf['biosafe'] = gdf.biosafe.values.astype('int') # gdf.to_file('board_eco.shp') # calculate the total area of all columns # note: landuse-z_reference combinations not in vr_ecotopes are # excluded area_eco1 = board_eco.groupby('ecotope1').sum() area_eco2 = board_eco.groupby('ecotope2').sum() area_fractions = pd.concat([area_eco1.fraction1, area_eco2.fraction2], axis=1, sort=True) area_total = area_fractions.fillna(0).sum(axis=1).reset_index() area_total.columns = ['ecotope', 'area_m2'] # assert that that total area of the ecotopes matches the biosafe # hexagons try: assert int(area_total.sum().area_m2) == int(board_clean.shape[0]),\ ("ERROR: There appears to be one or more polygons that is not " + "detected correctly, resulting in a missmatch of the VR ecotopes") except AssertionError as error: print(error) pass area_out = area_total.set_index('ecotope') area_out.index.name=None return area_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += ...
[ "0.72191346", "0.69285345", "0.6818615", "0.6791541", "0.6719441", "0.6589277", "0.6585048", "0.6579154", "0.65724313", "0.65674067", "0.6542835", "0.651549", "0.6497927", "0.64875937", "0.64872205", "0.6446514", "0.6436132", "0.6436132", "0.6436132", "0.6436132", "0.6428201"...
0.7589144
0
Function that processes the current board (including the initial board at the start of the Virtual River game).
def process_board(self, hexagons, reference=False): # Input data Virtual River board = gpd.GeoDataFrame.from_features(hexagons.features) if reference: self.board_reference = board else: self.board_intervention = board # Evaluate the board eco_area = self.ecotope_area_sums(board) self.bsf_model.ecotopeArea = eco_area PotTax = self.bsf_model.TFI() if reference: self.PotTax_reference = PotTax else: self.PotTax_intervention = PotTax return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initBoard(self):\n pass", "def set_board(board):", "def advance(self, board):", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient ...
[ "0.72907543", "0.711742", "0.6935223", "0.686896", "0.68323153", "0.6819589", "0.6773056", "0.6760123", "0.6743392", "0.6696341", "0.66691166", "0.6662628", "0.66314083", "0.6610427", "0.6580219", "0.6562503", "0.65226245", "0.6518735", "0.6464358", "0.64634323", "0.64540416"...
0.0
-1
Function that calculates the biodiversity score based on the Biosafe output. the numbers 29.33 and 1.4349 follow from running MC simulations to determine the lowest and highest possible scores. The biodiversity score reflects the 0100% range between the two.
def set_score(self): if self.PotTax_intervention is None: if self.PotTax_reference is not None: self.score = (((self.PotTax_reference.sum().TFI - 29.33) / 1.4349) / 100) else: print("There is no Biosafe output to score") return else: self.score = (((self.PotTax_intervention.sum().TFI - 29.33) / 1.4349) / 100) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval", "def calc_score(score...
[ "0.6018452", "0.589307", "0.58814985", "0.58546895", "0.5849375", "0.5819225", "0.57758874", "0.5774465", "0.573999", "0.571513", "0.56816", "0.56671625", "0.56617856", "0.5648553", "0.5645618", "0.56252235", "0.55905104", "0.5585876", "0.5570236", "0.55351394", "0.550514", ...
0.5997721
1
Function to plot the biosafe output, not called as such in the Virtual River. biodiversity_graph() is called instead in Virtual River.
def plot(self): # plot the data for checking fig, [[ax1,ax2],[ax3,ax4], [ax5,ax6]] = plt.subplots( 3,2, figsize=(10,8)) # Relative height self.board_reference.plot( column='z_reference', cmap='GnBu_r', legend=True, ax=ax1) self.board_intervention.plot( column='z_reference', cmap='GnBu_r', legend=True, ax=ax2) # Landuse self.board_reference.plot( column='landuse', legend=True, ax=ax3, cmap='viridis', scheme='equal_interval', k=11) self.board_intervention.plot( column='landuse', legend=True, ax=ax4, cmap='viridis', scheme='equal_interval', k=11) index = np.arange(7) xticks = self.PotTax_reference.index.values bar_width = 0.3 # plot the initial and new situation comparison label = ("reference: " + str(round(self.PotTax_reference.sum().TFI, 2))) reference = ax5.bar( index, self.PotTax_reference.values.flatten(), bar_width, label=label, tick_label=xticks) label = ("intervention: " + str(round(self.PotTax_intervention.sum().TFI, 2))) intervention = ax5.bar( index+bar_width, self.PotTax_intervention.values.flatten(), bar_width, label=label, tick_label=xticks) ax5.set_ylabel("total value") ax5.legend(loc='best') for tick in ax5.get_xticklabels(): tick.set_rotation(90) # plot the percentage increase/decrease between the initial and new # situation data = self.PotTax_percentage.values.flatten() percentage = ax6.bar( index, data, bar_width, label="percentage", tick_label=xticks) ax6.set_ylabel("increase (%)") minimum = min(data) maximum = max(data) size = len(str(int(round(maximum)))) maximum = int(str(maximum)[:1]) maximum = (maximum + 1) * (10**(size-1)) ax6.set_ylim([min(0, minimum), maximum]) for tick in ax6.get_xticklabels(): tick.set_rotation(90)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_graph(self) -> None:", "def biodiversity_graph(self, graph=\"percentage\"):\n plt.ioff()\n fig, ax = plt.subplots()\n index = np.arange(7)\n bar_width = 0.3\n offset = bar_width / 2\n if graph == \"score\":\n if self.PotTax_reference is not None:\n ...
[ "0.66557336", "0.6585414", "0.6435863", "0.6365141", "0.61312085", "0.60789055", "0.6042521", "0.6041088", "0.60108477", "0.6002608", "0.59909225", "0.597029", "0.59550214", "0.5904745", "0.5902543", "0.5886251", "0.58612454", "0.5829464", "0.5781532", "0.5781532", "0.5781532...
0.0
-1
Function that compares the intervention (current board state) with the reference (initial board state) and stores the differences between the two, both absolute and percentages.
def compare(self): self.PotTax_increase = self.PotTax_intervention - self.PotTax_reference self.PotTax_percentage = ( (self.PotTax_increase / self.PotTax_reference) * 100) """ # this sets the PotTax_percentage to actual percentages. self.PotTax_percentage['TFI'] = pd.Series( ["{0:.2f}%".format(val * 100) for val in self.PotTax_percentage['TFI']], index = self.PotTax_percentage.index) """ return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_change(self, current, previous):\n current = float(current)\n previous = float(previous)\n if current == previous:\n return 0\n try:\n r = (abs(current - previous) / previous) * 100\n if r > 100:\n r = 100\n return rou...
[ "0.62179923", "0.60718656", "0.6032007", "0.59902894", "0.5899323", "0.5853778", "0.58384997", "0.5829881", "0.56770056", "0.56709003", "0.5670638", "0.5651848", "0.5637561", "0.56240946", "0.5624037", "0.5612395", "0.5599351", "0.5595555", "0.5589821", "0.5587549", "0.557466...
0.6039716
2
Function to generate the output graphs displayed in the Tygron engine.
def biodiversity_graph(self, graph="percentage"): plt.ioff() fig, ax = plt.subplots() index = np.arange(7) bar_width = 0.3 offset = bar_width / 2 if graph == "score": if self.PotTax_reference is not None: # this requires a deepcopy, otherwise the xticks updates also # updates the PotTax_percentage indexes. xticks = deepcopy(self.PotTax_reference.index.values) for i, item in enumerate(xticks): if item == "DragonDamselflies": xticks[i] = "Dragon &\nDamselflies" if item == "HigherPlants": xticks[i] = "Higher\nPlants" #label = ("reference: " + # str(round(self.PotTax_reference.sum().TFI, 2))) label = "initial board" reference = ax.bar( index-offset, self.PotTax_reference.values.flatten(), bar_width, label=label, tick_label=xticks) if self.PotTax_intervention is not None: #label = ("intervention: " + # str(round(self.PotTax_intervention.sum().TFI, 2))) label = "current board" intervention = ax.bar( index+offset, self.PotTax_intervention.values.flatten(), bar_width, label=label, tick_label=xticks) ax.set_title("Biodiversity scores") ax.set_ylabel("total value") legend = ax.legend(loc='best', facecolor='black', edgecolor='w', fancybox=True, framealpha=0.5, fontsize="large") plt.setp(legend.get_texts(), color='w') else: if self.PotTax_percentage is not None: # this requires a deepcopy, otherwise the xticks updates also # updates the PotTax_percentage indexes. xticks = deepcopy(self.PotTax_percentage.index.values) for i, item in enumerate(xticks): if item == "DragonDamselflies": xticks[i] = "Dragon &\nDamselflies" if item == "HigherPlants": xticks[i] = "Higher\nPlants" data = self.PotTax_percentage.values.flatten() percentage = ax.bar( index, data, bar_width, label="percentage", tick_label=xticks) ax.set_title("Biodiversity change") ax.set_ylabel("change (%)") # the xticks rotation could probably be handled better. for tick in ax.get_xticklabels(): tick.set_rotation(90) tick.set_fontsize(14) # set the color of all figure borders, axis ticks and text to white. 
ax.spines['bottom'].set_color('w') ax.spines['top'].set_color('w') ax.spines['right'].set_color('w') ax.spines['left'].set_color('w') ax.tick_params(axis='x', colors='w') ax.tick_params(axis='y', colors='w') ax.yaxis.label.set_color('w') ax.yaxis.label.set_fontsize(14) ax.xaxis.label.set_color('w') ax.xaxis.label.set_fontsize(14) ax.title.set_fontsize(20) ax.title.set_color('w') plt.tight_layout() if graph == "score": plt.savefig(os.path.join(self.web_dir, "biodiversity_score1.png"), edgecolor='w',transparent=True) else: plt.savefig(os.path.join(self.web_dir, "biodiversity_score2.png"), edgecolor='w',transparent=True) plt.close(fig) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_graphs(self):\n\n try:\n from keras.utils import plot_model\n from keras.utils.vis_utils import model_to_dot\n\n # from IPython.display import SVG\n\n plot_model(self.model, to_file=\"model.png\")\n plot_model(\n self.latent_to_st...
[ "0.6862232", "0.6708507", "0.66322786", "0.63818264", "0.624256", "0.6234403", "0.6228745", "0.62106884", "0.6164413", "0.61330986", "0.61289656", "0.6121739", "0.6121739", "0.6121739", "0.61114043", "0.60845", "0.60834754", "0.6047425", "0.6043085", "0.6017747", "0.60091543"...
0.0
-1
Getter for the reference (initial board state) biosafe output.
def get_reference(self): return self.PotTax_reference
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bribe(self):\r\n return self.bribe", "def state(self):\r\n return str(self)", "def output(self):\n return ''.join([state[1] for state in self.condensed_output_states])", "def get_reference(self):\t\t\n\t\treturn self._reference", "def state(self) -> str:", "def read_acbr(self...
[ "0.6169057", "0.6052161", "0.60329854", "0.6009293", "0.5998664", "0.5996888", "0.5995191", "0.59844786", "0.59398574", "0.58981454", "0.58868086", "0.58868086", "0.58711606", "0.58579654", "0.58579654", "0.58579654", "0.58579654", "0.58579654", "0.58579654", "0.58522797", "0...
0.0
-1
Getter for the current intervention (board state) biosafe output.
def get_intervention(self): return self.PotTax_intervention
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_output(self):\r\n return self.on", "def current_state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"current_state\")", "def getAI(self):\n device = self.reducetoolbar.detectorcombobox.currentText()\n ai = self.calibrationsettings.AI(device)\n return ai", ...
[ "0.6088682", "0.5991196", "0.59810793", "0.59794337", "0.59147507", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.5912527", "0.58739257", "0.5803603", "0.57730144", "0.57642055...
0.56280655
32
Getter for the percentage difference.
def get_percentage(self): return self.PotTax_percentage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def get_percent(self):\n return self.percent", "def pct(self):\n\t\treturn self.bottle.pct()", "def percent(self):\r\n return self._percent", "def percentage_update(self...
[ "0.7979501", "0.7979501", "0.766055", "0.74162847", "0.7349282", "0.71601516", "0.7119589", "0.7054972", "0.70371413", "0.70371413", "0.7024928", "0.70031303", "0.6962077", "0.69187254", "0.68619835", "0.68011016", "0.6790376", "0.676223", "0.67388844", "0.6734876", "0.670523...
0.712392
6
Getter for the score.
def get_score(self): return self.score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getScore(self):\r\n return self._score", "def get_score(self):\n return self.__score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def getScore(self):\n return self._s...
[ "0.91512865", "0.9107829", "0.908982", "0.908982", "0.908982", "0.9061592", "0.8963942", "0.8736967", "0.86330646", "0.8615216", "0.8579917", "0.8570744", "0.8394626", "0.8275839", "0.8265239", "0.8202533", "0.8185984", "0.8136764", "0.80746853", "0.8073835", "0.79118806", ...
0.91986686
2
Function that prints the biosafe output. Useful for doing multiple runs (e.g. MC), not called in Virtual River.
def print_output(self): print("Reference score: " + str(self.PotTax_reference.sum().TFI)) print("Intervention score: " + str(self.PotTax_intervention.sum().TFI)) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_out():\n pass", "def print(self):\r\n self.print_avec_separateur()", "def print(self):\n self.print_avec_separateur(\" \")", "def printOutput(self):\n pass", "def display():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # Call the w...
[ "0.7084671", "0.67312264", "0.6478312", "0.6462846", "0.64194113", "0.6415898", "0.6407844", "0.6329717", "0.630891", "0.63030106", "0.62839437", "0.6273448", "0.6249168", "0.61708874", "0.61392146", "0.6125898", "0.6113273", "0.61126554", "0.61101013", "0.6100479", "0.609391...
0.0
-1
Function to test the code separately from the Virtual River.
def test(): root_path = os.path.dirname(os.path.realpath(__file__)) test_path = os.path.join(root_path, 'test_files') with open(os.path.join(test_path, 'hexagons0.geojson')) as f: hexagons_old = load(f) with open(os.path.join(test_path, 'hexagons1.geojson')) as f: hexagons_new = load(f) return hexagons_new, hexagons_old
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_01_lighting(self):", "def _lv_test(self):\n raise NotImplementedError('Levene Test is not implemented')", "def runTest(self):\n E = main()\n self.assertInside(E, energy, 1e-5)", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(se...
[ "0.6968613", "0.6791529", "0.6691997", "0.6609146", "0.6609146", "0.6609146", "0.6609146", "0.6609146", "0.6608754", "0.6608754", "0.6608754", "0.65086514", "0.6462126", "0.6446393", "0.6410319", "0.63936573", "0.6387084", "0.6344221", "0.63053054", "0.6279565", "0.6279565", ...
0.0
-1
Run variant ensembl predictor alone with custom options. See options details at
def run_vep_annotator(vep_data: str, vcf_path: str, out_path: str, fasta: str, vep_custom: Union[str,list]=None, overwrite: bool=False, vep_n_fork: int=4): vep_path = os.path.normpath(os.path.join(__file__, "../../tools/ensembl-vep/vep")) need_run = True if os.path.exists(out_path) and not overwrite: need_run = False if need_run: print("STATUS: RUNNING VEP") if os.path.exists(out_path): os.remove(out_path) print("removed existing file: %s" % out_path) cmd = """%s \ --dir %s \ --af \ --af_gnomad \ --af_esp \ --clin_sig_allele 0 \ --max_af \ --af_1k \ --no_progress \ --no_stats \ --appris \ --biotype \ --buffer_size 500 \ --canonical \ --ccds \ --check_existing \ --distance 5000 \ --hgvs \ --fork %s \ --numbers \ --mane \ --pick \ --polyphen b \ --protein \ --pubmed \ --regulatory \ --sift b \ --species homo_sapiens \ --symbol \ --transcript_version \ --tsl \ --uniprot \ --input_file %s \ --output_file %s \ --fasta %s \ --cache \ --offline """ % (vep_path, vep_data, vep_n_fork, vcf_path, out_path, fasta) if vep_custom is not None: if type(vep_custom) == list: for v_custom in vep_custom: cmd += "--custom %s " % v_custom elif type(vep_custom) == str: cmd += "--custom %s " % vep_custom else: raise ValueError("vep_custom should be of type list or str") os.system(cmd) else: print("output file %s already exists and overwrite is set to False" % out_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main() -> None:\n parser = ArgumentParser(description=\"SoccerPredictor:\", formatter_class=ArgumentDefaultsHelpFormatter)\n subparsers = parser.add_subparsers(title=\"Modes to run\", dest=\"command\")\n\n # Trainer args\n trainer_parser = subparsers.add_parser(RunMode.Train.value, help=\"Trains mo...
[ "0.6514513", "0.61850375", "0.5984537", "0.59499717", "0.5898596", "0.5883336", "0.5870205", "0.5862893", "0.57968825", "0.5790698", "0.5787918", "0.5760887", "0.57562155", "0.5749064", "0.57390314", "0.57390314", "0.5728084", "0.5717244", "0.57032615", "0.569978", "0.5696714...
0.0
-1
Like `Flask.app.route` but takes only a function that returns HtmlSanitizedStr
def safe_route(app: Flask, rule, **options) -> Callable[[RouteFunction], None]: original_decorator = app.route(rule, **options) def decorator(fn: RouteFunction): return original_decorator(compose(str, fn)) # type: ignore return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_html(func):\n\n cleaner = re.compile(\"<.*?>\")\n def new_func(*args, strip_html=False, **kwargs):\n name = func(*args, **kwargs)\n if strip_html:\n if isinstance(name, str):\n return html.unescape(re.sub(cleaner, \"\", name))\n elif isinstance(nam...
[ "0.60575014", "0.5710384", "0.54833734", "0.54531986", "0.52721405", "0.52415216", "0.5219625", "0.51628417", "0.51594687", "0.5052998", "0.50139946", "0.5011002", "0.5004929", "0.4984093", "0.49426973", "0.49259076", "0.49226424", "0.48769718", "0.48679265", "0.48450527", "0...
0.5955667
1
Loads an observations CSV file.
def load_timeline(filename): try: # Create an empty timeline timeline = ObservationTimeline() # Dictionary mapping agent's name to held item carrying = {} # Read data from input file with open(filename, newline='') as csvfile: obs = csv.reader(csvfile, delimiter='\n') for row in obs: # Unpack each row col = tuple(row[0].split(',')) # If too many or too few arguments, Error if not len(col) == 4: raise ValueError("Unpacking row error") # Adds observation to timeline timeline.add(Observation(col[0], col[1], col[2])) # If agent is carrying item, add to timeline if not col[3] == '': carrying.update({col[0]: col[3]}) # Return Tuple of carried item dict and ObsTimeline return (carrying, timeline) except OSError: raise OSError("Cannot open file")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadCSV(input_file):", "def read_csv(self, filepath, obs_vars = ['obs'], header = True):\n # determine if the type file is gzip\n filetype, encoding = mimetypes.guess_type(filepath)\n if encoding == 'gzip':\n self.data = pd.read_csv(filepath, compression='gzip')\n else:...
[ "0.72104836", "0.68858993", "0.66881233", "0.6687184", "0.66345334", "0.6607736", "0.65853924", "0.6583807", "0.6527228", "0.65123886", "0.6504362", "0.6419528", "0.6413457", "0.6371477", "0.6369214", "0.63646847", "0.62995994", "0.6261016", "0.62593544", "0.62590593", "0.625...
0.0
-1
Program entry point. Loads a CSV file of observations Determines how items were exchanged during various rendezvous Prints the exchanges as they happen, if desired Prints the latest owner of a specific item, if desired. Otherwise neatly prints a dictionary mapping suspects to the item they currently own. This program will return an exit code of `1` in one of two
def main(args): # Tuple of carried items and timeline time_tuple = load_timeline(args.observations) # For each Observation in list, calculated final held item for suspectPair in time_tuple[1].rendezvous(): # If user wanted exchanges, print each exchange if args.exchanges: print(suspectPair[0].name + " meets with " + suspectPair[1].name + " to exchange " + time_tuple[0][suspectPair[0].name] + " for " + time_tuple[0][suspectPair[1].name] + ".") # Trades items temp_item = time_tuple[0][suspectPair[0].name] time_tuple[0][suspectPair[0].name] = time_tuple[0][suspectPair[1].name] time_tuple[0][suspectPair[1].name] = temp_item # If no items specified or exchanges is true, # print list of final help items if (args.item == '') or (args.exchanges): pprint.pprint(time_tuple[0], indent=4) # If user specified an item, print who has said item if not args.item == '': for name, i in time_tuple[0].items(): if i == args.item: print(name + " had the " + i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n options = [\"Add\", \"Remove\", \"Update\", \"Oldest person\", \"Persons closest to average\"]\n common_options = [\"Name: \", \"Year: \"]\n file = \"model/hr/persons.csv\"\n title_list = [\"Id\", \"Name\", \"Year\"]\n choice = None\n dont_clear = False\n while choice != '0':\n ...
[ "0.6056687", "0.57561624", "0.5681713", "0.5558769", "0.5533532", "0.55103606", "0.54816645", "0.5471882", "0.5470575", "0.5453268", "0.54475284", "0.5446846", "0.5314073", "0.53068393", "0.5291783", "0.52585655", "0.52576137", "0.5234658", "0.5230194", "0.5194857", "0.51882"...
0.6281538
0
Keep only notNaN column positions in all arrays.
def drop_nan_columns(arrays): # Keep all column indices not_nan_filter = ones(len(arrays[0]), dtype=bool) # Currently keeping all columns! # Keep column indices without missing value in all arrays # for a in arrays: # not_nan_filter &= ~isnan(a) return [a[not_nan_filter] for a in arrays]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def columns_with_na_values(data):\n aux = data.isna().sum() > 0\n return aux.index[aux.values].values", "def remove_nans(coords):\n s = np.apply_along_axis(sum,1,np.isnan(coords[1])) == 0\n coords[0] = (np.asarray(coords[0])[s]).tolist()\n coords[1] = coords[1][s,:]", "def remove_nans(arr):\n ...
[ "0.68530303", "0.67052513", "0.669365", "0.6676535", "0.66358685", "0.6495949", "0.6456195", "0.64343864", "0.6338271", "0.63302857", "0.62897485", "0.62503976", "0.6189583", "0.61509717", "0.61466753", "0.6088549", "0.6019746", "0.5990693", "0.59600526", "0.5936195", "0.5896...
0.7522396
0
Drop slice that contains only value from df.
def drop_uniform_slice_from_dataframe(df, value, axis=0): if axis == 0: dropped = (df == value).all(axis=0) if any(dropped): print('Removed {} column index(ices) whose values are all {}.'. format(dropped.sum(), value)) return df.ix[:, ~dropped] elif axis == 1: dropped = (df == value).all(axis=1) if any(dropped): print('Removed {} row index(ices) whose values are all {}.'.format( dropped.sum(), value)) return df.ix[~dropped, :]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_values(df, value=0, axis=0):\n \n if axis:\n return df.loc[:, (df != value).any(axis=1-axis)]\n else:\n return df.loc[(df != value).any(axis=1-axis)]", "def remove(df, pattern):\n return df[~df.index.isin(df.query(pattern).index)]", "def drop_transafers(df):\n re...
[ "0.6749588", "0.659988", "0.6581077", "0.64866114", "0.64584786", "0.64093405", "0.62364835", "0.6224", "0.6223723", "0.6217032", "0.6179184", "0.6157227", "0.61414427", "0.6132971", "0.6130289", "0.6113223", "0.610663", "0.60877687", "0.60699916", "0.6052917", "0.6052265", ...
0.75481135
0
Split df into n_split blocks (by row).
def split_dataframe(df, n_split, axis=0): # TODO: implement axis logic if df.shape[0] < n_split: raise ValueError( 'n_split ({}) can\'t be greater than the number of rows ({}).'. format(n_split, df.shape[0])) elif n_split <= 0: raise ValueError('n_split ({}) can\'t be less than 0.'.format(n_split)) n = df.shape[0] // n_split splits = [] for i in range(n_split): start_i = i * n end_i = (i + 1) * n splits.append(df.iloc[start_i:end_i, :]) i = n * n_split if i < df.shape[0]: splits.append(df.ix[i:]) return splits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_df(df, n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks", "def split_da...
[ "0.80189484", "0.7166912", "0.68179005", "0.67057437", "0.6623575", "0.6451587", "0.6366774", "0.6241725", "0.61946136", "0.6189358", "0.61609524", "0.6113555", "0.6094658", "0.6066726", "0.6060644", "0.6053875", "0.60479224", "0.5984508", "0.59791416", "0.59433573", "0.59427...
0.7756274
1
Normalize a DataFrame or Series.
def normalize_2d_or_1d(a, method, axis=None, rank_scale=10000, normalizing_mean=None, normalizing_std=None, normalizing_min=None, normalizing_max=None, normalizing_size=None): if rank(a) == 1: n_a = normalize_1d( a, method, rank_scale=rank_scale, normalizing_mean=normalizing_mean, normalizing_std=normalizing_std, normalizing_min=normalizing_min, normalizing_max=normalizing_max, normalizing_size=normalizing_size) if isinstance(a, Series): return Series(n_a, index=a.index) else: return n_a elif rank(a) == 2: if isinstance(a, DataFrame): if axis == 0 or axis == 1: return a.apply( normalize_1d, **{ 'method': method, 'rank_scale': rank_scale, 'normalizing_mean': normalizing_mean, 'normalizing_std': normalizing_std, 'normalizing_min': normalizing_min, 'normalizing_max': normalizing_max, 'normalizing_size': normalizing_size }, axis=axis) else: # Get normalizing size if normalizing_size is not None: size = normalizing_size else: size = a.values.size if method == '-0-': # Get normalizing mean if normalizing_mean is not None: mean = normalizing_mean else: mean = a.values.mean() # Get normalizing STD if normalizing_std is not None: std = normalizing_std else: std = a.values.std() # Normalize if std == 0: print( 'Not \'0-1\' normalizing (std = 0), but \'/ size\' normalizing ...' ) return a / size else: return (a - mean) / std elif method == '0-1': # Get normalizing min if normalizing_min is not None: min_ = normalizing_min else: min_ = a.values.min() # Get normalizing max if normalizing_max is not None: max_ = normalizing_max else: max_ = a.values.max() # Normalize if max_ - min_ == 0: print( 'Not \'0-1\' normalizing (max - min = 0), but \'/ size\' normalizing ...' ) return a / size else: return (a - min_) / (max_ - min_) elif method == 'rank': raise ValueError( 'Normalizing combination of \'rank\' & axis=\'all\' has not been implemented yet.' ) else: raise ValueError('Can\'t normalize >2 dimensional array-like.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self, df):\n return df / df.ix[0, :]", "def normalize_data(df):\n return df / df.ix[0,:]", "def normalize_data(df):\n return df / df.ix[0,:]", "def normalize_data(df):\r\n return df/df.ix[0,:]", "def normalize(self, df):\n return (df - df.mean()) / (df.max() - df.min())...
[ "0.80967456", "0.80151314", "0.80151314", "0.79464513", "0.78324926", "0.7578621", "0.75722766", "0.74043113", "0.7327699", "0.72870654", "0.7236674", "0.71289355", "0.70915204", "0.6990485", "0.69761825", "0.6927697", "0.6883224", "0.68788195", "0.6830347", "0.6810123", "0.6...
0.65927386
39
decorator to register a babel cli handler.
def babel_cli_handler(**options): def decorator(cls): """ decorates the given class and registers an instance of it into available babel cli handlers. :param BabelCLIHandlerBase cls: babel cli handler class. :returns: babel cli handler class. :rtype: BabelCLIHandlerBase """ instance = cls() babel_services.register_cli_handler(instance, **options) return cls return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decorator(cls):\n\n instance = cls()\n babel_services.register_cli_handler(instance, **options)\n\n return cls", "def __init__(self):\n\n super().__init__(BabelCLIHandlersEnum.INIT)", "def add_cmd_handler(self, cmd, func):\n len_args = len(inspect.getargspec(func)[0])\n ...
[ "0.83057785", "0.6101431", "0.6031415", "0.6014547", "0.5841859", "0.5641326", "0.5641326", "0.5525839", "0.55132365", "0.5494176", "0.5488199", "0.54627776", "0.5449929", "0.5416219", "0.53628474", "0.5356179", "0.5328154", "0.5316033", "0.5316033", "0.5316033", "0.5316033",...
0.8217108
1
decorates the given class and registers an instance of it into available babel cli handlers.
def decorator(cls): instance = cls() babel_services.register_cli_handler(instance, **options) return cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def babel_cli_handler(**options):\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available babel cli handlers.\n\n :param BabelCLIHandlerBase cls: babel cli handler class.\n\n :returns: babel cli handler class.\n :rty...
[ "0.7828587", "0.65119535", "0.64277583", "0.6273776", "0.62530273", "0.61781615", "0.61653596", "0.60024714", "0.60024714", "0.5967244", "0.57739705", "0.5762401", "0.5757489", "0.5746612", "0.56892204", "0.5668369", "0.56358856", "0.56313264", "0.5613844", "0.5579808", "0.55...
0.7280425
1
Generate a dict of security data for "initial" data.
def generate_object_data(self): object_dict = { 'content_type' : str(self.target_object._meta), 'object_id' : str(self.target_object._get_pk_val()), } return object_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_security_data(self):\n timestamp = int(time.time())\n security_dict = {\n 'content_type': str(self.target_object._meta),\n 'object_pk': str(self.target_object._get_pk_val()),\n 'timestamp': str(timestamp),\n 'security_hash': self.initial_securi...
[ "0.78241", "0.6145188", "0.6048201", "0.6043185", "0.6016839", "0.6012802", "0.5965179", "0.5900327", "0.58157974", "0.57789785", "0.5743514", "0.5737723", "0.5720123", "0.5668789", "0.563764", "0.5610418", "0.56098014", "0.5600891", "0.5594923", "0.5593127", "0.5536252", "...
0.5106261
81
Return a new (unsaved) shareditem object. Does not set any of the fields that would come from the Request object (i.e. ``user``).
def get_shared_object(self): if not self.is_valid(): raise ValueError("get_shared_object may only be called on valid forms") new = SharedItem( object_id = force_unicode(self.target_object._get_pk_val()), content_type = ContentType.objects.get_for_model(self.target_object), share_date = datetime.datetime.now(), ) return new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_shared_object(self):\n if not self.is_valid():\n raise ValueError(\"get_shared_object may only be called on valid forms\")\n\n new = SharedItem(\n content_type = ContentType.objects.get_for_model(self.target_object),\n object_id = force_unicode(self.target_...
[ "0.7739369", "0.64699394", "0.6046109", "0.5965286", "0.587094", "0.5790064", "0.5787484", "0.5744283", "0.5732211", "0.56390357", "0.5534467", "0.5512519", "0.5505798", "0.5461782", "0.53700083", "0.53373706", "0.53137255", "0.5306573", "0.5269624", "0.5267749", "0.52600294"...
0.772191
1
Generate a dict of security data for "initial" data.
def generate_object_data(self): object_dict = { 'content_type' : str(self.target_object._meta), 'object_id' : str(self.target_object._get_pk_val()), } return object_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_security_data(self):\n timestamp = int(time.time())\n security_dict = {\n 'content_type': str(self.target_object._meta),\n 'object_pk': str(self.target_object._get_pk_val()),\n 'timestamp': str(timestamp),\n 'security_hash': self.initial_securi...
[ "0.78241", "0.6145188", "0.6048201", "0.6043185", "0.6016839", "0.6012802", "0.5965179", "0.5900327", "0.58157974", "0.57789785", "0.5743514", "0.5737723", "0.5720123", "0.5668789", "0.563764", "0.5610418", "0.56098014", "0.5600891", "0.5594923", "0.5593127", "0.5536252", "...
0.5106261
82
Return a new (unsaved) shareditem object. Does not set any of the fields that would come from the Request object (i.e. ``user``).
def get_shared_object(self): if not self.is_valid(): raise ValueError("get_shared_object may only be called on valid forms") new = SharedItem( content_type = ContentType.objects.get_for_model(self.target_object), object_id = force_unicode(self.target_object._get_pk_val()), share_date = datetime.datetime.now(), ) return new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_shared_object(self):\n if not self.is_valid():\n raise ValueError(\"get_shared_object may only be called on valid forms\")\n\n new = SharedItem(\n object_id = force_unicode(self.target_object._get_pk_val()),\n content_type = ContentType.objects.get_for_mode...
[ "0.7722135", "0.64705783", "0.6047539", "0.59654415", "0.5869505", "0.57898974", "0.5785594", "0.57411844", "0.57283014", "0.5641411", "0.5534432", "0.5511725", "0.55054003", "0.5462437", "0.5369579", "0.5337859", "0.53142005", "0.5303614", "0.52669346", "0.52664846", "0.5259...
0.7739693
0
Test FeathrClient() get_online_features and batch_get can get data correctly.
def test_feathr_online_store_agg_features(): online_test_table = get_online_test_table_name("nycTaxiCITableMaven") test_workspace_dir = Path( __file__).parent.resolve() / "test_user_workspace" # os.chdir(test_workspace_dir) # The `feathr_runtime_location` was commented out in this config file, so feathr should use # Maven package as the dependency and `noop.jar` as the main file client: FeathrClient = basic_test_setup(os.path.join(test_workspace_dir, "feathr_config_maven.yaml")) location_id = TypedKey(key_column="DOLocationID", key_column_type=ValueType.INT32, description="location id in NYC", full_name="nyc_taxi.location_id") feature_query = FeatureQuery( feature_list=["f_location_avg_fare"], key=location_id) settings = ObservationSettings( observation_path="wasbs://public@azurefeathrstorage.blob.core.windows.net/sample_data/green_tripdata_2020-04.csv", event_timestamp_column="lpep_dropoff_datetime", timestamp_format="yyyy-MM-dd HH:mm:ss") now = datetime.now() # set output folder based on different runtime if client.spark_runtime == 'databricks': output_path = ''.join(['dbfs:/feathrazure_cijob','_', str(now.minute), '_', str(now.second), ".avro"]) else: output_path = ''.join(['abfss://feathrazuretest3fs@feathrazuretest3storage.dfs.core.windows.net/demo_data/output','_', str(now.minute), '_', str(now.second), ".avro"]) client.get_offline_features(observation_settings=settings, feature_query=feature_query, output_path=output_path) # assuming the job can successfully run; otherwise it will throw exception client.wait_job_to_finish(timeout_sec=Constants.SPARK_JOB_TIMEOUT_SECONDS) return backfill_time = BackfillTime(start=datetime( 2020, 5, 20), end=datetime(2020, 5, 20), step=timedelta(days=1)) redisSink = RedisSink(table_name=online_test_table) settings = MaterializationSettings("TestJobName", sinks=[redisSink], feature_names=[ "f_location_avg_fare", "f_location_max_fare"], backfill_time=backfill_time) client.materialize_features(settings) # just assume the job is 
successful without validating the actual result in Redis. Might need to consolidate # this part with the test_feathr_online_store test case client.wait_job_to_finish(timeout_sec=Constants.SPARK_JOB_TIMEOUT_SECONDS) res = client.get_online_features(online_test_table, '265', [ 'f_location_avg_fare', 'f_location_max_fare']) # just assume there are values. We don't hard code the values for now for testing # the correctness of the feature generation should be guaranteed by feathr runtime. # ID 239 and 265 are available in the `DOLocationID` column in this file: # https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2020-04.csv # View more details on this dataset: https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page assert len(res) == 2 assert res[0] != None assert res[1] != None res = client.multi_get_online_features(online_test_table, ['239', '265'], ['f_location_avg_fare', 'f_location_max_fare']) assert res['239'][0] != None assert res['239'][1] != None assert res['265'][0] != None assert res['265'][1] != None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get(self):\n simple_fields = {\n \"verbose\": False,\n \"min_core_neighbors\": self.min_core_neighbors,\n \"num_features\": 1,\n \"num_unpacked_features\": 2,\n \"num_distance_components\": 1,\n \"radius\": self.radius,\n ...
[ "0.6168632", "0.60641015", "0.60555446", "0.6000268", "0.59343565", "0.59123015", "0.58149856", "0.5790768", "0.5763791", "0.5751622", "0.5747959", "0.5700878", "0.56979954", "0.5681993", "0.566791", "0.5665153", "0.56642336", "0.56637", "0.56631935", "0.5621099", "0.5589105"...
0.66819775
0
Initializes the object to have a pronunciation dictionary available
def __init__(self): self._pronunciations = nltk.corpus.cmudict.dict()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()\n \"\"\"\n API Documentation for CMU dictionary corpus\n http://www.nltk.org/api/nltk.corpus.reader.html#module-nltk.corpus.reader.cmudict\n \"\"\"", "def __init__(self):\n super().__init__()\n self.m...
[ "0.7030081", "0.62855494", "0.62579364", "0.6253256", "0.6249114", "0.61850667", "0.6163011", "0.6113119", "0.6111343", "0.6096763", "0.6093766", "0.60932755", "0.60580236", "0.60405856", "0.60405856", "0.60081416", "0.60046226", "0.59790784", "0.59790784", "0.59735584", "0.5...
0.75347185
1
Returns the number of syllables in a word. If there's more than one pronunciation, take the shorter one. If there is no entry in the dictionary, return 1.
def num_syllables(self, word): # TODO: provide an implementation! word = word.lower() D = self._pronunciations #D = nltk.corpus.cmudict.dict() if(word not in D.keys()): #print word not in CMUDictionary return 1 #count stores no of syllables for each pronunciation of the word count = [] #for each pronunciation for x in D[word]: n = 0 #for each syllable for y in x: #if vowel sound if y[-1].isdigit(): n = n + 1 count.append(n) # return the pronunciation having least syllables return min(count) #return min([len([y for y in x if y[-1].isdigit()]) for x in D[word.lower()]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_of_syllables(self, word):\n\n if word.lower() in self.cmu_dict:\n return len([phoneme for phoneme in self.cmu_dict[word.lower()][0]\n if phoneme[-1].isdigit()])\n # If word is unknown, assume 1 syllable/3 letters (average for English)\n else:\n ...
[ "0.8187434", "0.7941615", "0.7839003", "0.77053165", "0.76928765", "0.76834965", "0.7579669", "0.7408953", "0.724696", "0.722788", "0.7227621", "0.71323586", "0.70042217", "0.69125587", "0.68785167", "0.68268627", "0.6769647", "0.67252815", "0.67212397", "0.6622493", "0.65549...
0.8625879
0
Returns True if two words (represented as lowercase strings) rhyme, False otherwise.
def rhymes(self, a, b): D = self._pronunciations a = a.lower() b = b.lower() # print "----------------------------------" # print "Rhyming ",a,b if a in D.keys() and b in D.keys(): a = D[a] #print a b = D[b] #print b #stores syllables after the first consonant sound last_syl_a = [] last_syl_b = [] # for each pronunciation of the word for y in a: syl = [] pos = 0 for i in range(0, len(y)): #if vowel if y[i][-1].isdigit(): pos = i break # append all syllables from first vowel for i in range(pos, len(y)): syl.append(y[i]) last_syl_a.append(syl) # print(last_syl_a) # for each pronunciation of the word for y in b: syl = [] pos = 0 for i in range(0, len(y)): # if vowel if y[i][-1].isdigit(): pos = i break # append all syllables after first consonant sound for i in range(pos, len(y)): syl.append(y[i]) last_syl_b.append(syl) # print(last_syl_b) if any(i in last_syl_a for i in last_syl_b): # print "Rhyming - Yes" return True else: # print "Checking if Shorter word is suffix of Longer word's pronunciation" if len(last_syl_a[0]) > len(last_syl_b[0]): big = last_syl_a small = last_syl_b else: big = last_syl_b small = last_syl_a for i in big: for j in small: count = 0 for k in range(0, len(j)): if j[-(k + 1)] == i[-(k + 1)]: count = count + 1 if count == len(j) and count > 0: # print "Rhyming - yes", i,j return True return False else: # Either or Both words not in CMU Dictionary return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def are_words_synonym(self, word1, word2):\n return self.get_intersection((word1, word2))", "def is_lexical(word_i, word_j):\n if word_i.isalpha() and word_j.isalpha():\n return True\n return False", "def exactMatch(self, mention):\n w1 = self.allWords()\n w2 = mention.all...
[ "0.71012914", "0.690699", "0.6702049", "0.6633581", "0.6579332", "0.6557945", "0.6547502", "0.6540323", "0.6418574", "0.6399188", "0.6355479", "0.6346227", "0.6344378", "0.6340427", "0.6335884", "0.6317267", "0.6293736", "0.62759924", "0.6260704", "0.62522906", "0.6198525", ...
0.60315377
31
Takes text where lines are separated by newline characters. Returns True if the text is a limerick, False otherwise. A limerick is defined as a poem with the form AABBA, where the A lines rhyme with each other, the B lines rhyme with each other, and the A lines do not rhyme with the B lines.
def is_limerick(self, text):
    """Return True when *text* (newline-separated lines) is a limerick.

    The poem must follow the AABBA scheme: the three A lines rhyme with
    each other, the two B lines rhyme with each other, and no A line
    rhymes with a B line.  Syllable-count constraints are also enforced.
    """
    lines = [ln.strip(' ') for ln in text.lower().split('\n')]
    lines = [ln for ln in lines if ln]  # drop blank lines
    # A limerick is exactly five lines (AABBA).
    if len(lines) != 5:
        return False

    # Tokenise each line after trimming surrounding punctuation.
    words = [self.apostrophe_tokenize(ln.strip(".,:;?!")) for ln in lines]

    # Per-line syllable totals.
    counts = [sum(self.num_syllables(w) for w in ws) for ws in words]

    # Every line needs at least 4 syllables.
    if any(c < 4 for c in counts):
        return False

    a1, a2, b1, b2, a3 = counts
    # A B line may not exceed any A line in syllables.
    if b1 > a1 or b1 > a2 or b1 > a3:
        return False
    if b2 > a1 or b2 > a2 or b2 > a3:
        return False
    # The B lines may differ by at most two syllables, likewise the A lines.
    if abs(b1 - b2) > 2:
        return False
    if abs(a1 - a2) > 2 or abs(a1 - a3) > 2 or abs(a2 - a3) > 2:
        return False

    last = [ws[-1] for ws in words]
    # A lines must all rhyme, B lines must rhyme, and no A may rhyme with a B.
    if (self.rhymes(last[0], last[1]) and self.rhymes(last[0], last[4])
            and self.rhymes(last[1], last[4])):
        if self.rhymes(last[2], last[3]):
            if (not self.rhymes(last[0], last[2]) and not self.rhymes(last[0], last[3])
                    and not self.rhymes(last[1], last[2]) and not self.rhymes(last[1], last[3])
                    and not self.rhymes(last[4], last[2]) and not self.rhymes(last[4], last[3])):
                return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_limerick(self, text):\n\n return False", "def is_limerick(self, text):\n \n sentences = text.splitlines()\n \n #remove blank setences\n sentences = [sentence for sentence in sentences if sentence.strip()] \n \n if len(sentences) != 5 : return False ...
[ "0.7332721", "0.7118218", "0.6140549", "0.58115304", "0.5777548", "0.5629705", "0.5620639", "0.5535499", "0.5423312", "0.5394802", "0.5378505", "0.53308886", "0.5302663", "0.5278468", "0.52720255", "0.5223611", "0.5223611", "0.51562977", "0.51394373", "0.5101829", "0.50781065...
0.7593807
0
Calculates the sky background temperature for a given Galactic longitude (gl), Galactic latitude (gb), and frequency (freq in MHz). Coordinates are in degrees. Assumes a spectral index of "index" (default is 2.55). The return value is in K. If a frequency array 'freqs' is given, then Tsky is calculated for each frequency in the array, and the returned value is a list of Tsky's.
def tsky(gl, gb, freq, index, freqs=None):
    """Sky background temperature in K, from the 408 MHz Haslam map.

    gl, gb -- Galactic longitude and latitude in degrees
    freq   -- frequency in MHz (used when *freqs* is None)
    index  -- spectral index exponent applied to (f / 408)
    freqs  -- optional sequence of frequencies (MHz); when given, a list
              of Tsky values (one per frequency) is returned instead

    Returns a single temperature in K, or a list of them when *freqs*
    is supplied.
    """
    # Unpack the packed Haslam table: a flat string of 5-character
    # numbers laid out as a 90 (longitude/4) x 180 (latitude) grid.
    nsky = np.zeros((90, 180), dtype=float)
    for ii in range(90):          # range instead of xrange: works on Python 2 and 3
        for jj in range(180):
            pos = (ii * 180 + jj) * 5
            nsky[ii, jj] = float(haslam_table[pos:pos + 5])

    # Convert (gl, gb) to table indices.
    b = int(gb + 90.5)
    if b >= 180:
        b = 179
    l = int(gl + 0.5)
    # BUG FIX: the original tested "gl >= 360", so gl in [359.5, 360)
    # left l == 360 and indexed past the table; test the rounded index.
    if l >= 360:
        l = 0
    l = int(l / 4)

    def _temp(f):
        # 2.7 K CMB floor plus the scaled 408 MHz brightness temperature.
        return 2.7 + nsky[l, b] * (f / 408.0) ** index

    if freqs is None:             # "is None" rather than "== None"
        return _temp(freq)
    return [_temp(f) for f in freqs]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tsky_range(gl, gb, f1, f2, index, freqs=None):\n\n\t# reading the table\n\tnsky=np.zeros((90, 180), dtype=float)\n\tfor ii in xrange(90):\n\t\tfor jj in xrange(180):\n\t\t\tpos=(ii*180+jj)*5\n\t\t\tnsky[ii,jj]=float(haslam_table[pos:pos+5])\n\n\t# Convert to standard l,b\n\tb = int(gb + 90.5)\n\tif b >= 180: b...
[ "0.7263583", "0.55189", "0.5414309", "0.52412045", "0.5169021", "0.51627105", "0.51417047", "0.51059294", "0.5028249", "0.49997443", "0.49332586", "0.4930508", "0.49028358", "0.48790222", "0.48755518", "0.4864208", "0.4823804", "0.4810659", "0.4780731", "0.4776312", "0.475257...
0.82942164
0
Calculates the average sky background temperature for a given Galactic longitude (gl), Galactic latitude (gb), between frequencies f1 and f2 (in MHz). Coordinates are in degrees. Assumes a spectral index of "index" (default is 2.55). The return value is in K. If a frequency array 'freqs' is given, then the average Tsky is calculated for each frequency range f0-f1, f1-f2, ... in the array, and the returned value is a list of average Tsky's. The size of the returned array is one less than the size of freqs.
def tsky_range(gl, gb, f1, f2, index, freqs=None):
    """Average sky background temperature in K over a frequency range.

    gl, gb -- Galactic longitude and latitude in degrees
    f1, f2 -- range boundaries in MHz (used when *freqs* is None)
    index  -- spectral index exponent applied to (f / 408)
    freqs  -- optional sequence of boundaries; when given, one average is
              returned per consecutive pair (f0-f1, f1-f2, ...), so the
              result list has len(freqs) - 1 entries

    Returns a single average temperature in K, or a list of them.
    """
    # Unpack the packed Haslam table (same layout as in tsky()).
    nsky = np.zeros((90, 180), dtype=float)
    for ii in range(90):          # range instead of xrange: works on Python 2 and 3
        for jj in range(180):
            pos = (ii * 180 + jj) * 5
            nsky[ii, jj] = float(haslam_table[pos:pos + 5])

    # Convert (gl, gb) to table indices.
    b = int(gb + 90.5)
    if b >= 180:
        b = 179
    l = int(gl + 0.5)
    # BUG FIX: test the rounded index, not gl, so l can never stay at 360
    # and index past the 90-row table.
    if l >= 360:
        l = 0
    l = int(l / 4)

    def _average(lo, hi):
        # Mean of 101 evenly spaced samples across [lo, hi].
        tot = 0.0
        for ii in range(101):
            f = lo + ii * (hi - lo) / 100.
            tot += 2.7 + nsky[l, b] * (f / 408.0) ** index
        # BUG FIX: 101 samples are summed, so divide by 101 (the original
        # divided by 100, overstating the average by about 1%).
        return tot / 101.

    if freqs is None:             # "is None" rather than "== None"
        return _average(f1, f2)
    return [_average(freqs[ff - 1], freqs[ff]) for ff in range(1, len(freqs))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tsky(gl, gb, freq, index, freqs=None):\n\n\t# reading the table\n\tnsky=np.zeros((90, 180), dtype=float)\n\tfor ii in xrange(90):\n\t\tfor jj in xrange(180):\n\t\t\tpos=(ii*180+jj)*5\n\t\t\tnsky[ii,jj]=float(haslam_table[pos:pos+5])\n\n\t# Convert to standard l,b\n\tb = int(gb + 90.5)\n\tif b >= 180: b = 179\n...
[ "0.7237736", "0.5336351", "0.53191733", "0.5311113", "0.52894306", "0.5208346", "0.5117886", "0.5105985", "0.50825256", "0.5041957", "0.4994894", "0.49861515", "0.49033117", "0.48573145", "0.48243278", "0.48106575", "0.4807822", "0.47676003", "0.4739023", "0.47271356", "0.471...
0.71738416
1
Converts Equatorial coordinates to Galactic coordinates
def eq2gal(ra, dec):
    """Convert Equatorial coordinates (ra, dec) to Galactic (gl, gb).

    Returns a (gl, gb) tuple in degrees.
    """
    galactic = ephem.Galactic(ephem.Equatorial(ra, dec))
    gl_deg = 180.0 * galactic.long.real / math.pi
    gb_deg = 180.0 * galactic.lat.real / math.pi
    return (gl_deg, gb_deg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def EquatorialToGalactic(Equatorial):\n \n # ra, dec, s => l,b,s\n ra = Equatorial[:,0]\n dec = Equatorial[:,1]\n s = Equatorial[:,2]\n cd = np.cos(dec)\n sd = np.sin(dec)\n b = np.arcsin(np.sin(decgp)*sd+np.cos(decgp)*cd*np.cos(ra-ragp))\n l = lcp-np.arctan...
[ "0.7226109", "0.69753695", "0.62914294", "0.6262002", "0.6209722", "0.60955137", "0.60862833", "0.59612167", "0.59590286", "0.5948453", "0.5936651", "0.59322864", "0.58702904", "0.58493227", "0.5813463", "0.57856524", "0.5758909", "0.57558006", "0.5741679", "0.5709453", "0.56...
0.67290187
2
Compute the modular inverse (using the extended Euclidean algorithm).
def compute_inverse(in1, in2):
    """Return the modular inverse of *in1* modulo *in2*.

    Uses the iterative extended Euclidean algorithm to find the Bezout
    coefficient s with s*in1 + t*in2 == gcd(in1, in2), then reduces s
    modulo in2.  (When gcd(in1, in2) != 1 the result is not a true
    inverse; callers are expected to pass coprime arguments.)
    """
    # The original carried a parallel "t" sequence that was computed
    # incorrectly (it multiplied by b instead of t) and never used; it is
    # dropped here.  Integer floor division replaces math.floor(a / b),
    # which avoids float rounding errors for large integers.
    r_prev, r_curr = in1, in2
    s_prev, s_curr = 1, 0
    while r_curr != 0:
        q = r_prev // r_curr
        r_prev, r_curr = r_curr, r_prev - q * r_curr
        s_prev, s_curr = s_curr, s_prev - q * s_curr
    return s_prev % in2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self, x, y):", "def erfcinv(a):", "def modular_inverse(e, z):\n g, x, y = extended_euclidean_algorithm(e, z)\n if g != 1: raise Exception('Modular inverse does not exist')\n else: return x % z", "def inverse_in_zn(g, n):\n assert(n >= g)\n x, y, d = extended_euclidean(n, g)\n if...
[ "0.7188857", "0.71758795", "0.70527136", "0.70293945", "0.68511194", "0.6820181", "0.6751133", "0.67222536", "0.6711877", "0.6700519", "0.6607881", "0.6585525", "0.65800756", "0.65740085", "0.65705127", "0.6567452", "0.6539267", "0.6527102", "0.64968294", "0.64808273", "0.647...
0.70715195
2
Create hash from document by sha1 algorithm.
def sha_hash(file_name: str):
    """Return the SHA-1 digest of *file_name*'s contents as an integer.

    The file is read in 64 KiB chunks, decoded as UTF-8 once, and fed to
    the project-local sha1() helper, which is expected to return a hex
    string (without the "0x" prefix).
    """
    BLOCKSIZE = 65536
    raw = bytearray()
    with open(file_name, 'rb') as afile:
        chunk = afile.read(BLOCKSIZE)
        while chunk:
            raw.extend(chunk)
            chunk = afile.read(BLOCKSIZE)
    # BUG FIX: the original decoded each 64 KiB chunk independently, so a
    # multi-byte UTF-8 character split across a chunk boundary raised
    # UnicodeDecodeError; decoding once over the full content is safe.
    line = raw.decode('utf-8')
    return int("0x" + sha1(line.encode()), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sha1hex(doc):\n doc_id = doc.pop('_id',None)\n doc_rev = doc.get('_rev',None)\n doc_string = str(doc)\n\n if doc_id is not None:\n doc['_id'] = doc_id\n\n if doc_rev is not None:\n doc['_rev'] = doc_rev\n\n return hashlib.sha1(doc_string).hexdigest().upper()", "def SHA1(self) ...
[ "0.77072245", "0.7234007", "0.69447297", "0.6892159", "0.6755166", "0.6729288", "0.67109865", "0.6634047", "0.6630094", "0.65769774", "0.6461438", "0.6441741", "0.6404586", "0.63657", "0.6312148", "0.6248096", "0.62417644", "0.6229102", "0.6181597", "0.617851", "0.61565894", ...
0.6487641
10
Create signature and save it into data/signature.txt
def sign(file_name: str) -> None:
    """Sign data/<file_name> and write the signature to data/signature.txt.

    Reads (p, q, g, h) from data/key.txt and the secret exponent a from
    data/secret_key.txt, then produces an ElGamal/DSA-style signature
    pair (c1, c2) with a fresh nonce r, retrying until both are nonzero.
    """
    print("Signing the file...")
    file_name = os.path.join('data', file_name)
    # with-statements close the key files (the original leaked both handles).
    with open("data/key.txt", "r") as key_file:
        p = int(key_file.readline().rstrip())
        q = int(key_file.readline().rstrip())
        g = int(key_file.readline().rstrip())
        h = int(key_file.readline().rstrip())  # public h: unused when signing, read to keep file order
    with open("data/secret_key.txt", "r") as secret_file:
        a = int(secret_file.readline().rstrip())

    digest = sha_hash(file_name)  # hash once instead of once per retry
    c1 = 0
    c2 = 0
    while c1 == 0 or c2 == 0:
        # NOTE(review): random is not a CSPRNG; secrets.randbelow would be
        # the secure choice for the per-signature nonce.
        r = random.randint(1, q - 1)
        c1 = square_multiply(g, r, p) % q
        c2 = ((digest + a * c1) * compute_inverse(r, q)) % q
    print('hash = ', digest)
    print('c1 = ', c1)
    print('c2 = ', c2)
    with open("data/signature.txt", "w") as sig_file:  # original never closed this
        sig_file.write(str(c1))
        sig_file.write("\n")
        sig_file.write(str(c2))
    print("cipher stored at signature.txt")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_signature(self):\n self.db_file.write(b\"\\x4b\\x57\\x44\\x42\\x00\\x01\\x03\\x01\")", "def send_signature(self, update, context):\n msg_file = 'signature_msg.txt'\n self.send_textfile(msg_file, update, context)", "def GenSampleSignature(text):\r\n demo_keypair = ('RSA.mVgY8RN6U...
[ "0.7170983", "0.6586112", "0.65388995", "0.6313484", "0.62940675", "0.62849694", "0.61516637", "0.61021864", "0.6018372", "0.59855306", "0.59176886", "0.5898463", "0.5864457", "0.58549124", "0.5852976", "0.5838192", "0.5816089", "0.58031625", "0.5802103", "0.5800313", "0.5731...
0.62543976
6
Verification process of signature for file name document
def verification(file_name: str) -> None:
    """Verify data/signature.txt against data/<file_name> using data/key.txt.

    Recomputes the verification value from the public key (p, q, g, h)
    and prints whether the stored signature (c1, c2) is valid.
    """
    print("Verification process...")
    file_name = os.path.join('data', file_name)
    # with-statements close the input files (the original leaked both handles).
    with open("data/key.txt", "r") as key_file:
        p = int(key_file.readline().rstrip())
        q = int(key_file.readline().rstrip())
        g = int(key_file.readline().rstrip())
        h = int(key_file.readline().rstrip())
    with open("data/signature.txt", "r") as sig_file:
        c1 = int(sig_file.readline().rstrip())
        c2 = int(sig_file.readline().rstrip())
    print('c1 = ', c1)
    print('c2 = ', c2)
    digest = sha_hash(file_name)
    print('hash = ', digest)
    inv_c2 = compute_inverse(c2, q)  # computed once; the original recomputed it
    t1 = (digest * inv_c2) % q
    t2 = (inv_c2 * c1) % q
    valid = ((square_multiply(g, t1, p) * square_multiply(h, t2, p)) % p) % q
    if valid == c1:
        print("Valid signature")
    else:
        print("Invalid signature")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_signature_dialog():\n signature_name = input(\"Enter signature identity: \")\n file_path = input(\"Enter file path: \")\n user = input(\"Enter username: \")\n\n if not(os.path.exists(user)):\n raise Exception(ERRORS.NOT_FOUND_USER)\n if not(os.path.exists(f\"{signature_name}.sig\")...
[ "0.66871303", "0.6619433", "0.6604733", "0.65458876", "0.6505863", "0.6505863", "0.63899714", "0.63860995", "0.63552636", "0.6281927", "0.61876994", "0.61288184", "0.6126398", "0.6071543", "0.60413754", "0.6026036", "0.60181516", "0.5999449", "0.59889215", "0.5982611", "0.596...
0.67912775
0
return path for file
def dataPath(self):
    """Return a File handle for the pickled render-layer data."""
    target = self._path + '/renderLayerData.data'
    return fl.File(target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def file_path(self):\n return self.lib.file_path", "def file_path(self):\n ...
[ "0.8236138", "0.8024989", "0.80158114", "0.79347897", "0.7926399", "0.79031473", "0.786907", "0.78308654", "0.7762692", "0.77185005", "0.76789683", "0.7646001", "0.7616632", "0.76083356", "0.75883675", "0.7509448", "0.74975693", "0.7472752", "0.7454609", "0.7448224", "0.74452...
0.0
-1
return path for lights file
def lightPath(self):
    """Return a Maya-file handle for the exported lights scene."""
    target = self._path + '/lights.ma'
    return mfl.mayaFile(target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )", "def darkpath(cam):\n return os.path.join(BASEPATH, cam + \"_dark\")", "def path_for(filename):\n if settings.value(Key.Theme) == Themes.Light.value:\n return (IMAGES_PATH / Themes.Light.value / filename).as_posix()\n ...
[ "0.82484734", "0.6809375", "0.66981214", "0.6469427", "0.6256557", "0.6256362", "0.62507707", "0.6244421", "0.62019056", "0.61183137", "0.61059153", "0.6080485", "0.6063016", "0.60597664", "0.60492027", "0.603537", "0.6034855", "0.602494", "0.5999086", "0.59892964", "0.597265...
0.820123
1
return the path for the shader file
def shaderPath(self):
    """Return a Maya-file handle for the exported shaders scene."""
    target = self._path + '/shaders.ma'
    return mfl.mayaFile(target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFragmentShader(self):\n return self.fshader", "def getCompiled(self):\n if self.isCompiled():\n return self.shader\n else:\n raise Exception(\"el shader no ha sido compilado aun\")", "def dataShader(self):\n\t\treturn self._shader", "def location( self, shade...
[ "0.6595272", "0.6361699", "0.634404", "0.63148177", "0.6163704", "0.614001", "0.610097", "0.6075785", "0.6053845", "0.5924668", "0.58730847", "0.5861786", "0.58363956", "0.58298504", "0.58051777", "0.579176", "0.578321", "0.5744046", "0.5737306", "0.5688931", "0.56700575", ...
0.86577046
0
return the path for the aovs file
def aovsPath(self):
    """Return a File handle for the pickled AOV settings."""
    target = self._path + '/aovs.data'
    return fl.File(target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_path(self):\n return self._obs_file()", "def get_oc_path(cfg):\n return os.path.join(\n BASE_DATA_DIR,\n \"castp\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut),\n \"oc\" + str(cfg.probe) + \".csv\")", "def file_path(self) -> global___Expression:", "def file_pa...
[ "0.7047884", "0.6801188", "0.67156875", "0.6595521", "0.6526361", "0.6481086", "0.64795697", "0.6452638", "0.6423643", "0.6420651", "0.64015955", "0.6397468", "0.63639444", "0.63354146", "0.63254094", "0.630958", "0.6307452", "0.62829566", "0.6233461", "0.6226466", "0.622618"...
0.87796766
0
return the path for the aovs file
def lightLinkPath(self):
    """Return a File handle for the pickled light-linking data."""
    target = self._path + '/lights.data'
    return fl.File(target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aovsPath(self):\n\t\treturn fl.File( self._path + '/aovs.data' )", "def file_path(self):\n return self._obs_file()", "def get_oc_path(cfg):\n return os.path.join(\n BASE_DATA_DIR,\n \"castp\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut),\n \"oc\" + str(cfg.probe) + \...
[ "0.87794113", "0.70491135", "0.6802213", "0.6716474", "0.65970516", "0.65285367", "0.6482856", "0.6481411", "0.64541376", "0.6425376", "0.6422676", "0.64034206", "0.6400361", "0.63644063", "0.6337509", "0.63264626", "0.6311372", "0.6308107", "0.62853396", "0.6235967", "0.6228...
0.0
-1
return the path for the masterLayer data
def masterPath(self):
    """Return a File handle for the pickled master-layer render settings."""
    target = self._path + '/master.data'
    return fl.File(target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataPath(self):\n return ''", "def data_path(self):\n raise NotImplementedError", "def dataPath(self):\n\t\treturn fl.File( self._path + '/renderLayerData.data' )", "def path(self):\n return self._data_file", "def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep", "de...
[ "0.6769875", "0.6692991", "0.66356105", "0.642204", "0.63508654", "0.6319566", "0.63127893", "0.6197516", "0.6166026", "0.61191916", "0.6109279", "0.6093193", "0.60923404", "0.60806435", "0.60315734", "0.6025355", "0.60208064", "0.60151654", "0.6008396", "0.6005537", "0.59993...
0.8091495
0
export information of scene to path
def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):
    """Export the requested categories of scene information to disk."""
    # (flag, exporter) pairs, run in the original order:
    # data, shaders, lights, AOVs, master-layer settings.
    steps = ((exdata, self.exportData),
             (exshaders, self.exportShaders),
             (exlights, self.exportLights),
             (exaovs, self.exportAovs),
             (exmaster, self.exportMasterLayerSettings))
    for enabled, exporter in steps:
        if enabled:
            exporter()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def menu_save_scene(self):\n file_name = QtGui.QFileDialog().getSaveFileName(self, \"Save Scene to File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"wb\") as f:\n pickle.dump(self.scene, f, pickle.HIGHEST_PROTOCOL)", "def export(self, savepath):\n logger.debug(f\"Export...
[ "0.7110283", "0.6522757", "0.6518129", "0.6316745", "0.62322736", "0.6161624", "0.6119347", "0.6053778", "0.6050798", "0.6048917", "0.6039331", "0.60387284", "0.603646", "0.6003288", "0.59928995", "0.59725803", "0.5968473", "0.5963264", "0.593944", "0.5870665", "0.5853435", ...
0.0
-1
export master layer settings so we can re apply it
def exportMasterLayerSettings(self):
    """Pickle the master (defaultRenderLayer) render settings to masterPath.

    Switches to the default render layer and stores every settable,
    visible, writable attribute value of the global render nodes.
    """
    master = rlayer.RenderLayer('defaultRenderLayer')
    master.makeCurrent()
    masterData = {}
    nodes = ['defaultArnoldRenderOptions', 'defaultResolution', 'defaultRenderGlobals']
    for node in (mn.Node(n) for n in nodes):
        for attr in node.listAttr(se=True, v=True, w=True):
            try:
                masterData[attr] = attr.v
            except Exception:
                # Was a bare "except:"; keep the best-effort skip of
                # unreadable attributes but let KeyboardInterrupt through.
                continue
    with open(self.masterPath.path, "wb") as fh:  # original leaked this handle
        pickle.dump(masterData, fh)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importMasterSettings(self):\n\t\tpickleData = pickle.load( open( self.masterPath.path, \"rb\" ) )\n\t\tmaster = rlayer.RenderLayer( 'defaultRenderLayer' )\n\t\tmaster.makeCurrent()\n\t\tfor a in pickleData.keys():\n\t\t\ttry:\n\t\t\t\ta.v = pickleData[a]\n\t\t\texcept:\n\t\t\t\tcontinue", "def restore_export...
[ "0.72975564", "0.5729428", "0.56773335", "0.5671888", "0.5636887", "0.5510333", "0.5465108", "0.54372466", "0.54338557", "0.5410532", "0.53921604", "0.53159", "0.52870184", "0.52698493", "0.5225601", "0.5222329", "0.5184534", "0.5180232", "0.5179325", "0.517133", "0.516891", ...
0.8537631
0
export data from scene, objects overrides in renderlayers.. etc
def exportData(self):
    """Pickle per-render-layer data (members, overrides, shader) to dataPath."""
    data = {}
    for layer in rlayer.renderlayers():
        if layer.name == 'defaultRenderLayer':
            continue  # master layer is handled by exportMasterLayerSettings
        data[layer.name] = {
            'objects': layer.objects,                    # objects in layer
            'values': layer.overridesWithValues,         # overridden attrs, changed values only
            'conns': layer.overridesWithConnections[0],  # overridden attrs, changed connections
            'shader': layer.overridedShader,             # layer-level shader override
        }
    with open(self.dataPath.path, "wb") as fh:  # original leaked this handle
        pickle.dump(data, fh)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_scene_data(self, scene, data, tmp_dir):\n pass", "def writeObject(self,view,renderer):\n\n if not view.Source:\n return \"\"\n\n # point light hook\n proxy = getattr(view.Source,\"Proxy\",None)\n if getattr(proxy,\"type\",None) == \"PointLight\":\n ...
[ "0.63344496", "0.62675095", "0.6227879", "0.6224553", "0.6189659", "0.6154672", "0.6018009", "0.59942466", "0.5959543", "0.5952904", "0.591055", "0.5901034", "0.588326", "0.5835366", "0.58250904", "0.5816034", "0.57584465", "0.57536954", "0.5716126", "0.57042253", "0.5703749"...
0.73334426
0
export lights from scene
def exportLights(self):
    """Export all light nodes to the light file, then their light linking."""
    # TODO! REMOVE CONSTRAINS
    light_types = ['light', 'aiAreaLight', 'aiSkyDomeLight', 'aiVolumeScattering', 'aiSky']
    lights = mc.ls(typ=light_types, l=1)
    mc.editRenderLayerGlobals(currentRenderLayer='defaultRenderLayer')
    # Keep the top node name from each full DAG path.
    to_export = []
    for full_path in lights:
        parts = full_path.split('|')
        to_export.append(parts[0] if len(parts) == 1 else parts[1])
    if to_export:
        mc.select(to_export, r=1, ne=1)
        mc.file(self.lightPath.path, op="v=0", typ="mayaAscii", pr=1, es=1)
        # export Light Linking
        self.exportLightLinking()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_lights(lamps, file, scene, global_matrix, tab_write):\n\n from .render import write_matrix, tab_write\n\n # Incremented after each lamp export to declare its target\n # currently used for Fresnel diffuse shader as their slope vector:\n global exported_lights_count\n # Get all lamps and ke...
[ "0.70548475", "0.6534689", "0.6220881", "0.6124191", "0.6103418", "0.5953071", "0.59508586", "0.592486", "0.5756125", "0.5698417", "0.5659417", "0.56495863", "0.55890954", "0.55873734", "0.5572616", "0.55421257", "0.54985136", "0.54800266", "0.54774386", "0.54722345", "0.5455...
0.7741908
0
export all the lightlinking in the scene
def exportLightLinking(self):
    """Pickle, per light, the shapes NOT linked to it, to lightLinkPath.

    Lights whose name contains 'eye' are skipped; Arnold light shapes are
    excluded from the geometry set.
    """
    lights = [a for a in mc.ls(typ=['light', 'aiAreaLight']) if 'eye' not in a]
    allShapes = [s for s in mc.ls(type='geometryShape', ni=1)
                 if mc.objectType(s) not in ('aiAreaLight', 'aiSkyDomeLight')]
    litLinks = {}
    for l in lights:
        linked = mc.lightlink(query=True, light=l, shp=1, t=0, set=0, h=0)
        # Store the shapes with NO link to this light.
        litLinks[l] = list(set(allShapes) - set(linked))
    with open(self.lightLinkPath.path, "wb") as fh:  # original leaked this handle
        pickle.dump(litLinks, fh)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exportLights(self):\n\t\t#TODO! REMOVE CONSTRAINS\n\t\tlights = mc.ls( typ=['light','aiAreaLight','aiSkyDomeLight','aiVolumeScattering','aiSky'], l=1 )\n\t\tmc.editRenderLayerGlobals( currentRenderLayer = 'defaultRenderLayer' )\n\t\tlitsToExport = []\n\t\tfor li in lights:\n\t\t\tfinalLi = li.split( '|' )\n\t\...
[ "0.780936", "0.6795471", "0.5878799", "0.5863147", "0.5783751", "0.57724273", "0.5699046", "0.5667319", "0.5579326", "0.5527161", "0.5456355", "0.54548293", "0.5452703", "0.5451087", "0.54337513", "0.5404285", "0.5384086", "0.5363154", "0.53607535", "0.53221077", "0.53212357"...
0.8114935
0
export aovs from scene
def exportAovs(self):
    """Pickle every aiAOV node's enabled/name/type settings to aovsPath."""
    aovData = {}
    for a in mc.ls(typ='aiAOV'):
        aovData[a] = {
            'enabled': mc.getAttr(a + '.enabled'),
            'name': mc.getAttr(a + '.name'),
            'type': mc.getAttr(a + '.type'),
        }
    with open(self.aovsPath.path, "wb") as fh:  # original leaked this handle
        pickle.dump(aovData, fh)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def menu_func(self, context):\n op = self.layout.operator(OP_ogre_export.bl_idname, text=\"Ogre3D (.scene and .mesh)\")\n return op", "def create_scene(self):\n \n self.scene=soya.World()", "def import_scene(file_path):\n\n pass", "def exports():", "def menu_save_scene(self):\n fi...
[ "0.6807009", "0.61583364", "0.60997474", "0.6089782", "0.5898155", "0.5882427", "0.5869277", "0.58672017", "0.5798506", "0.56990576", "0.5693678", "0.56637156", "0.5612492", "0.5589226", "0.553898", "0.5532267", "0.55292517", "0.55200547", "0.5514314", "0.5509473", "0.5498411...
0.6433381
1
import all data into scene
def importAll(self, imdata = True, imlights = True, imaovs = True, imshaders = True, immaster = True, asset = '', searchAndReplace = ['',''] ):
    """Import every requested category of previously exported scene data.

    Each category (other than the master settings) is imported only when
    its data file exists on disk.
    """
    if immaster:
        self.importMasterSettings()
    if imlights and self.lightPath.exists:
        self.importLights(asset, searchAndReplace)
    if imaovs and self.aovsPath.exists:
        self.importAovs()
    if imshaders and self.shaderPath.exists:
        self.importShaders()
    if imdata and self.dataPath.exists:
        self.importData(asset, searchAndReplace)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self):", "def import_scene(file_path):\n\n pass", "def load_data(self) -> None:", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_pa...
[ "0.6907678", "0.67906314", "0.6576967", "0.6550817", "0.65185374", "0.62975734", "0.6248734", "0.62315434", "0.6214042", "0.6175726", "0.61386037", "0.61351347", "0.61297685", "0.59827185", "0.5970255", "0.5943906", "0.5901872", "0.589308", "0.58869416", "0.5873126", "0.58356...
0.5926135
16
import lights in scene
def importLights(self, asset = '', searchAndReplace = ['',''] ):
    """Import the exported light file, then restore light linking."""
    if not self.lightPath.exists:
        return
    self.lightPath.imp()
    if self.lightLinkPath.exists:
        self.importLightLinking(asset, searchAndReplace)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setD...
[ "0.7125975", "0.6562322", "0.65274394", "0.6518946", "0.6397606", "0.63027537", "0.62996095", "0.6106556", "0.6032271", "0.6012362", "0.59544396", "0.5917386", "0.59018606", "0.58937645", "0.58840954", "0.58633345", "0.5855364", "0.577707", "0.57750875", "0.5726249", "0.56992...
0.65937054
1
import light linking to lights
def importLightLinking(self, asset = '', searchAndReplace = ['',''] ):
    """Re-apply exported light linking by breaking the stored links.

    When *asset* is non-empty the data is first filtered (and search/
    replaced) via filterLightLinksData.
    """
    with open(self.lightLinkPath.path, "rb") as fh:  # original leaked this handle
        LayersInfo = pickle.load(fh)
    mc.refresh(su=1)
    try:
        if asset != '':
            LayersInfo = self.filterLightLinksData(LayersInfo, asset, searchAndReplace)
        for l in LayersInfo.keys():
            # Break links only for objects that still exist in the scene.
            objsToBreakLink = [link for link in LayersInfo[l] if mc.objExists(link)]
            mc.lightlink(b=True, light=l, o=objsToBreakLink)
    finally:
        # BUG FIX: always resume scene refresh, even when a lightlink
        # call raises (the original left refresh suspended on error).
        mc.refresh(su=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importLights(self, asset = '', searchAndReplace = ['',''] ):\n\t\tif self.lightPath.exists:\n\t\t\tself.lightPath.imp()\n\t\t\tif self.lightLinkPath.exists:\n\t\t\t\tself.importLightLinking( asset, searchAndReplace )", "def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\...
[ "0.7286741", "0.6968058", "0.667978", "0.6516438", "0.6463352", "0.6461872", "0.6437315", "0.6400645", "0.63854384", "0.63444823", "0.6283221", "0.62622285", "0.6234883", "0.61583436", "0.61548394", "0.614277", "0.6137061", "0.61298877", "0.61078626", "0.61018175", "0.6089106...
0.70111126
1
filter light linking data for the specific asset
def filterLightLinksData(self, LayersInfo, asset, sAr=['', '']):
    """Filter light-linking data down to *asset*, applying search/replace.

    LayersInfo maps light name -> list of shape names (as produced by
    exportLightLinking).  Only entries whose light name contains *asset*
    are kept, and sAr[0] is replaced with sAr[1] in both the light name
    and each shape name.
    """
    filtered = {}
    for light, shapes in LayersInfo.items():
        if asset not in light:
            continue
        # BUG FIX: values are *lists* of shape names, so the original
        # LayersInfo[a].replace(...) raised AttributeError; the replace
        # must be applied to each shape string individually.
        filtered[light.replace(sAr[0], sAr[1])] = [s.replace(sAr[0], sAr[1]) for s in shapes]
    return filtered
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importLightLinking(self, asset = '', searchAndReplace = ['',''] ):\n\t\tLayersInfo = pickle.load( open( self.lightLinkPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tif not asset == '':\n\t\t\tLayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )\n\t\tfor l in LayersInfo.keys():\n\...
[ "0.5837777", "0.5805665", "0.56016874", "0.53269315", "0.53257084", "0.52495986", "0.52477026", "0.5235335", "0.51835656", "0.51263887", "0.5116933", "0.51134336", "0.50554407", "0.505449", "0.5049599", "0.5036466", "0.5035692", "0.50177574", "0.50123763", "0.49872875", "0.49...
0.7130668
0
import aovs into scene
def importAovs(self):
    """Recreate the pickled AOV nodes in the current scene."""
    with open(self.aovsPath.path, "rb") as fh:  # original leaked this handle
        LayersInfo = pickle.load(fh)
    mc.refresh(su=1)
    try:
        for ao in LayersInfo.keys():
            aov.create(ao, LayersInfo[ao]['name'], LayersInfo[ao]['type'],
                       LayersInfo[ao]['enabled'])
    finally:
        # BUG FIX: always resume scene refresh, even when aov.create
        # raises (the original left refresh suspended on error).
        mc.refresh(su=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_scene(file_path):\n\n pass", "def importBaseScene(self):\n logger.debug(\"Func: importBaseScene\")\n relSceneFile = self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"RelativePath\"]\n absSceneFile = os.path.join(self.projectDir, relSceneFile)\n if os.pa...
[ "0.7213218", "0.63318026", "0.59511834", "0.5773653", "0.5763489", "0.5640468", "0.5608729", "0.5585865", "0.55745476", "0.5565132", "0.5552705", "0.5474964", "0.5472537", "0.5469946", "0.5382948", "0.5356013", "0.5355673", "0.5343927", "0.5336113", "0.53249675", "0.5310616",...
0.7212844
1
import shaders into scene
def importShaders(self):
    """Import the exported shader file into the scene, when it exists."""
    if not self.shaderPath.exists:
        return
    self.shaderPath.imp()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def init_shaders():\n global shaders\n\n vertex_shader = glCreateShader(GL_VERTEX_SHADER)\n glShaderSource(vertex_shader,open('shaders/vs-phong-interp.c','r').read())\n glCompileShader(vertex_shader)\n re...
[ "0.7801303", "0.74852675", "0.7035145", "0.69157875", "0.68511426", "0.68024266", "0.67894316", "0.65204686", "0.64844674", "0.64807636", "0.6422864", "0.6374863", "0.6357216", "0.6319503", "0.61505246", "0.61424196", "0.6106217", "0.60896313", "0.60892934", "0.60849136", "0....
0.7719651
1