function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def _decode_path(self, path):
    """Decode *path* only if a unicode string was passed to this emitter."""
    # A bytes watch path means the caller works in bytes; pass through untouched.
    if isinstance(self.watch.path, bytes):
        return path
    return unicode_paths.decode(path)
wandb/client
[ 5607, 445, 5607, 725, 1490334383 ]
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
    """Construct the emitter by delegating to the inotify base class."""
    InotifyEmitter.__init__(self, event_queue, watch, timeout)
wandb/client
[ 5607, 445, 5607, 725, 1490334383 ]
def queue_events(self, timeout, events=True):
    """Queue inotify events, forwarding *events* as the base class's ``full_events``."""
    InotifyEmitter.queue_events(self, timeout, full_events=events)
wandb/client
[ 5607, 445, 5607, 725, 1490334383 ]
def _handle_remotes(registry_path, remote_file, output):
    """Replace the remotes in *registry_path* with those defined in *remote_file*."""
    registry = RemoteRegistry(registry_path, output)
    incoming = RemoteRegistry(remote_file, output)
    registry.define_remotes(incoming.remotes)
lasote/conan
[ 2, 3, 2, 2, 1448989002 ]
def _process_git_repo(repo_url, client_cache, output, runner, tmp_folder):
    """Clone *repo_url* into *tmp_folder* and process the resulting config folder."""
    output.info("Trying to clone repo %s" % repo_url)
    # Clone into a "config" sub-folder of the temporary directory.
    with tools.chdir(tmp_folder):
        runner('git clone "%s" config' % repo_url, output=output)
    tmp_folder = os.path.join(tmp_folder, "config")
    _process_folder(tmp_folder, client_cache, output)
lasote/conan
[ 2, 3, 2, 2, 1448989002 ]
def _handle_conan_conf(current_conan_conf, new_conan_conf_path):
    """Merge the config at *new_conan_conf_path* into the current conf and persist it."""
    current_conan_conf.read(new_conan_conf_path)
    with open(current_conan_conf.filename, "w") as f:
        current_conan_conf.write(f)
lasote/conan
[ 2, 3, 2, 2, 1448989002 ]
def _process_download(item, client_cache, output, tmp_folder):
    """Download *item* as config.zip into *tmp_folder* and process its contents."""
    output.info("Trying to download %s" % item)
    zippath = os.path.join(tmp_folder, "config.zip")
    tools.download(item, zippath, out=output)
    # remove=True deletes the zip once it has been extracted and handled.
    _process_zip_file(zippath, client_cache, output, tmp_folder, remove=True)
lasote/conan
[ 2, 3, 2, 2, 1448989002 ]
def connect(self):
    """Accept the websocket only for authenticated, unrestricted users or internal IPs."""
    self.user = self.scope["user"]
    headers = dict(self.scope["headers"])
    # Prefer the reverse-proxy header; fall back to the raw client address.
    if b"x-real-ip" in headers:
        remote_addr = headers[b"x-real-ip"].decode()
    else:
        remote_addr = self.scope["client"][0]
    # De Morgan of the original guard: allow (authenticated AND not restricted)
    # OR a request originating from an internal address.
    allowed = (self.user.is_authenticated and not self.user.is_restricted) \
        or remote_addr in settings.INTERNAL_IPS
    if not allowed:
        self.connected = False
        self.close()
        return
    self.connected = True
    data = self._serialize(user=self.user)
    self.accept()
    self.send_json(data)
tjcsl/ion
[ 89, 57, 89, 90, 1408078781 ]
def bus_update(self, event):
    """Forward a bus update to the client while the socket is connected."""
    # Updates arriving after the socket was refused/closed are dropped silently.
    if self.connected:
        self.send_json(event["data"])
tjcsl/ion
[ 89, 57, 89, 90, 1408078781 ]
def _format_class(module, name):
    """Format a module/class pair as a single ``module/name`` config line."""
    return u'{0}/{1}'.format(module, name)
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __init__(self, filename=None):
    """Initialize a new config, optionally loading it from *filename*."""
    # NOTE: extension order is significant, so a list (not a set/dict) is used.
    self._exts = list()
    if filename:
        self.load_from_file(filename)
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def extensions(self):
    """Yield every (module, class) extension pair from the config."""
    for module_name, class_name in self._exts:
        yield module_name, class_name
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def nfa_to_dfa(old_machine, debug=None):
    """Given a nondeterministic Machine, return a new equivalent deterministic Machine."""
    # Each new state corresponds to a SET of old states. Start with the
    # epsilon-closure of every initial old state; a new state's transitions
    # are the union of the transitions out of its old states. New old-state
    # combinations create new states until closure is reached.
    new_machine = Machines.FastMachine()
    state_map = StateMap(new_machine)
    # Seed with the old machine's initial states, preserving their names.
    for key, old_state in old_machine.initial_states.items():
        new_state = state_map.old_to_new(epsilon_closure(old_state))
        new_machine.make_initial_state(key, new_state)
    # Tricky: new_machine.states grows while we iterate over it; the loop
    # terminates once no new combinations appear (closure achieved).
    for new_state in new_machine.states:
        transitions = TransitionMap()
        for old_state in state_map.new_to_old(new_state).keys():
            for event, old_target_states in old_state.transitions.items():
                if event and old_target_states:
                    transitions.add_set(event, set_epsilon_closure(old_target_states))
        for event, old_states in transitions.items():
            new_machine.add_transitions(new_state, event, state_map.old_to_new(old_states))
    if debug:
        debug.write("\n===== State Mapping =====\n")
        state_map.dump(debug)
    return new_machine
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def epsilon_closure(state):
    """Return the set of states reachable from *state* by epsilon moves."""
    # The closure is memoized directly on the state object.
    cached = state.epsilon_closure
    if cached is None:
        cached = {}
        state.epsilon_closure = cached
        add_to_epsilon_closure(cached, state)
    return cached
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __init__(self, new_machine):
    """Create an empty two-way map between old state sets and new states."""
    self.new_machine = new_machine
    self.old_to_new_dict = {}
    self.new_to_old_dict = {}
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def highest_priority_action(self, state_set):
    """Return the action of the highest-priority state in *state_set*, or None."""
    best_action = None
    best_priority = LOWEST_PRIORITY
    for state in state_set.keys():
        # Read the priority once per state, then track the running maximum.
        priority = state.action_priority
        if priority > best_priority:
            best_action, best_priority = state.action, priority
    return best_action
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def new_to_old(self, new_state):
    """Look up the set of old states that *new_state* was built from."""
    # New states are keyed by identity, not equality.
    return self.new_to_old_dict[id(new_state)]
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def dump(self, file):
    """Write the new-state -> old-state-set mapping to *file* for debugging."""
    from Transitions import state_set_str
    for new_state in self.new_machine.states:
        old_state_set = self.new_to_old_dict[id(new_state)]
        file.write(" State %s <-- %s\n" % (
            new_state['number'], state_set_str(old_state_set)))
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def create_init_file(name, env): ''' Create package initialization file. e.g. init_package_env.sh :param name: file name to be created. :type name: string :param env: the environment object. :type env: :class:`.Environment` :return: None ''' aroot = os.environ["ASKAP_ROOT"] inittxt= """#
ATNF/askapsdp
[ 4, 5, 4, 1, 1411460489 ]
def process_item(self, item_xml):
    """Translate a <cobp> XML item into a display-list entry dict.

    Direct http links become playable video entries; ``category/``, ``tag/``
    and ``most-`` links become browsable COBP folders. Returns None when
    *item_xml* carries no <cobp> tag or the link matches no known shape
    (the original duplicated-branch version raised UnboundLocalError there).
    """
    if "<cobp>" not in item_xml:
        return None
    item = JenItem(item_xml)
    link = item.get("cobp", "")
    # Fields shared by every entry shape; only mode/folder differ per branch.
    result_item = {
        'label': item["title"],
        'icon': item.get("thumbnail", addon_icon),
        'fanart': item.get("fanart", addon_fanart),
        'url': link,
        'imdb': "0",
        'content': "files",
        'season': "0",
        'episode': "0",
        'info': {},
        'year': "0",
        'context': get_context_items(item),
        "summary": item.get("summary", None),
    }
    if "http" in link:
        result_item['mode'] = "PlayVideo"
        result_item['folder'] = False
    elif "category/" in link or "tag/" in link or "most-" in link:
        result_item['mode'] = "COBP"
        result_item['folder'] = True
    else:
        # Unknown link shape: nothing displayable.
        return None
    result_item["properties"] = {'fanart_image': result_item["fanart"]}
    result_item['fanart_small'] = result_item["fanart"]
    return result_item
repotvsupertuga/tvsupertuga.repository
[ 1, 8, 1, 4, 1493763534 ]
def get_stream(url): url = urlparse.urljoin('http://collectionofbestporn.com/', url) xml = fetch_from_db(url) if not xml: xml = "" try: headers = {'User_Agent':User_Agent} html = requests.get(url,headers=headers).content vid_divs = dom_parser.parseDOM(html, 'div', attrs={'class':'video-item col-sm-5 col-md-4 col-xs-10'}) count = 0 for vid_section in vid_divs: thumb_div = dom_parser.parseDOM(vid_section, 'div', attrs={'class':'video-thumb'})[0] thumbnail = re.compile('<img src="(.+?)"',re.DOTALL).findall(str(thumb_div))[0] vid_page_url = re.compile('href="(.+?)"',re.DOTALL).findall(str(thumb_div))[0] title_div = dom_parser.parseDOM(vid_section, 'div', attrs={'class':'title'})[0] title = remove_non_ascii(re.compile('title="(.+?)"',re.DOTALL).findall(str(title_div))[0]) count += 1 xml += "<item>"\ " <title>%s</title>"\ " <meta>"\ " <summary>%s</summary>"\ " </meta>"\ " <cobp>%s</cobp>"\ " <thumbnail>%s</thumbnail>"\ "</item>" % (title,title,vid_page_url,thumbnail) try: pagination = dom_parser.parseDOM(html, 'li', attrs={'class':'next'})[0] next_page = dom_parser.parseDOM(pagination, 'a', ret='href')[0] xml += "<dir>"\ " <title>Next Page</title>"\ " <meta>"\ " <summary>Click here for more porn bitches!</summary>"\ " </meta>"\ " <cobp>%s</cobp>"\ " <thumbnail>%s</thumbnail>"\ "</dir>" % (next_page,next_icon) except: pass save_to_db(xml, url) except: pass jenlist = JenList(xml) display_list(jenlist.get_list(), jenlist.get_content_type())
repotvsupertuga/tvsupertuga.repository
[ 1, 8, 1, 4, 1493763534 ]
def play_source(url):
    """Resolve the last <source> of a video page and hand it to the player."""
    try:
        headers = {'User_Agent': User_Agent}
        vid_html = requests.get(url, headers=headers).content
        sources = dom_parser.parseDOM(vid_html, 'source', ret='src')
        # Idiomatic negative index replaces sources[len(sources) - 1].
        vid_url = sources[-1]
        xbmc.executebuiltin("PlayMedia(%s)" % vid_url)
    except:
        # Best-effort playback: failures are deliberately swallowed (kept from original).
        return
repotvsupertuga/tvsupertuga.repository
[ 1, 8, 1, 4, 1493763534 ]
def fetch_from_db(url):
    """Return the cached XML for *url*, or None when absent, stale or undecodable."""
    koding.reset_db()
    cobp_plugin_spec = {
        "columns": {
            "url": "TEXT",
            "item": "TEXT",
            "created": "TEXT"
        },
        "constraints": {
            "unique": "url"
        }
    }
    koding.Create_Table("cobp_com_plugin", cobp_plugin_spec)
    match = koding.Get_From_Table("cobp_com_plugin", {"url": url})
    # Guard-clause rewrite of the original nested conditionals.
    if not match:
        return None
    row = match[0]
    if not row["item"]:
        return None
    created_time = row["created"]
    if not (created_time and float(created_time) + CACHE_TIME >= time.time()):
        return None
    try:
        return base64.b64decode(row["item"])
    except:
        return None
repotvsupertuga/tvsupertuga.repository
[ 1, 8, 1, 4, 1493763534 ]
def __init__(self, vfs, entrylimit):
    """Create a bounded metadata store backed by *vfs* and load it from disk."""
    self.vfs = vfs
    self.limit = entrylimit
    self.contents = altsortdict()
    self.reload()
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def write(self):
    """Write the database to disk as JSON (atomically via the vfs)."""
    with self.vfs("commit_metadata", "w", atomictemp=True) as f:
        serialized = [
            {"node": hex(node), "category": category, "data": data}
            for ((node, category), data) in self.contents.items()
        ]
        json.dump(serialized, f)
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def delete(self, node, category):
    """Remove the entry with matching node and category and return its value."""
    key = (node, category)
    value = self.contents[key]
    del self.contents[key]
    return value
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def find(self, node=None, category=None):
    """Return a map of entries matching *node* and/or *category*.

    If both filters are None, every entry is returned.
    """
    return altsortdict(
        (((node_, category_), data)
         for ((node_, category_), data) in self.contents.items()
         if node is None or node == node_
         if category is None or category == category_)
    )
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def request(host, path, show):
    """GET host/path, print selected response headers and body, return the status.

    Module-level flags (tag, hgproto, headeronly, formatjson, twice) control
    conditional requests, header filtering and body formatting.
    """
    assert not path.startswith("/"), path
    global tag
    headers = {}
    if tag:
        headers["If-None-Match"] = tag
    if hgproto:
        headers["X-HgProto-1"] = hgproto
    conn = httplib.HTTPConnection(host)
    conn.request("GET", "/" + path, None, headers)
    response = conn.getresponse()
    print(response.status, response.reason)
    # A leading "-" in show means "all headers except those listed".
    if show[:1] == ["-"]:
        show = sorted(h for h, v in response.getheaders() if h.lower() not in show)
    for h in [h.lower() for h in show]:
        if response.getheader(h, None) is not None:
            print("%s: %s" % (h, response.getheader(h)))
    if not headeronly:
        print()
        data = response.read()
        # Pretty-printing JSON also verifies the emitted JSON is well-formed.
        if formatjson:
            data = json.loads(data)
            # json.dumps() can print trailing whitespace; strip per line to
            # keep tests easy to write.
            lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
            for line in lines:
                print(line.rstrip())
        else:
            sys.stdout.write(data)
    if twice and response.getheader("ETag", None):
        tag = response.getheader("ETag")
    return response.status
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def setUp(self) -> None:
    """Set up the test case: write channel 3a/3b test data to an .hmf file."""
    test_data = np.ones(NUMBER_OF_SCANS, dtype=dtype)
    # First five scans carry channel 3a (id 891), the remainder channel 3b (id 890).
    test_data["id"]["id"][:5] = 891
    test_data["id"]["id"][5:] = 890
    with NamedTemporaryFile(mode='w+', suffix='.hmf', delete=False) as hrpt_file:
        self.filename = hrpt_file.name
        test_data.tofile(hrpt_file)
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def _get_dataset(self, dataset_id):
    """Open the test file and fetch *dataset_id* from it."""
    handler = HRPTFile(self.filename, {}, {})
    return handler.get_dataset(dataset_id, {})
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def test_reading(self):
    """Test that data is read."""
    handler = HRPTFile(self.filename, {}, {})
    assert handler._data is not None
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def _get_channel_1_counts(self):
    """Fetch channel 1 in 'counts' calibration."""
    return self._get_dataset(make_dataid(name='1', calibration='counts'))
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def test_platform_name(self):
    """Test that the platform name is correct."""
    counts = self._get_channel_1_counts()
    assert counts.attrs['platform_name'] == 'NOAA 19'
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def fake_calibrate_solar(data, *args, **kwargs):
    """Fake solar calibration: a fixed affine transform, extra args ignored."""
    del args, kwargs
    return data * 25.43 + 3
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def setUp(self) -> None: """Patch pygac's calibration.""" super().setUp() # Import things to patch here to make them patchable. Otherwise another function # might import it first which would prevent a successful patch. from pygac.calibration import Calibrator, calibrate_solar, calibrate_thermal self.Calibrator = Calibrator self.calibrate_thermal = calibrate_thermal self.calibrate_thermal.side_effect = fake_calibrate_thermal self.calibrate_solar = calibrate_solar self.calibrate_solar.side_effect = fake_calibrate_solar
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def setUp(self) -> None: """Set up the test case.""" CalibratorPatcher.setUp(self) TestHRPTWithFile.setUp(self)
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def _get_channel_1_reflectance(self): """Get the channel 1 reflectance.""" dataset_id = make_dataid(name='1', calibration='reflectance') return self._get_dataset(dataset_id)
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def _get_channel_4_bt(self): """Get the channel 4 bt.""" dataset_id = make_dataid(name='4', calibration='brightness_temperature') return self._get_dataset(dataset_id)
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def _get_channel_3b_bt(self): """Get the channel 4 bt.""" dataset_id = make_dataid(name='3b', calibration='brightness_temperature') return self._get_dataset(dataset_id)
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def _get_channel_3a_counts(self): """Get the channel 4 bt.""" dataset_id = make_dataid(name='3a', calibration='counts') return self._get_dataset(dataset_id)
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def test_channel_3a_masking(self): """Test that channel 3a is split correctly.""" result = self._get_channel_3a_reflectance() assert np.isnan(result.values[5:]).all() assert np.isfinite(result.values[:5]).all()
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def setUp(self) -> None: """Set up the test case.""" super().setUp() self.fake_lons = np.ones((NUMBER_OF_SCANS, SWATH_WIDTH)) self.fake_lats = np.ones((NUMBER_OF_SCANS, SWATH_WIDTH)) * 2
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def test_longitudes_are_returned(self, Orbital, compute_pixels, get_lonlatalt, SatelliteInterpolator): """Check that latitudes are returned properly.""" self._prepare_mocks(Orbital, SatelliteInterpolator, get_lonlatalt) dataset_id = make_dataid(name='longitude') result = self._get_dataset(dataset_id) assert (result == self.fake_lons).all()
pytroll/satpy
[ 901, 261, 901, 407, 1455049783 ]
def setup(): """ Build the required testing extension module
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def flags_info(arr): flags = wrap.array_attrs(arr)[6] return flags2names(flags)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def flags2names(flags): info = [] for flagname in ['CONTIGUOUS','FORTRAN','OWNDATA','ENSURECOPY', 'ENSUREARRAY','ALIGNED','NOTSWAPPED','WRITEABLE', 'UPDATEIFCOPY','BEHAVED','BEHAVED_RO', 'CARRAY','FARRAY' ]: if abs(flags) & getattr(wrap,flagname, 0): info.append(flagname) return info
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def __init__(self,intent_list=[]): self.intent_list = intent_list[:] flags = 0 for i in intent_list: if i=='optional': flags |= wrap.F2PY_OPTIONAL else: flags |= getattr(wrap,'F2PY_INTENT_'+i.upper()) self.flags = flags
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def __str__(self): return 'intent(%s)' % (','.join(self.intent_list))
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def is_intent(self,*names): for name in names: if name not in self.intent_list: return False return True
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def __new__(cls,name): if isinstance(name,dtype): dtype0 = name name = None for n,i in typeinfo.items(): if isinstance(i,tuple) and dtype0.type is i[-1]: name = n break obj = cls._type_cache.get(name.upper(),None) if obj is not None: return obj obj = object.__new__(cls) obj._init(name) cls._type_cache[name.upper()] = obj return obj
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def _init(self,name): self.NAME = name.upper() self.type_num = getattr(wrap,'NPY_'+self.NAME) assert_equal(self.type_num,typeinfo[self.NAME][1]) self.dtype = typeinfo[self.NAME][-1] self.elsize = typeinfo[self.NAME][2] / 8 self.dtypechar = typeinfo[self.NAME][0]
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def cast_types(self): return map(self.__class__,self._cast_dict[self.NAME])
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def all_types(self): return map(self.__class__,self._type_names)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def smaller_types(self): bits = typeinfo[self.NAME][3] types = [] for name in self._type_names: if typeinfo[name][3]<bits: types.append(Type(name)) return types
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def equal_types(self): bits = typeinfo[self.NAME][3] types = [] for name in self._type_names: if name==self.NAME: continue if typeinfo[name][3]==bits: types.append(Type(name)) return types
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def larger_types(self): bits = typeinfo[self.NAME][3] types = [] for name in self._type_names: if typeinfo[name][3]>bits: types.append(Type(name)) return types
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def __init__(self,typ,dims,intent,obj): self.type = typ self.dims = dims self.intent = intent self.obj_copy = copy.deepcopy(obj) self.obj = obj
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def arr_equal(self,arr1,arr2): if arr1.shape != arr2.shape: return False s = arr1==arr2 return alltrue(s.flatten())
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def __str__(self): return str(self.arr)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def has_shared_memory(self): """Check that created array shares data with input array. """ if self.obj is self.arr: return True if not isinstance(self.obj,ndarray): return False obj_attr = wrap.array_attrs(self.obj) return obj_attr[0]==self.arr_attr[0]
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_in_out(self): assert_equal(str(intent.in_.out),'intent(in,out)') assert_(intent.in_.c.is_intent('c')) assert_(not intent.in_.c.is_intent_exact('c')) assert_(intent.in_.c.is_intent_exact('c','in')) assert_(intent.in_.c.is_intent_exact('in','c')) assert_(not intent.in_.is_intent('c'))
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_in_from_2seq(self): a = self.array([2],intent.in_,self.num2seq) assert_(not a.has_shared_memory())
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_in_from_2casttype(self): for t in self.type.cast_types(): obj = array(self.num2seq,dtype=t.dtype) a = self.array([len(self.num2seq)],intent.in_,obj) if t.elsize==self.type.elsize: assert_(a.has_shared_memory(),`self.type.dtype,t.dtype`) else: assert_(not a.has_shared_memory(),`t.dtype`)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_inout_2seq(self): obj = array(self.num2seq,dtype=self.type.dtype) a = self.array([len(self.num2seq)],intent.inout,obj) assert_(a.has_shared_memory())
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_f_inout_23seq(self): obj = array(self.num23seq,dtype=self.type.dtype,order='F') shape = (len(self.num23seq),len(self.num23seq[0])) a = self.array(shape,intent.in_.inout,obj) assert_(a.has_shared_memory())
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_c_inout_23seq(self): obj = array(self.num23seq,dtype=self.type.dtype) shape = (len(self.num23seq),len(self.num23seq[0])) a = self.array(shape,intent.in_.c.inout,obj) assert_(a.has_shared_memory())
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_in_copy_from_2casttype(self): for t in self.type.cast_types(): obj = array(self.num2seq,dtype=t.dtype) a = self.array([len(self.num2seq)],intent.in_.copy,obj) assert_(not a.has_shared_memory(),`t.dtype`)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_c_in_from_23seq(self): a = self.array([len(self.num23seq),len(self.num23seq[0])], intent.in_,self.num23seq) assert_(not a.has_shared_memory())
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq,dtype=t.dtype) a = self.array([len(self.num23seq),len(self.num23seq[0])], intent.in_,obj) assert_(not a.has_shared_memory(),`t.dtype`)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_f_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq,dtype=t.dtype,order='F') a = self.array([len(self.num23seq),len(self.num23seq[0])], intent.in_,obj) if t.elsize==self.type.elsize: assert_(a.has_shared_memory(),`t.dtype`) else: assert_(not a.has_shared_memory(),`t.dtype`)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_c_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq,dtype=t.dtype) a = self.array([len(self.num23seq),len(self.num23seq[0])], intent.in_.c,obj) if t.elsize==self.type.elsize: assert_(a.has_shared_memory(),`t.dtype`) else: assert_(not a.has_shared_memory(),`t.dtype`)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_f_copy_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq,dtype=t.dtype,order='F') a = self.array([len(self.num23seq),len(self.num23seq[0])], intent.in_.copy,obj) assert_(not a.has_shared_memory(),`t.dtype`)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_c_copy_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq,dtype=t.dtype) a = self.array([len(self.num23seq),len(self.num23seq[0])], intent.in_.c.copy,obj) assert_(not a.has_shared_memory(),`t.dtype`)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_in_cache_from_2casttype(self): for t in self.type.all_types(): if t.elsize != self.type.elsize: continue obj = array(self.num2seq,dtype=t.dtype) shape = (len(self.num2seq),) a = self.array(shape,intent.in_.c.cache,obj) assert_(a.has_shared_memory(),`t.dtype`)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_in_cache_from_2casttype_failure(self): for t in self.type.all_types(): if t.elsize >= self.type.elsize: continue obj = array(self.num2seq,dtype=t.dtype) shape = (len(self.num2seq),) try: a = self.array(shape,intent.in_.cache,obj) except ValueError,msg: if not str(msg).startswith('failed to initialize intent(cache) array'): raise else: raise SystemError('intent(cache) should have failed on smaller array')
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_cache_hidden(self): shape = (2,) a = self.array(shape,intent.cache.hide,None) assert_(a.arr.shape==shape)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_hidden(self): shape = (2,) a = self.array(shape,intent.hide,None) assert_(a.arr.shape==shape) assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype)))
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_optional_none(self): shape = (2,) a = self.array(shape,intent.optional,None) assert_(a.arr.shape==shape) assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype)))
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_optional_from_2seq(self): obj = self.num2seq shape = (len(obj),) a = self.array(shape,intent.optional,obj) assert_(a.arr.shape==shape) assert_(not a.has_shared_memory())
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_optional_from_23seq(self): obj = self.num23seq shape = (len(obj),len(obj[0])) a = self.array(shape,intent.optional,obj) assert_(a.arr.shape==shape) assert_(not a.has_shared_memory())
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_inplace(self): obj = array(self.num23seq,dtype=self.type.dtype) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape a = self.array(shape,intent.inplace,obj) assert_(obj[1][2]==a.arr[1][2],`obj,a.arr`) a.arr[1][2]=54 assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),`obj,a.arr`) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! assert_(not obj.flags['CONTIGUOUS'])
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def test_inplace_from_casttype(self): for t in self.type.cast_types(): if t is self.type: continue obj = array(self.num23seq,dtype=t.dtype) assert_(obj.dtype.type==t.dtype) assert_(obj.dtype.type is not self.type.dtype) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape a = self.array(shape,intent.inplace,obj) assert_(obj[1][2]==a.arr[1][2],`obj,a.arr`) a.arr[1][2]=54 assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),`obj,a.arr`) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! assert_(not obj.flags['CONTIGUOUS']) assert_(obj.dtype.type is self.type.dtype) # obj type is changed inplace!
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def setUp(self): self.type = Type(%r)
beiko-lab/gengis
[ 28, 14, 28, 18, 1363614616 ]
def proxy(request): if 'url' not in request.GET: return HttpResponse( "The proxy service requires a URL-encoded URL as a parameter.", status=400, content_type="text/plain" ) url = urlsplit(request.GET['url']) # Don't allow localhost connections unless in DEBUG mode if not settings.DEBUG and re.search('localhost|127.0.0.1', url.hostname): return HttpResponse(status=403) locator = url.path if url.query != "": locator += '?' + url.query if url.fragment != "": locator += '#' + url.fragment # Strip all headers and cookie info headers = {} conn = HTTPConnection(url.hostname, url.port) if url.scheme == "http" else HTTPSConnection(url.hostname, url.port) conn.request(request.method, locator, request.raw_post_data, headers) result = conn.getresponse() response = HttpResponse( valid_response(result.read()), status=result.status, content_type=result.getheader("Content-Type", "text/plain") ) return response
cga-harvard/cga-worldmap
[ 96, 31, 96, 30, 1288628631 ]
def geoserver_rest_proxy(request, proxy_path, downstream_path): if not request.user.is_authenticated(): return HttpResponse( "You must be logged in to access GeoServer", mimetype="text/plain", status=401) def strip_prefix(path, prefix): assert path.startswith(prefix) return path[len(prefix):] path = strip_prefix(request.get_full_path(), proxy_path) url = "".join([settings.GEOSERVER_BASE_URL, downstream_path, path]) http = httplib2.Http() http.add_credentials(*settings.GEOSERVER_CREDENTIALS) headers = dict() if request.method in ("POST", "PUT") and "CONTENT_TYPE" in request.META: headers["Content-Type"] = request.META["CONTENT_TYPE"] response, content = http.request( url, request.method, body=request.raw_post_data or None, headers=headers) return HttpResponse( content=content, status=response.status, mimetype=response.get("content-type", "text/plain"))
cga-harvard/cga-worldmap
[ 96, 31, 96, 30, 1288628631 ]
def flickr(request): url = "http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=%s" % settings.FLICKR_API_KEY bbox = request.GET['bbox'] if request.method == 'GET' else request.POST['bbox'] query = request.GET['q'] if request.method == 'GET' else request.POST['q'] maxResults = request.GET['max-results'] if request.method == 'GET' else request.POST['max-results'] coords = bbox.split(",") coords[0] = -180 if float(coords[0]) <= -180 else coords[0] coords[2] = 180 if float(coords[2]) >= 180 else coords[2] coords[1] = coords[1] if float(coords[1]) > -90 else -90 coords[3] = coords[3] if float(coords[3]) < 90 else 90 newbbox = str(coords[0]) + ',' + str(coords[1]) + ',' + str(coords[2]) + ',' + str(coords[3]) url = url + "&tags=%s&per_page=%s&has_geo=1&bbox=%s&format=json&extras=geo,url_q&accuracy=1&nojsoncallback=1" % (query,maxResults,newbbox) feed_response = urllib.urlopen(url).read() return HttpResponse(feed_response, mimetype="text/xml")
cga-harvard/cga-worldmap
[ 96, 31, 96, 30, 1288628631 ]
def hglServiceStarter(request, layer):
    """Start *layer* on HGL's Geoserver, refusing layers not publicly accessible."""
    # Check accessibility first; an access flag of 'R' means restricted.
    access_url = (HGL_URL + "/ogpHglLayerInfo.jsp?ValidationKey="
                  + settings.HGL_VALIDATION_KEY + "&layers=" + layer)
    access_info = json.loads(urllib.urlopen(access_url).read())
    if access_info[layer]['access'] == 'R':
        return HttpResponse(status=403)
    # Ask the RemoteServiceStarter to load the layer into HGL's Geoserver
    # in case it is not already there.
    start_url = (HGL_URL + "/RemoteServiceStarter?ValidationKey="
                 + settings.HGL_VALIDATION_KEY + "&AddLayer=" + layer)
    return HttpResponse(urllib.urlopen(start_url).read())
cga-harvard/cga-worldmap
[ 96, 31, 96, 30, 1288628631 ]
def tweetDownload(request):
    """Proxy a tweet-download request to GEOPS for authenticated org members."""
    if not request.user.is_authenticated() or not request.user.get_profile().is_org_member:
        return HttpResponse(status=403)
    proxy_url = urlsplit(request.get_full_path())
    download_url = "http://" + settings.GEOPS_IP + "?" + proxy_url.query + settings.GEOPS_DOWNLOAD
    http = httplib2.Http()
    upstream, content = http.request(download_url, request.method)
    response = HttpResponse(
        content=content,
        status=upstream.status,
        mimetype=upstream.get("content-type", "text/plain"))
    response['Content-Disposition'] = response.get(
        'Content-Disposition',
        'attachment; filename="tweets"' + request.user.username + '.csv')
    return response
cga-harvard/cga-worldmap
[ 96, 31, 96, 30, 1288628631 ]
def youtube(request):
    """
    Proxy a YouTube geo video search for the current map extent.

    Reads ``bbox``, ``q`` and ``max-results`` from GET or POST, converts the
    bbox into a center point plus a km radius (YouTube's API takes
    location/location-radius rather than a bbox) and relays the feed.
    """
    url = "http://gdata.youtube.com/feeds/api/videos?v=2&prettyprint=true&"
    params = request.GET if request.method == 'GET' else request.POST
    bbox = params['bbox']
    query = params['q']
    maxResults = params['max-results']
    coords = bbox.split(",")
    # Clamp the extent to the valid WGS84 lon/lat range.
    coords[0] = coords[0] if float(coords[0]) > -180 else -180
    coords[2] = coords[2] if float(coords[2]) < 180 else 180
    coords[1] = coords[1] if float(coords[1]) > -90 else -90
    coords[3] = coords[3] if float(coords[3]) < 90 else 90
    # location is the center of the map.
    location = str((float(coords[3]) + float(coords[1])) / 2) + "," + str((float(coords[2]) + float(coords[0])) / 2)
    # Convert the bbox width from degrees of longitude to kilometres at the
    # equator: km = R * radians = R * degrees * pi / 180.
    # Fix: the previous code divided by pi instead of multiplying,
    # underestimating the radius by a factor of ~pi^2 (about 10x).
    R = 6378.1370
    PI = 3.1415926
    left = R * float(coords[0]) * PI / 180.0
    right = R * float(coords[2]) * PI / 180.0
    radius = right - left  # full bbox width; the old "/2*2" was a no-op
    # Cap the search radius at 1000 km, as before.
    radius = 1000 if radius > 1000 else radius
    url = url + "location=" + location + "&max-results=" + maxResults + "&location-radius=" + str(radius) + "km&q=" + urllib.quote(query.encode('utf-8'))
    feed_response = urllib.urlopen(url).read()
    return HttpResponse(feed_response, mimetype="text/xml")
cga-harvard/cga-worldmap
[ 96, 31, 96, 30, 1288628631 ]
def post(self, request, *args, **kwargs):
    """
    Handle a membership application.

    Validates the payload, gets or creates the applicant's user profile,
    attaches them to the chosen administrative unit and records a
    membership-application Interaction.  Returns ``{"user_id": ...}``
    with HTTP 200.
    """
    serializer = ApplyForMembershipSerializer(data=self.request.data)
    serializer.is_valid(raise_exception=True)
    user, created = serializer.get_or_create_user_profile()
    administrative_unit = serializer.validated_data.get("administrative_unit")
    # Fix: the original statement ended with a stray comma, wrapping the
    # call in a pointless one-element tuple.
    user.administrative_units.add(administrative_unit)
    interaction_type = membership_application_interaction_type()
    Interaction.objects.create(
        user=user,
        type=interaction_type,
        administrative_unit=administrative_unit,
        date_from=timezone.now(),
        subject=interaction_type.name,
    )
    return Response(
        {"user_id": user.pk},
        status=status.HTTP_200_OK,
    )
auto-mat/klub
[ 7, 9, 7, 37, 1438249327 ]
def _create_count_dataframe(df, start_year=1995, end_year=2018):
    """
    Aggregate per-year publication statistics from *df*.

    Groups by ``year_published``, counting publications and summing the
    boolean availability flags, then reindexes so every year in
    ``[start_year, end_year)`` is present (missing years filled with 0.0).

    :param df: DataFrame with ``year_published``, ``title`` and the four
        boolean flag columns used below.
    :param start_year: first year of the reporting window (inclusive).
    :param end_year: end of the reporting window (exclusive).
    :return: DataFrame indexed by ``year_published`` with one count column
        per statistic.
    """
    counts = df.groupby(['year_published']).agg(dict(
        title='count',
        is_archived='sum',
        has_formal_description='sum',
        has_odd='sum',
        has_visual_documentation='sum'))
    counts = counts.rename(columns={
        'title': 'count',
        'is_archived': IncludedStatistics.code_availability_count.name,
        'has_formal_description': IncludedStatistics.formal_description_count.name,
        'has_odd': IncludedStatistics.odd_count.name,
        'has_visual_documentation': IncludedStatistics.visual_documentation_count.name
    })
    # Ensure every year in the window appears, even with no publications.
    return counts.reindex(
        pd.RangeIndex(start=start_year, stop=end_year, name='year_published'),
        fill_value=0.0)
comses/catalog
[ 4, 3, 4, 38, 1397110212 ]
def analytical_gradients(network, X, Y):
    """
    Run one forward/backward pass through *network* and return its gradients.

    The cost derivative of the predictions against *Y* is backpropagated,
    then the network's gradients are returned as a single unfolded vector.
    Progress is reported on stdout.
    """
    print("Calculating analytical gradients...")
    print("Forward pass:", end=" ")
    outputs = network.predict(X)
    print("done! Backward pass:", end=" ")
    error_signal = network.cost.derivative(outputs, Y)
    network.backpropagate(error_signal)
    print("done!")
    return network.get_gradients(unfold=True)
csxeba/brainforge
[ 113, 12, 113, 2, 1483437073 ]
def test_view_person_link_list_unauthorized(self):
    # Link lists are readable without credentials, so an anonymous GET
    # should still return 200.
    url = "/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/"
    response = self.client.get(url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
Sinar/popit_ng
[ 21, 4, 21, 91, 1443407491 ]
def test_view_person_link_details_unauthorized(self):
    # A single link's detail view is also public: anonymous GET gives 200.
    url = ("/en/persons/ab1a5788e5bae955c048748fa6af0e97"
           "/links/a4ffa24a9ef3cbcb8cfaa178c9329367/")
    response = self.client.get(url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
Sinar/popit_ng
[ 21, 4, 21, 91, 1443407491 ]
def test_view_person_link_details_not_exist_unauthorized(self):
    # Requesting a link id that does not exist must yield 404, not 200.
    url = "/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/not_exist/"
    response = self.client.get(url)
    self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
Sinar/popit_ng
[ 21, 4, 21, 91, 1443407491 ]
def test_create_person_links_unauthorized(self):
    # Creating a link requires authentication: anonymous POST gives 401.
    payload = {
        "url": "http://twitter.com/sweemeng",
    }
    url = "/en/persons/ab1a5788e5bae955c048748fa6af0e97/links/"
    response = self.client.post(url, payload)
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
Sinar/popit_ng
[ 21, 4, 21, 91, 1443407491 ]
def test_update_person_links_unauthorized(self):
    # Updating a link requires authentication: anonymous PUT gives 401.
    payload = {
        "note": "just a random repo"
    }
    url = ("/en/persons/ab1a5788e5bae955c048748fa6af0e97"
           "/links/a4ffa24a9ef3cbcb8cfaa178c9329367/")
    response = self.client.put(url, payload)
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
Sinar/popit_ng
[ 21, 4, 21, 91, 1443407491 ]
def test_update_person_links_authorized(self):
    # An authenticated PUT should succeed and the new note should be
    # persisted on the translated link record.
    payload = {
        "note": "just a random repo"
    }
    token = Token.objects.get(user__username="admin")
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    url = ("/en/persons/ab1a5788e5bae955c048748fa6af0e97"
           "/links/a4ffa24a9ef3cbcb8cfaa178c9329367/")
    response = self.client.put(url, payload)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    person = Person.objects.language('en').get(id='ab1a5788e5bae955c048748fa6af0e97')
    link = person.links.language("en").get(id="a4ffa24a9ef3cbcb8cfaa178c9329367")
    self.assertEqual(link.note, "just a random repo")
Sinar/popit_ng
[ 21, 4, 21, 91, 1443407491 ]
def test_delete_person_links_unauthorized(self):
    # Deleting a link requires authentication: anonymous DELETE gives 401.
    url = ("/en/persons/ab1a5788e5bae955c048748fa6af0e97"
           "/links/a4ffa24a9ef3cbcb8cfaa178c9329367/")
    response = self.client.delete(url)
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
Sinar/popit_ng
[ 21, 4, 21, 91, 1443407491 ]
def test_delete_person_links_authorized(self):
    # With a valid admin token the DELETE should succeed with 204.
    token = Token.objects.get(user__username="admin")
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    url = ("/en/persons/ab1a5788e5bae955c048748fa6af0e97"
           "/links/a4ffa24a9ef3cbcb8cfaa178c9329367/")
    response = self.client.delete(url)
    self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
Sinar/popit_ng
[ 21, 4, 21, 91, 1443407491 ]