Columns:
file_name   large_string   lengths 4–140
prefix      large_string   lengths 0–12.1k
suffix      large_string   lengths 0–12k
middle      large_string   lengths 0–7.51k
fim_type    large_string   4 classes (conditional_block, identifier_body, identifier_name, random_line_split)
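Each row pairs a source file with the text before a masked span (prefix), the text after it (suffix), the held-out span itself (middle), and a label for what kind of span was masked (fim_type). A minimal sketch of how one such row could be assembled into a fill-in-the-middle training string follows; the sentinel tokens and the abbreviated example row are illustrative assumptions, not values taken verbatim from the dataset.

```python
# Minimal sketch: turn one row of this schema into a fill-in-the-middle
# (FIM) training string in prefix-suffix-middle order. The sentinel tokens
# below are an assumption for illustration; real tokenizers define their own.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def to_fim_sample(row: dict) -> str:
    # prefix/suffix are the visible context; middle is the span to predict
    return (
        f"{FIM_PREFIX}{row['prefix']}"
        f"{FIM_SUFFIX}{row['suffix']}"
        f"{FIM_MIDDLE}{row['middle']}"
    )

# Hypothetical, abbreviated row shaped like the service.rs record below
row = {
    "file_name": "service.rs",
    "prefix": "pub struct ",
    "suffix": " { handle: Handle }",
    "middle": "Service",  # an identifier_name span
    "fim_type": "identifier_name",
}
print(to_fim_sample(row))
```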
service.rs
_cpu_usage: f64, pub max_instances: i64, pub instances: i64, pub tasks: HashMap<String, String>, } #[derive(Debug)] pub struct Statistic { pub timestamp: f64, pub cpu_time: f64, pub cpu_usage: f64, pub mem_usage: f64, } #[derive(Debug, Deserialize)] struct TaskStatistic { cpus_limit: f...
{ handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, multiplier: f64, max_instances: i64, } impl Service { pub fn new(handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, ...
Service
identifier_name
service.rs
_usage: f64, pub max_instances: i64, pub instances: i64, pub tasks: HashMap<String, String>, } #[derive(Debug)] pub struct Statistic { pub timestamp: f64, pub cpu_time: f64, pub cpu_usage: f64, pub mem_usage: f64, } #[derive(Debug, Deserialize)] struct TaskStatistic { cpus_limit: f64, ...
("AUTOSCALE_MEM_PERCENT", v) => { max_mem_usage = from_value(v.clone()).unwrap(); } ("AUTOSCALE_CPU_PERCENT", v) => { max_cpu_usage = from_value(v.clone()).unwrap(); } _ =...
{ max_instances = from_value(v.clone()).unwrap(); }
conditional_block
audio_processing.py
OTUNE ) return tf.data.Dataset.zip((dataset, label_ds)) def get_stft(waveform, frame_length=512, frame_step=256): # apply short-time Fourier transform # splits signal into frames and applies Fourier transform on those # by default uses smallest power of 2 enclosing frame_length for fft size ...
tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1 ) / denominator, (1, feat.shape[1]) ) ], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1] return delta_feat def get_mfcc( log_mel_spectrogram, num_mel_bins_to_pick=12, add_energy=False, ...
random_line_split
audio_processing.py
OTUNE ) return tf.data.Dataset.zip((dataset, label_ds)) def get_stft(waveform, frame_length=512, frame_step=256): # apply short-time Fourier transform # splits signal into frames and applies Fourier transform on those # by default uses smallest power of 2 enclosing frame_length for fft size ...
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example # Compute MFCCs from log mel spectrograms # Take num_mel_bins_to_pick bins mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[ ..., :num_mel_bins_to_pick] # add symmetric_z...
energy = tf.slice( log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1], [log_mel_spectrogram.shape[0], 1] ) log_mel_spectrogram = tf.slice( log_mel_spectrogram, [0, 0], [log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1] )
conditional_block
audio_processing.py
(file_path): # each file's label is its directory's name parts = tf.strings.split(file_path, os.path.sep) return parts[-2] def prepare_label_dataset(file_paths): # create dataset by splitting input tensor to individual items label_ds = tf.data.Dataset.from_tensor_slices(file_paths) # extract ...
get_label
identifier_name
audio_processing.py
def get_stft(waveform, frame_length=512, frame_step=256): # apply short-time Fourier transform # splits signal into frames and applies Fourier transform on those # by default uses smallest power of 2 enclosing frame_length for fft size # uses hann window, an alternative would be hamming window # ...
label_ds = prepare_label_dataset(file_paths) if len(label_list) > 0: label_ds = label_ds.map( lambda label: tf.argmax(label == label_list), num_parallel_calls=tf.data.experimental.AUTOTUNE ) return tf.data.Dataset.zip((dataset, label_ds))
identifier_body
dap_cortex-m7.py
_sgpb | (bit_index << 8)) waitForFlashReady() else: log.info("Debug:: Clearing GPNVM bit %d" % bit_index) dev.Write32(efc_fcr, efc_cmd_cgpb | (bit_index << 8)) waitFor...
reset_and_halt()
identifier_body
dap_cortex-m7.py
5fa0001) # VECTKEY | VECTRESET) # 1=VECTRESET 4=SYSRESETREQ n = 0 max_retries = 100 seenReset = False while n < max_retries: dhcsr = dev.Read32(arm.DHCSR) log.info("S_RESET_ST = %s / %s at PC = %X" % ("1" if dhcsr & 0x02000000 else "0", "Halted" if dhcsr & 0x20000 else "RUNNING", get_p...
prog_read
identifier_name
dap_cortex-m7.py
global need_reset_for_read_operations need_reset_for_read_operations = True if flash_strategy == 1 else False def bitsInByte(byteValue): for i in xrange(8): yield (byteValue >> i) & 1 def log_efc_fsr_error(fsr): err_string = "" if fsr & 0x00080000: # FSR_MECCEMSB err_string = "MEC...
was_running = True halt_or_raise()
conditional_block
dap_cortex-m7.py
... elif str(type_of_mem) != "Pgm": log.warning( "Debug:: Currently not supporting writing to memory type %s" % type_of_mem) return if is_target_running(): log.error("Error: Target is running when it should be halted") halt_or_raise() if "RH71" not in device: # S...
random_line_split
ctx.rs
!(ctx.no_linger(), true); /// # /// # Ok(()) /// # } /// ``` pub fn build(&self) -> Result<Ctx, Error> { let ctx = Ctx::new(); self.apply(&ctx)?; Ok(ctx) } /// Applies a `CtxBuilder` to an existing `Ctx`. /// /// # Usage Example /// ``` /// # use...
self.raw.as_ref().get(RawCtxOption::SocketLimit) }
identifier_body
ctx.rs
== 0 { break; } else { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINTR => (), _ => unreachable!(), } } } } fn shutdown(&self) { let rc = unsafe { ...
Self { ctx } } } /// A config for a [`Ctx`]. /// /// Useful in configuration files. /// /// [`Ctx`]: struct.Ctx.html #[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct CtxConfig { io_threads: Option<i32>, max_msg_size: Option<i32>, max_sockets: Option<i32>, ...
panic!(msg_from_errno(unsafe { sys::zmq_errno() })); }
conditional_block
ctx.rs
IOThreads, MaxSockets, MaxMsgSize, SocketLimit, IPV6, Blocky, } impl From<RawCtxOption> for c_int { fn from(r: RawCtxOption) -> c_int { match r { RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int, RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_in...
} #[derive(Copy, Clone, Debug)] enum RawCtxOption {
random_line_split
ctx.rs
== 0 { break; } else { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINTR => (), _ => unreachable!(), } } } } fn shutdown(&self) { let rc = unsafe { ...
&mut self, value: i32) -> &mut Self { self.inner.set_io_threads(Some(value)); self } /// See [`set_max_msg_size`]. /// /// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size pub fn max_msg_size(&mut self, value: i32) -> &mut Self { self.inner.set_max_msg_size(Some...
o_threads(
identifier_name
common_domain_analyser.py
domain and all its SAN for domain in record['all_domains']: # Remove wildcard domain = re.sub(r'^\*\.', '', domain) # Remove some FP-prone parts domain = re.sub(AhoCorasickDomainMatching.IGNORED_PARTS, '', domain) # Similar to all domains in the lis...
return record class DomainMatchingOption(Enum): ''' Control how strict we want to do our matching. ''' # For example applefake.it will match with apple.com case ['apple'] is # a subset of ['apple', 'fake'] SUBSET_MATCH = 0 # Similar but use in instead of issubset so that the ord...
record['analysers'].append({ 'analyser': type(self).__name__, 'output': results, })
conditional_block
common_domain_analyser.py
name == BulkDomainMarker.__name__ and analyser['output']: # Skip bulk record and deal with it later, with such large # number of SAN name, it's bound to be a match continue analysers[name] = analyser['output'] # Check that all outputs are there befo...
''' Generate features to detect outliers in the stream. In our case, the outliers are the 'suspicious' phishing domains. ''' NOSTRIL_LENGTH_LIMIT = 6 # pylint: disable=invalid-name def run(self, record): ''' The list of features will be: - The number of domain parts, for ...
identifier_body
common_domain_analyser.py
domain and all its SAN for domain in record['all_domains']: # Remove wildcard domain = re.sub(r'^\*\.', '', domain) # Remove some FP-prone parts domain = re.sub(AhoCorasickDomainMatching.IGNORED_PARTS, '', domain) # Similar to all domains in the lis...
(Enum): ''' Control how strict we want to do our matching. ''' # For example applefake.it will match with apple.com case ['apple'] is # a subset of ['apple', 'fake'] SUBSET_MATCH = 0 # Similar but use in instead of issubset so that the order is preserved ORDER_MATCH = 1 class DomainMa...
DomainMatchingOption
identifier_name
common_domain_analyser.py
ers. ''' def __init__(self, include_tld=True, option=DomainMatchingOption.ORDER_MATCH): ''' Just load the wordsegment package, whatever it is. ''' wordsegment.load() # Save the matching option here so we can refer to it later self.include_tld = include_tld ...
def _generate_alternatives(self, alt_characters, index=0, current=''): ''' Generate all alternative ASCII names of a domain using the list of all alternative characters. '''
random_line_split
server.go
ReapTimeout = 5 * time.Second ) var errorMessage = map[int]string{ errorTransportUnknown: "Transport unknown", errorUnknownSID: "Session ID unknown", errorBadHandshakeMethod: "Bad handshake method", errorBadRequest: "Bad request", } var ( validTransports = map[string]bool{ transportWebSocket:...
s.Handler != nil { go s.Handler(c.pubConn) } } // ServeHTTP implements the http.Handler interface for an FTC Server. func (s *server) ServeHTTP(w http.ResponseWriter, r *
glog.Errorf("could not encode open payload: %v", err) return } if
conditional_block
server.go
upgrades[i] = u i++ } return upgrades } // A Handler is called by the server when a connection is // opened successfully. type Handler func(*Conn) type server struct { // Handler handles an FTC connection. Handler basePath string cookieName string clients *clientSet // The set of connections (so...
emoteAddr := r.Header.Get("X-Forwarded-For") if len(remoteAddr) == 0 { remoteAddr = r.RemoteAddr } glog.Infof("%s (%s) %s %s %s", r.Proto, r.Header.Get("X-Forwarded-Proto"), r.Method, remoteAddr, r.URL) transport := r.FormValue(paramTransport) if strings.HasPrefix(r.URL.Path, s.basePath) && !validTransports[tra...
identifier_body
server.go
ReapTimeout = 5 * time.Second ) var errorMessage = map[int]string{ errorTransportUnknown: "Transport unknown", errorUnknownSID: "Session ID unknown", errorBadHandshakeMethod: "Bad handshake method", errorBadRequest: "Bad request", } var ( validTransports = map[string]bool{ transportWebSocket:...
// options and handler. If nil options are passed, the defaults // specified in the constants above are used instead. func NewServer(o *Options, h Handler) *server { opts := Options{} if o != nil { opts = *o } if len(opts.BasePath) == 0 { opts.BasePath = defaultBasePath } if len(opts.CookieName) == 0 { opts...
CookieName string } // NewServer allocates and returns a new server with the given
random_line_split
server.go
by the server when a connection is // opened successfully. type Handler func(*Conn) type server struct { // Handler handles an FTC connection. Handler basePath string cookieName string clients *clientSet // The set of connections (some may be closed). wsServer *websocket.Server // The underlying Web...
shakeData(c *
identifier_name
symbolizer.go
liner file", "err", err) } } s.linerCache = newLinerCache numFunctions := 0 for _, locationsByMapping := range locationsByMappings { for _, locationLines := range locationsByMapping.LocationsLines { numFunctions += len(locationLines) } } if numFunctions == 0 { return nil } functions := make([]*pb....
countLocationsToSymbolize
identifier_name
symbolizer.go
: line.Line, }) i++ } // Update the location with the lines in-place so that in the next // step we can just reuse the same locations as were originally // passed in. locations = append(locations, locationsByMapping.Locations[j]) locationsByMapping.Locations[j].Lines = lines } } // ...
random_line_split
symbolizer.go
} functions := make([]*pb.Function, numFunctions) numLocations := 0 i := 0 for _, locationsByMapping := range locationsByMappings { for _, locationLines := range locationsByMapping.LocationsLines { if len(locationLines) == 0 { continue } numLocations++ for _, line := range locationLines { fun...
{ locationsToSymbolize++ continue }
conditional_block
symbolizer.go
func WithDemangleMode(mode string) Option { return func(s *Symbolizer) { s.demangler = demangle.NewDemangler(mode, false) } } type Symbolizer struct { logger log.Logger // attempts counts the total number of symbolication attempts. // It counts per batch. attempts prometheus.Counter // errors counts the tot...
{ return func(s *Symbolizer) { s.attemptThreshold = t } }
identifier_body
app.js
SELECTABLES.push(sel); SELECTABLES_BY_NAME[sel.d.toLowerCase()] = sel; SELECTABLES_BY_KEY[sel.k] = sel;
} }; function TimeZoneState(m, zone) { this.tz = m.tz(); this.urlKey = zone.k; this.offset = 0; this.timezoneShortName = zone.n; this.timezoneName = zone.d; this.update(); } TimeZoneState.prototype.update = function(day, homeZone) { var reftz = homeZone ? homeZone.tz : this.tz;...
random_line_split
app.js
function zoneExists(input) { return !!SELECTABLES_BY_NAME[normalizeZoneName(input)]; } function lookupTimeZoneState(input) { var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)]; if (!zone) { zone = SELECTABLES_BY_KEY[input]; if (!zone) return null; } var m; try ...
{ return zoneName.toLowerCase().replace(/^\s+|\s+$/g, ''); }
identifier_body
app.js
InputZone = function() { if ($scope.addZone($scope.currentZone)) $scope.currentZone = ''; }; $scope.addZone = function(zoneName) { var zoneState = lookupTimeZoneState(zoneName); if (zoneState === null) return false; $scope.zones.push(zoneState); $scope.updateZones(...
{ localChange = false; return; }
conditional_block
app.js
(zoneName) { return zoneName.toLowerCase().replace(/^\s+|\s+$/g, ''); } function zoneExists(input) { return !!SELECTABLES_BY_NAME[normalizeZoneName(input)]; } function lookupTimeZoneState(input) { var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)]; if (!zone) { zone = SELECTABLES_B...
normalizeZoneName
identifier_name
add_legacy_redirects.py
and redirects: written = written + 1 # print redirects at the end of the frontmatter print('legacyRedirectsGenerated:') print(' # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.') for redirect in redirects: relat...
headings.append( normalize_title(heading_re.sub('', line)) ) mdx_file.close() return headings def normalize_title(title): title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip()) title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title) title = t...
heading_re = re.compile(r'^#+ ') mdx_file = open(filepath) for line in mdx_file: if heading_re.match(line):
random_line_split
add_legacy_redirects.py
and redirects: written = written + 1 # print redirects at the end of the frontmatter print('legacyRedirectsGenerated:') print(' # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.') for redirect in redirects: relat...
determine_root_mdx_file(docs_path, mdx_folder = None): root_path = docs_path if mdx_folder: root_path += '/{}'.format(mdx_folder) index_path = root_path + '/index.mdx' if not os.path.exists(index_path): return None return index_path def print_report(report_dict): for key in report_dict.keys(): ...
title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip()) title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title) title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','') return title def
identifier_body
add_legacy_redirects.py
(output): written = 0 for filepath in Path('product_docs/docs').rglob('*.mdx'): redirects = output[str(filepath)] in_frontmatter = False injected_redirects = False in_existing_redirect_section = False for line in fileinput.input(files=[filepath], inplace=1): if not injected_redirects and ...
write_redirects_to_mdx_files
identifier_name
add_legacy_redirects.py
redirects: written = written + 1 # print redirects at the end of the frontmatter print('legacyRedirectsGenerated:') print(' # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.') for redirect in redirects: relative_...
url = legacy_page['url'] if '/latest/' in url: # skip latest urls if they appear, we'll handle those separately continue url_scheme = determine_url_scheme(url) # if product version index page, can match right here is_product_index = re.search(r'\/edb-docs\/p\/[\w-]+\/[\d...
ct_data = legacy_urls_by_product_version[product] for version in product_data.keys(): product_version_data = product_data[version] effective_version = version if product in equivalent_versions and version in equivalent_versions.get(product): effective_version = equivalent_versions.get(product).get(v...
conditional_block
Pipeline_for_videos.py
o = cv2.calcHist([image_yuv],[0],None,[256],[0,256]) #image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0]) #histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256]) #plt.plot(histo) #plt.show() clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20)) image_yuv[:,:,0] = clahe.apply(ima...
s_binary = hls_select(img_undist,thresh=(151,255)) #151 luminiscence = yuv_select_lumin(img_undist,thresh=(14,255)) combined = np.zeros_like(dir_binary) combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)]...
count+=1 ret, image = cap.read() dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb')) dst = dist_pickle["dist"] mtx = dist_pickle["mtx"] if ret: ksize = 3 img_undist = cv2.undistort(image,mtx,dst,None,mtx) final_img = np.copy(img_undist) ...
conditional_block
Pipeline_for_videos.py
o = cv2.calcHist([image_yuv],[0],None,[256],[0,256]) #image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0]) #histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256]) #plt.plot(histo)
#plt.show() clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20)) image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0]) img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR) return img_output def yuv_select_lumin(image,thresh=(0,255)): yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV) ...
random_line_split
Pipeline_for_videos.py
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)): # Calculate gradient direction # Apply threshold gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel) sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel) ...
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel) sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel) mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2) absolute = np.absolute(mag_sobel) scaled = np.uint8(255*absolute/np.max(...
identifier_body
Pipeline_for_videos.py
(image, sobel_kernel=3, mag_thresh=(0, 255)): # Calculate gradient magnitude # Apply threshold gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel) sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel) mag_sobel = np.sqrt...
mag_thresh
identifier_name
decomposition_utils.py
Parse decomposition expression in string format, retaining ellipses if present. """ input_modes, *output_modes = subscripts.split("->") if not output_modes: raise ValueError("Output modes must be explicitly specified for decomposition") if len(output_modes) > 1: raise ValueError("sub...
"""
random_line_split
decomposition_utils.py
different number of modes, # here we disable it due to limited use case if any and potential confusion due to implicit specification. raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}") n = num_implicit_modes.pop() ...
create_operands_and_descriptors
identifier_name
decomposition_utils.py
_modes def compute_mid_extent(size_dict, inputs, outputs): """ Compute the expected mid extent given a size_dict and the modes for both inputs and outputs. """ size_dict = size_dict.copy() # this func will modify it in place left_output = set(outputs[0]) right_output = set(outputs[1]) shar...
logger.info(f"The SVDConfig attribute '{cut
""" Given an SVDMethod object, set the corresponding attributes in the SVDConfig. """ svd_algorithm = None for method_attr, attr in SVD_METHOD_CONFIG_MAP.items(): data = getattr(svd_method, method_attr) if method_attr == 'partition': data = PARTITION_MAP[data] elif me...
identifier_body
decomposition_utils.py
_modes def compute_mid_extent(size_dict, inputs, outputs): """ Compute the expected mid extent given a size_dict and the modes for both inputs and outputs. """ size_dict = size_dict.copy() # this func will modify it in place left_output = set(outputs[0]) right_output = set(outputs[1]) shar...
n = num_implicit_modes.pop() ellipses_modes = tuple(range(label_end-n, label_end)) for i, _modes in enumerate(all_modes): if Ellipsis not in _modes: continue s = _modes.index(Ellipsis) all_modes[i] = _modes[:s] + ellipses_modes + ...
raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}")
conditional_block
main.rs
Mat4>, } #[inline(always)] fn lerp<S, T: Add<T,T> + Sub<T,T> + Mul<S,T>>(start: T, end: T, s: S) -> T { return start + (end - start) * s; } impl<'a> Model<'a> { fn from_file(ai_scene: ai::Scene<'a>, graphics: &mut gfx::Graphics<gfx::GlDevice, gfx::GlCommandBuffer>, program: &...
// find the textures used by this model from the list of materials for mat in ai_scene.get_materials().iter() { let texture_src = mat.get_texture(ai::material::TextureType::Diffuse, 0 ); ma...
// Create the buffer for the bone transformations. We fill this // up each time we draw, so no need to do it here. let u_bone_transformations: gfx::BufferHandle<Mat4> = graphics.device.create_buffer(MAX_BONES, gfx::BufferUsage::Dynamic);
random_line_split
main.rs
<'a> { pub vertices: Vec<Vertex>, pub indices: Vec<u32>, pub batches: Vec<ModelComponent>, pub scene: ai::Scene<'a>, pub bone_map: RefCell<BoneMap>, pub global_inverse: ai::Matrix4x4, pub bone_transform_buffer: gfx::BufferHandle<Mat4>, } #[inline(always)] fn lerp<S, T: Add<T,T> + Sub<T,T> +...
Model
identifier_name
bot.js
].bans = 0; },Otime) } catch (error) { console.log(error) } } } }) }); let channelc = {}; client.on('channelCreate', async (channel) => { const rebellog = client.channels.find("name", "log"), Oguild = channel.guild, Onumber = 10, Otime = 10000; const audit = await channel.guild.fetchAuditLogs({limit: 1})...
conditional_block
bot.js
.addField('**__ Voice Channels | رومات صوتيه__**',`[** __${msg.guild.channels.filter(m => m.type === 'voice').size}__ **]`,true) .addField('**__ Created At | صنع في __**',msg.guild.createdAt.toLocaleString()) msg.channel.send({embed:embed}); } }); client.on('message', message => { var prefix = "$"; ...
var guilds = {}; client.on('guildBanAdd', function(guild) { const rebellog = client.channels.find("name", "log"), Onumber = 10, Otime = 10000 guild.fetchAuditLogs({ type: 22 }).then(audit => { let banner = audit.entries.map(banner => banner.executor.id) let bans = guilds[guild.i...
random_line_split
menus.component.ts
); // call get pages function this.getPages(); // get templates from storage storage.get('templates').then(templates => { if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE; this.templates = templates; }); events.watchOffline().subscribe(() => { if ((!this.pages || this.pa...
{ if(this.speakingStart == false) { this.alertCtrl.create({ message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring1" size="large"></ion-icon></h1> <p><h6>Kattints az OK gombra és beszélj.<h6></p>', buttons:[ { text:"OK", handler:()=>{ this.speech.hasPermission(...
Permission()
identifier_name
menus.component.ts
); // call get pages function this.getPages(); // get templates from storage storage.get('templates').then(templates => { if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE; this.templates = templates; }); events.watchOffline().subscribe(() => { if ((!this.pages || this.pa...
else{ this.speech.requestPermission().then(()=>{ this.openSpeech(); }) } },(err)=>{ this.speech.requestPermission().then(()=>{ this.openSpeech(); }) }) } }, ...
this.openSpeech(); }
conditional_block
menus.component.ts
); // call get pages function this.getPages(); // get templates from storage storage.get('templates').then(templates => { if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE; this.templates = templates; }); events.watchOffline().subscribe(() => { if ((!this.pages || this.pa...
// reports() { // this.router.navigateByUrl('../reports/') // } // ********************************************************* // Voice search - No City found // ********************************************************* ifNoResFound(){ this.alertCtrl.create({ message:"<h6>Nincs találat.</h6>...
{ this.iab.create( url, '_system' ); }
identifier_body
menus.component.ts
('Loading dismissed!'); } // *********************************** // Show toast when user need to talk // ************************************ async presentToast() { // const toast = await this.toastController.create({ // message: 'Talk now... <ion-icon name="mic-outline"></ion-icon>', // duration: 3000, //...
this.refresh(); }) ; },(err)=>{ //alert("refresh call 3"); this.refresh();
random_line_split
template_model.py
(parameters_values)), "You didn't specify all parameters' values." # Make sure we are dealing with arrays (list will be transformed) if not isinstance(differential_fluxes, u.Quantity): differential_fluxes = differential_fluxes * 1/(u.keV*u.s*u.cm**2) differential_fluxes = np.arra...
self.K.unit = y_unit self.scale.unit = 1 / x_unit
identifier_body
template_model.py
self._interpolation_degree = interpolation_degree self._spline_smoothing_factor = int(spline_smoothing_factor) def define_parameter_grid(self, parameter_name, grid): assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name grid_ = np.arr...
(self, method, name=None): if name is None: name = method.func_name setattr(self.__class__, name, method) class RectBivariateSplineWrapper(object): """ Wrapper around RectBivariateSpline, which supplies a __call__ method which accept the same syntax as the other interpolation methods ...
add_method
identifier_name
template_model.py
self._interpolation_degree = interpolation_degree self._spline_smoothing_factor = int(spline_smoothing_factor) def define_parameter_grid(self, parameter_name, grid): assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name grid_ = np.arr...
else: raise IOError("The file %s already exists! You cannot call two different " "template models with the same name" % filename_sanitized) # Open the HDF5 file and write objects with HDFStore(filename_sanitized) as store: # The...
try: os.remove(filename_sanitized) except: raise IOError("The file %s already exists and cannot be removed (maybe you do not have " "permissions to do so?). " % filename_sanitized)
conditional_block
template_model.py
self._interpolation_degree = interpolation_degree self._spline_smoothing_factor = int(spline_smoothing_factor) def define_parameter_grid(self, parameter_name, grid): assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name grid_ =...
function_definition['description'] = description function_definition['latex'] = 'n.a.' # Now build the parameters according to the content of the parameter grid parameters = collections.OrderedDict() parameters['K'] = Parameter('K', 1.0) parameters['scale'] = Paramete...
# Make the dictionary of parameters function_definition = collections.OrderedDict()
random_line_split
index.js
list data $rootScope.isMore=true;// whether there is a next page $rootScope.isPrev=false;// whether there is a previous page $rootScope.exit=function(){ $rootScope.userId=""; $rootScope.userName=""; } $rootScope.jump=function(url){ $location.path(url); } $rootScope.$watch("searchMsg.pageNum",function(){ // determine the pagination button state $rootScope.isPrev=$rootSc...
app.controller("mallProListCtrl",["$scope", "$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){ $rootScope.isPageShow=innerWidth>450?true:false; $rootScope.searchMsg={}; $rootScope.searchMsg.pclass=$routeParams.id; $rootScope.num=[]; $rootScope.proList=[]; $rootScope.loadMore...
}else{ alert("Add failed") } }) } }else{ //TODO pop up a dialog prompting the user to log in alert("You are not logged in yet; please log in before using this feature") } } }]); app.controller("mallLotteryCtrl",["$scope",function($scope){ }]); app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootSc...
conditional_block
index.js
",{ templateUrl:"tpl/mall_proList.html", controller:"mallProListCtrl" }) .when("/mall_proListbyteam/:id",{ templateUrl:"tpl/mall_proListbyteam.html", controller:"mallProListByTeamCtrl" }) .when("/mall_detail/:id",{ templateUrl:"tpl/mall_detail.html", controller:"mallD...
identifier_body
index.js
",{ templateUrl:"tpl/mall_proList.html", controller:"mallProListCtrl" }) .when("/mall_proListbyteam/:id",{ templateUrl:"tpl/mall_proListbyteam.html", controller:"mallProListByTeamCtrl" }) .when("/mall_detail/:id",{ templateUrl:"tpl/mall_detail.html", controller:"mallD...
identifier_name
index.js
<=obj.pageCount;i++){ $rootScope.num.push(i); $rootScope.isPageShow=true; } }else{ $rootScope.isPageShow=false; if($rootScope.len<8){ $rootScope.searchMsg.pageNum++; } } for (var i = 0; i < $rootScope.len; i++) { var img=obj.data[i]...
$scope.orderList[i].orderTime=$scope.changeTime(date); var status=$scope.orderList[i].status; $scope.orderList[i].status=$scope.judgeStatus(status); } })
random_line_split
checkData.py
tMarkerFromXML(xmlBody, markerStr): marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody) if marker and marker[0]: logging.info('get marker in response %s' %marker[0]) return marker[0] else: logging.info('get no marker in response') return No...
e.findall('<Key>(.+?)</Key>', xmlBody) versions = re.findall('<VersionId>(.+?)</VersionId>', xmlBody) for i in range(len(versions)): if versions[i] == 'null': versions[i]=None if len(versions)>0 and len(versions) != len(keys): logging.error('response error, versions != keys %s' %xmlBody) ...
identifier_body
checkData.py
, markerStr): marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody) if marker and marker[0]: logging.info('get marker in response %s' %marker[0]) return marker[0] else: logging.info('get no marker in response') return None ...
rFromXML(xmlBody
identifier_name
checkData.py
if not len(versions): versions = [None for i in range(len(keys))] return zip(keys,versions) def getMarkerFromXML(xmlBody, markerStr): marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody) if marker and marker[0]: logging.info('get marker in response %s' %marker[0]...
if len(versions)>0 and len(versions) != len(keys): logging.error('response error, versions != keys %s' %xmlBody) return []
random_line_split
checkData.py
HTTPConnection.connection.endheaders() localAddr = str(myHTTPConnection.connection.sock._sock.getsockname()) peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername()) logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \ ...
# Log and redirect (<400: debug; >=400,<500: warn; >=500: error) if httpResponse.status < 400: logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \ %(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, sta...
son
conditional_block
create.go
return microerror.Mask(err) } newObj, err := r.restClient.Get().AbsPath(accessor.GetSelfLink()).Do(ctx).Get() if err != nil { return microerror.Mask(err) } newAccessor, err := meta.Accessor(newObj) if err != nil { return microerror.Mask(err) } patches, err := r.computeCreateEventP...
{ r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status") // We process the status updates within its own backoff here to guarantee its // execution independent of any eventual retries via the retry resource. It // might happen that the reconciled object is not the latest version so any // patch wo...
identifier_body
create.go
version so any // patch would fail. In case the patch fails we retry until we succeed. The // steps of the backoff operation are as follows. // // Fetch latest version of runtime object. // Compute patches for runtime object. // Apply computed list of patches. // // In case there are no patches we ...
else if err != nil { return microerror.Mask(err) } if len(patches) > 0 { err := r.applyPatches(ctx, newAccessor, patches) if err != nil { return microerror.Mask(err) } modified = true } return nil } b := r.backOffFactory() n := func(err error, d time.Duration) { r.logge...
{ r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available") r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource") return nil }
conditional_block
create.go
latest version so any // patch would fail. In case the patch fails we retry until we succeed. The // steps of the backoff operation are as follows. // // Fetch latest version of runtime object. // Compute patches for runtime object. // Apply computed list of patches. // // In case there are no patc...
} // Update the node status based on what the tenant cluster API tells us. // // TODO this is a workaround until we can read the node status information // from the NodeConfig CR status. This is not possible right now because the // NodeConfig CRs are still used for draining by older tenant clusters. { var k8...
random_line_split
create.go
(ctx context.Context, obj interface{}) error { r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status") // We process the status updates within its own backoff here to guarantee its // execution independent of any eventual retries via the retry resource. It // might happen that the reconciled object ...
EnsureCreated
identifier_name
kmeans_to_classifier_main.py
'first_tkod_tifl_count', 'history_trail_cnt', 'teacher_after_4d_lp_cnt', 'l3m_hw_correct_rate', # # 'teacher_fresh_hour', "effectiveCommunicationCount", "score_min", 'learning_target_lenght', "teacher_staff_age_byopt", 'self_evaluation_len...
random_line_split
kmeans_to_classifier_main.py
[labels] == 1]) v = len(df_btest[df_btest[labels] == 0]) / len(df_btest[df_btest[labels] == 1]) print(t,v) # feature selection # from sklearn.feature_selection import RFECV # # dt_score = make_scorer(precision_score, pos_label=1) # rf = RandomForestClassifier(n_estimators=24, criterion='gini', max_depth...
=0.8, crossover_rate=0.2) x_train_x = np.array(x_train[x_train["chunk_label"] == i].drop(["chunk_label", labels], axis=1)) x_test_x = np.array(x_test[x_test["chunk_label"] == i].drop(["chunk_label", labels], ...
conditional_block
units.py
particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc). """ # From https://stackoverflow.com/a/10970888 _prefix_table = { 'y': 1e-24, # yocto 'z': 1e-21, # zepto 'a': 1e-18, # atto 'f': 1e-15, # femto 'p': 1e-12, # pico 'n': 1e-9, # nan...
return self.value < other.value def __le__(self: _TT, other: _TT) -> bool: """ Check if self is less than or equal to other. The types must match. """ if type(self) != type(other): raise TypeError("Types do not match") return self.value <= other....
raise TypeError("Types do not match")
conditional_block
units.py
particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc). """ # From https://stackoverflow.com/a/10970888 _prefix_table = { 'y': 1e-24, # yocto 'z': 1e-21, # zepto 'a': 1e-18, # atto 'f': 1e-15, # femto 'p': 1e-12, # pico 'n': 1e-9, # nan...
(self: _TT, other: float) -> _TT: """ Multiply
__mul__
identifier_name
units.py
some particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc). """ # From https://stackoverflow.com/a/10970888
'z': 1e-21, # zepto 'a': 1e-18, # atto 'f': 1e-15, # femto 'p': 1e-12, # pico 'n': 1e-9, # nano 'u': 1e-6, # micro 'm': 1e-3, # milli 'c': 1e-2, # centi 'd': 1e-1, # deci '': 1, # <no prefix> 'k': 1e3, # kilo '...
_prefix_table = { 'y': 1e-24, # yocto
random_line_split
units.py
particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc). """ # From https://stackoverflow.com/a/10970888 _prefix_table = { 'y': 1e-24, # yocto 'z': 1e-21, # zepto 'a': 1e-18, # atto 'f': 1e-15, # femto 'p': 1e-12, # pico 'n': 1e-9, # nan...
@property @abstractmethod def unit_type(self) -> str: """Get the base unit type for values. (e.g. for "s", this would be "time") Meant to be overridden by subclasses.""" @property @abstractmethod def default_prefix(self) -> str: """Get the default prefix for values. ...
"""Get the base unit for values (e.g. "s", "m", "V", etc). Meant to be overridden by subclasses."""
identifier_body
attribute_context.pb.go
type AttributeContext_Peer struct { // The address of the peer, this is typically the IP address. // It can also be UDS path, or others. Address *core.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // The canonical service name of the peer. // It should be set to :ref:`the HTTP x-env...
random_line_split
attribute_context.pb.go
(m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) } func (*AttributeContext_Peer) ProtoMessage() {} func (*AttributeContext_Peer) Descriptor() ([]byte, []int) { return fileDescriptor_a6030c9468e3591b, []int{0, 0} } func (m *AttributeContext_Peer) XXX_Unmarshal(b []byte) error { retur...
{ return m.Query }
conditional_block
attribute_context.pb.go
() *core.Metadata { if m != nil { return m.MetadataContext } return nil } // This message defines attributes for a node that handles a network request. // The node can be either a service or an application that sends, forwards, // or receives the request. Service peers should fill in the `service`, // `principal`...
GetMetadataContext
identifier_name
attribute_context.pb.go
(m *AttributeContext_Peer) Reset() { *m = AttributeContext_Peer{} } func (m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) } func (*AttributeContext_Peer) ProtoMessage() {} func (*AttributeContext_Peer) Descriptor() ([]byte, []int) { return fileDescriptor_a6030c9468e3591b, []in...
func (m *AttributeContext_HttpRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_AttributeContext_HttpRequest.Merge(m, src) } func (m *AttributeContext_HttpRequest) XXX_Size() int { return xxx_messageInfo_AttributeContext_HttpRequest.Size(m) } func (m *AttributeContext_HttpRequest) XXX_DiscardUnknown() { xxx_m...
{ return xxx_messageInfo_AttributeContext_HttpRequest.Marshal(b, m, deterministic) }
identifier_body
1602.object_view.js
?1:0)+'" '+(cl?'iclass="'+className+'"':'')+'><td class="'+(is_levelup?'likealink ':'')+'ovColMain" style="padding-left:'+(15*level+3)+'px" '+(is_levelup?levelup:'')+'>'+label+'</td><td class="ovColumn">'+val+'</td></tr>'; // Recursively compute the expanded descendants if(expanded)r+=$.ov.objectToRows(ovid,newP...
var cs = $.ov.classes[className].collections;
random_line_split
1602.object_view.js
x == 'object' && typeof mem.editing != 'object') ) ){ // Edit var type = (mem.editing&&mem.editing.type)?mem.editing.type:mem.editing; switch(type){ case 'textarea': value = '<textarea path="'+newPathStr+'" rows="'+(mem.editing&&mem.editing.rows?mem.editing.rows:3)+'" onblur="$.ov...
',\''+mem.className+'\'':'')+')"'; var is_levelup = false; if(typeof x == 'object')for(var xxx in x){is_levelup = true; break;} var expanded = is_levelup && (cm_in_array(exp,newPathStr) || x.__ov_expanded || mem.defaultExpanded); r+='<tr level="'+level+'" expanded="'+(expanded?1:0)+'" '+(mem.classNa...
asses[mem.className].collection.value.apply(x); } } if(typeof mem == 'string'){ label = mem; }else if(typeof mem == 'object' && mem.label){ label = mem.label; } var levelup = 'class="likealink" onclick="$(this.parentNode).objectView(\''+newPathStr+'\''+(mem.className?
conditional_block
dq_ingestion.go
Name, key string, sharedQueue *utils.WorkerQueue) { // First see if there's another instance of the same model in the store modelName := tenant + "/" + gsName bkt := utils.Bkt(modelName, sharedQueue.NumWorkers) sharedQueue.Workqueue[bkt].AddRateLimited(modelName) gslbutils.Logf("key: %s, modelName: %s, msg: %s", k...
gsName := DeriveGSLBServiceName(metaObj.GetHostname()) modelName := utils.ADMIN_NS + "/" + gsName found, aviGS := agl.Get(modelName) if !found { gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "generating new model") aviGS = NewAviGSObjectGraph() // Note: For now, the hostname is used as a w...
{ var prevChecksum, newChecksum uint32 obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore) if obj == nil { // error message already logged in the above function return } metaObj := obj.(k8sobjects.MetaObject) if metaObj.GetHostname() == "" { gslbutils.Errf("key: %s, msg: %s", ...
identifier_body
dq_ingestion.go
store = gslbutils.GetAcceptedLBSvcStore() } else { store = gslbutils.GetRejectedLBSvcStore() } if store == nil { gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc") return nil } break } obj, ok := store.GetClusterNSObjectByName(cname, ns, objName) if !ok { gslb...
gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process") return
random_line_split
dq_ingestion.go
utils.GetAcceptedLBSvcStore() } else { store = gslbutils.GetRejectedLBSvcStore() } if store == nil { gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc") return nil } break } obj, ok := store.GetClusterNSObjectByName(cname, ns, objName) if !ok { gslbutils.Warnf("...
{ gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process") return }
conditional_block
dq_ingestion.go
Name, key string, sharedQueue *utils.WorkerQueue) { // First see if there's another instance of the same model in the store modelName := tenant + "/" + gsName bkt := utils.Bkt(modelName, sharedQueue.NumWorkers) sharedQueue.Workqueue[bkt].AddRateLimited(modelName) gslbutils.Logf("key: %s, modelName: %s, msg: %s", k...
(objType, cname, ns, objName, key, storeType string) interface{} { var store *gslbutils.ClusterStore switch objType { case gslbutils.RouteType: if storeType == gslbutils.AcceptedStore { store = gslbutils.GetAcceptedRouteStore() } else { store = gslbutils.GetRejectedRouteStore() } if store == nil { /...
getObjFromStore
identifier_name
app.py
'__getitem__'): return dict(o) raise None app = Flask(__name__) app.json_encoder = JSONEncoder app.config.from_object(config) app.config['SECRET_KEY'] = 'I have a dream' address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\' app.config['UPLOADED_PHOTOS_DEST'] = address app.config['MAX_CO...
folder_name = address + task.folder_name filename = folder_name + "\\data.xlsx" arr = [] ex = xlrd.open_workbook(filename).sheets()[0] for i in range(ex.nrows): col = ex.row_values(i) for index, n in enumerate(col): if isinstance(n, str): col[index] = 0 ...
random_line_split
app.py
'__getitem__'): return dict(o) raise None app = Flask(__name__) app.json_encoder = JSONEncoder app.config.from_object(config) app.config['SECRET_KEY'] = 'I have a dream' address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\' app.config['UPLOADED_PHOTOS_DEST'] = address app.config['MAX_CO...
thods=['GET', 'POST']) def page_list(): user_id = request.headers.get('Authorization',None) task = Task.query.filter_by(user_id=user_id, status=2).first() folder_name = address + task.folder_name if not os.path.exists(folder_name): return jsonify(0) files_list = os.listdir(folder_name) ...
080, debug=True) # app.run(debug=True) @app.route('/page_list', me
conditional_block
app.py
'__getitem__'): return dict(o) raise None app = Flask(__name__) app.json_encoder = JSONEncoder app.config.from_object(config) app.config['SECRET_KEY'] = 'I have a dream' address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\' app.config['UPLOADED_PHOTOS_DEST'] = address app.config['MAX_CO...
thods=['GET', 'POST']) def get_excel(row, line): user_id = request.headers.get('Authorization', None) task = Task.query.filter_by(user_id=user_id, status=2).first() folder_name = address + task.folder_name row = int(row) - 1 line = int(line) - 1 x1 = xlrd.open_workbook(folder_name + '\\data.xlsx...
for cur_line_number, line in enumerate(open(the_file_path, 'rU')): if cur_line_number == line_number-1: return line return '' @app.route('/getValue/<row>/<line>', me
identifier_body
app.py
GET', 'POST']) def index(): return render_template('index.html') @app.route('/index', methods=['GET', 'POST']) def upload_file(): folder_name = request.form.get('folderName') # form = UploadForm() folder = address + folder_name tasks = Task.query.filter_by(folder_name=folder_name).all() if len...
True
identifier_name
mongodb-scraper.py
ed = [] self.table_names = ['account', 'user', 'subscriber', 'customer'] self.column_names = ['pass', 'pwd'] self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}') self.filename = 'combo.txt' # Init the logger self.logger = logging.getLogger('mongodb-...
# Ok, but are they really set? if not all([email_from, email_to, host, port, user, password]): return # Ok, we're good to go body = """ Hi Dude! I have just found a juicy collection! IP: {0} Collection: {1} Rows: {2} """ body = body.format(ip, collection, count) ...
try: threshold = self.settings['email']['threshold'] except KeyError: # No key set return # Result is not interesting enough if count < threshold: return # Do I have all the required strings? try: email_from = self.set...
identifier_body
mongodb-scraper.py
= [] self.table_names = ['account', 'user', 'subscriber', 'customer'] self.column_names = ['pass', 'pwd'] self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}') self.filename = 'combo.txt' # Init the logger self.logger = logging.getLogger('mongodb-sc...
(self): for ip in self.ips: # Do I have already processed this IP? if ip in self.processed: continue self.logger.info("Connecting to " + ip) try: client = MongoClient(ip, connectTimeoutMS=5000) dbs = client.databas...
scrape
identifier_name
mongodb-scraper.py
ed = [] self.table_names = ['account', 'user', 'subscriber', 'customer'] self.column_names = ['pass', 'pwd'] self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}') self.filename = 'combo.txt' # Init the logger self.logger = logging.getLogger('mongodb-...
# Load previous data self._load_data() # Let's parse some CLI options parser = argparse.ArgumentParser() parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped') arguments = parser.parse_args() if arguments.skip...
if not os.path.exists('data'): os.makedirs('data')
random_line_split
mongodb-scraper.py
ed = [] self.table_names = ['account', 'user', 'subscriber', 'customer'] self.column_names = ['pass', 'pwd'] self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}') self.filename = 'combo.txt' # Init the logger self.logger = logging.getLogger('mongodb-...
self.logger.debug("\t\tAnalyzing db: " + db) o_db = client[db] try: collections = o_db.collection_names() except (KeyboardInterrupt, SystemExit): return except Exception: # Don'...
if ip in self.processed: continue self.logger.info("Connecting to " + ip) try: client = MongoClient(ip, connectTimeoutMS=5000) dbs = client.database_names() except (KeyboardInterrupt, SystemExit): return ex...
conditional_block
lib.rs
} } pub type Pid = usize; #[derive(Clone, Copy, Debug)] pub struct IntRegisters(pub syscall::IntRegisters); impl IntRegisters { pub fn format_syscall_bare(&self) -> String { arch::format_syscall(None, &self) } pub fn format_syscall_full(&self, mem: &mut Memory) -> String { arch::forma...
/// breakpoint event, it returns an event handler that lets you /// handle events yourself. pub fn next_event(&mut self, flags: Flags) -> Result<EventHandler> { trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?); Ok(EventHandler { inner: self }) } /// Convert this tracer to...
random_line_split
lib.rs
} } pub type Pid = usize; #[derive(Clone, Copy, Debug)] pub struct IntRegisters(pub syscall::IntRegisters); impl IntRegisters { pub fn format_syscall_bare(&self) -> String { arch::format_syscall(None, &self) } pub fn format_syscall_full(&self, mem: &mut Memory) -> String { arch::format_s...
{ pub file: File, pub regs: Registers, pub mem: Memory, } impl Tracer { /// Attach to a tracer with the specified PID. This will stop it. pub fn attach(pid: Pid) -> Result<Self> { Ok(Self { file: OpenOptions::new() .read(true) .write(true) ...
Tracer
identifier_name
ipymel.py
# and DAG_COMPLETER_RE, since those are simply more restrictive versions, # which set "dagOnly" # print "text_until_cursor: {}".format(event.text_until_cursor) # print "symbol: {}".format(event.symbol) linematch = NAME_COMPLETER_RE.match(event.text_until_cursor) # print "linematch: {}".format(li...
num = len(roots) - 1 tree = '' for i, x in enumerate(roots): for line in self.do_level(x, 0, [i == num]): tree += line return tree
identifier_body
ipymel.py
): for line in self.do_level(x, depth, isLast + [i == num]): yield line def make_tree(self, roots): num = len(roots) - 1 tree = '' for i, x in enumerate(roots): for line in self.do_level(x, 0, [i == num]): tree += line ...
random_line_split
ipymel.py
() try: sel.add(path) except RuntimeError: return [] if not sel.length(): return [] try: dagPath = sel.getDagPath(0) except TypeError: return [] return [om.MFnDagNode(dagPath.child(i)).fullPathName() for i in range(dagPath.childCount())] def ...
# if we have only one match, get the children as well if len(matches) == 1 and not attr_match: res = get_children(matches[0] + '|', dagOnly) matches += res if event.symbol != nametext: # in some situations, the event.symbol will only have incomplete # information - ie, if ...
raise TryNext
conditional_block
ipymel.py
yield Colors.Yellow + branch + sep + Colors.Normal+ name + '\n' if not self.options.maxdepth or depth < self.options.maxdepth: for i, x in enumerate(children): for line in self.do_level(x, depth, isLast + [i == num]): yield line def make_tree(self, roots):...
sigint_plugin_loaded_callback
identifier_name
Server.py
(self.groups) def isEmpty(self): """ Whether there is still group waiting :return: True/False """ if len(self.groups) > 0: return False else: return True def add_queue(self, group): """ Add the newly come group into queue...
>>> q2.groups[1].get_groupID() # Test whether vip would become the first 0 >>> g2=Group(20,2,False,2) >>> q2.add_queue(g2) >>> g3=Group(30,1,True,3) >>> q2.add_queue(g3) >>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly 2 >>...
>>> q2.add_queue(g0) >>> len(q2.groups) # Test whether group is correctly added 1 >>> g1=Group(14,1,True,1) >>> q2.add_queue(g1)
random_line_split
Server.py
.groups) def isEmpty(self): """ Whether there is still group waiting :return: True/False """ if len(self.groups) > 0: return False else: return True def add_queue(self, group): """ Add the newly come group into queue prop...
elif len(self.groups) <= 1: self.groups.insert(0, group) elif group.get_vip() is False: self.groups.insert(0, group) def del_queue(self): # delete last=delete first come group """ Pop the head (index = length of queue -1 ) of queue :return: ...
self.groups.insert(1, group)
conditional_block
Server.py
(self.groups) def isEmpty(self): """ Whether there is still group waiting :return: True/False """ if len(self.groups) > 0: return False else: return True def add_queue(self, group): """ Add the newly come group into queue...
def get_vip(self): return self.vip def get_time_request(self): return self.timeRequest def tablesSetting(number_tables_2, number_tables_4, number_tables_6): """ Initialize tables :param number_tables_2: number of tables for groups with one or two customers. (6) :param number...
""" Calculate the waiting time for the group :param current_time: current time point :return: waiting time for current group >>> g0=Group(20,2,False,0) >>> g0.wait_time(71) 51 """ return current_time - self.timestamp
identifier_body
Server.py
.groups) def isEmpty(self): """ Whether there is still group waiting :return: True/False """ if len(self.groups) > 0: return False else: return True def
(self, group): """ Add the newly come group into queue properly :param group: the group watiing for entering into the queue >>> g0=Group(12,2,False,0) >>> q2=Queue() >>> q2.add_queue(g0) >>> len(q2.groups) # Test whether group is correctly added 1 ...
add_queue
identifier_name