idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
238,300 | def parse ( self , xmp ) : tree = etree . fromstring ( xmp ) rdf_tree = tree . find ( RDF_NS + 'RDF' ) meta = defaultdict ( dict ) for desc in rdf_tree . findall ( RDF_NS + 'Description' ) : for el in desc . getchildren ( ) : ns , tag = self . _parse_tag ( el ) value = self . _parse_value ( el ) meta [ ns ] [ tag ] = value return dict ( meta ) | Run parser and return a dictionary of all the parsed metadata . | 115 | 12 |
238,301 | def _parse_tag ( self , el ) : ns = None tag = el . tag if tag [ 0 ] == '{' : ns , tag = tag [ 1 : ] . split ( '}' , 1 ) if self . ns_map and ns in self . ns_map : ns = self . ns_map [ ns ] return ns , tag | Extract the namespace and tag from an element . | 76 | 10 |
238,302 | def _parse_value ( self , el ) : if el . find ( RDF_NS + 'Bag' ) is not None : value = [ ] for li in el . findall ( RDF_NS + 'Bag/' + RDF_NS + 'li' ) : value . append ( li . text ) elif el . find ( RDF_NS + 'Seq' ) is not None : value = [ ] for li in el . findall ( RDF_NS + 'Seq/' + RDF_NS + 'li' ) : value . append ( li . text ) elif el . find ( RDF_NS + 'Alt' ) is not None : value = { } for li in el . findall ( RDF_NS + 'Alt/' + RDF_NS + 'li' ) : value [ li . get ( XML_NS + 'lang' ) ] = li . text else : value = el . text return value | Extract the metadata value from an element . | 211 | 9 |
238,303 | def parse_rsc_html ( htmlstring ) : converted = UnicodeDammit ( htmlstring ) if not converted . unicode_markup : raise UnicodeDecodeError ( 'Failed to detect encoding, tried [%s]' ) root = fromstring ( htmlstring , parser = HTMLParser ( recover = True , encoding = converted . original_encoding ) ) # Add p.otherpara tags around orphan text newp = None for child in root . get_element_by_id ( 'wrapper' ) : if newp is not None : if child . tag in BLOCK_ELEMENTS or child . get ( 'id' , '' ) . startswith ( 'sect' ) or child . getnext ( ) is None : child . addprevious ( newp ) newp = None else : newp . append ( child ) if newp is None and child . tag in BLOCK_ELEMENTS and child . tail and child . tail . strip ( ) : newp = Element ( 'p' , * * { 'class' : 'otherpara' } ) newp . text = child . tail child . tail = '' return root | Messy RSC HTML needs this special parser to fix problems before creating selector . | 247 | 16 |
238,304 | def replace_rsc_img_chars ( document ) : image_re = re . compile ( 'http://www.rsc.org/images/entities/(?:h[23]+_)?(?:[ib]+_)?char_([0-9a-f]{4})(?:_([0-9a-f]{4}))?\.gif' ) for img in document . xpath ( './/img[starts-with(@src, "http://www.rsc.org/images/entities/")]' ) : m = image_re . match ( img . get ( 'src' ) ) if m : u1 , u2 = m . group ( 1 ) , m . group ( 2 ) if not u2 and u1 in RSC_IMG_CHARS : rep = RSC_IMG_CHARS [ u1 ] else : rep = ( '\\u%s' % u1 ) . encode ( 'ascii' ) . decode ( 'unicode-escape' ) if u2 : rep += ( '\\u%s' % u2 ) . encode ( 'ascii' ) . decode ( 'unicode-escape' ) if img . tail is not None : rep += img . tail # Make sure we don't remove any tail text parent = img . getparent ( ) if parent is not None : previous = img . getprevious ( ) if previous is not None : previous . tail = ( previous . tail or '' ) + rep else : parent . text = ( parent . text or '' ) + rep parent . remove ( img ) return document | Replace image characters with unicode equivalents . | 351 | 9 |
238,305 | def space_references ( document ) : for ref in document . xpath ( './/a/sup/span[@class="sup_ref"]' ) : a = ref . getparent ( ) . getparent ( ) if a is not None : atail = a . tail or '' if not atail . startswith ( ')' ) and not atail . startswith ( ',' ) and not atail . startswith ( ' ' ) : a . tail = ' ' + atail return document | Ensure a space around reference links so there s a gap when they are removed . | 110 | 17 |
238,306 | def load ( ctx , input , output ) : log . debug ( 'chemdataextractor.cluster.load' ) import pickle click . echo ( 'Reading %s' % input . name ) clusters = { } for line in input . readlines ( ) : cluster , word , freq = line . split ( ) clusters [ word ] = cluster pickle . dump ( clusters , output , protocol = pickle . HIGHEST_PROTOCOL ) | Read clusters from file and save to model file . | 98 | 10 |
238,307 | def space_labels ( document ) : for label in document . xpath ( './/bold' ) : # TODO: Make this more permissive to match chemical_label in parser if not label . text or not re . match ( '^\(L?\d\d?[a-z]?\):?$' , label . text , re . I ) : continue parent = label . getparent ( ) previous = label . getprevious ( ) if previous is None : text = parent . text or '' if not text . endswith ( ' ' ) : parent . text = text + ' ' else : text = previous . tail or '' if not text . endswith ( ' ' ) : previous . tail = text + ' ' text = label . tail or '' if not text . endswith ( ' ' ) : label . tail = text + ' ' return document | Ensure space around bold compound labels . | 190 | 8 |
238,308 | def tidy_nlm_references ( document ) : def strip_preceding ( text ) : stext = text . rstrip ( ) if stext . endswith ( '[' ) or stext . endswith ( '(' ) : #log.debug('%s -> %s' % (text, stext[:-1])) return stext [ : - 1 ] return text def strip_between ( text ) : stext = text . strip ( ) if stext in { ',' , '-' , '\u2013' , '\u2212' } : #log.debug('%s -> %s' % (text, '')) return '' return text def strip_following ( text ) : stext = text . lstrip ( ) if stext . startswith ( ']' ) or stext . startswith ( ')' ) : #log.debug('%s -> %s' % (text, stext[1:])) return stext [ 1 : ] return text for ref in document . xpath ( './/xref[@ref-type="bibr"]' ) : parent = ref . getparent ( ) previous = ref . getprevious ( ) next = ref . getnext ( ) if previous is None : parent . text = strip_preceding ( parent . text or '' ) else : previous . tail = strip_preceding ( previous . tail or '' ) if next is not None and next . tag == 'xref' and next . get ( 'ref-type' ) == 'bibr' : ref . tail = strip_between ( ref . tail or '' ) ref . tail = strip_following ( ref . tail or '' ) return document | Remove punctuation around references like brackets commas hyphens . | 369 | 12 |
238,309 | def regex_span_tokenize ( s , regex ) : left = 0 for m in re . finditer ( regex , s , re . U ) : right , next = m . span ( ) if right != 0 : yield left , right left = next yield left , len ( s ) | Return spans that identify tokens in s split using regex . | 61 | 11 |
238,310 | def tokenize ( self , s ) : return [ s [ start : end ] for start , end in self . span_tokenize ( s ) ] | Return a list of token strings from the given sentence . | 32 | 11 |
238,311 | def span_tokenize ( self , s ) : if self . _tokenizer is None : self . _tokenizer = load_model ( self . model ) # for debug in tokenizer.debug_decisions(s): # log.debug(format_debug_decision(debug)) return self . _tokenizer . span_tokenize ( s ) | Return a list of integer offsets that identify sentences in the given text . | 76 | 14 |
238,312 | def _split_span ( self , span , index , length = 0 ) : offset = span [ 1 ] + index if index < 0 else span [ 0 ] + index # log.debug([(span[0], offset), (offset, offset + length), (offset + length, span[1])]) return [ ( span [ 0 ] , offset ) , ( offset , offset + length ) , ( offset + length , span [ 1 ] ) ] | Split a span into two or three separate spans at certain indices . | 95 | 13 |
238,313 | def _closing_bracket_index ( self , text , bpair = ( '(' , ')' ) ) : level = 1 for i , char in enumerate ( text [ 1 : ] ) : if char == bpair [ 0 ] : level += 1 elif char == bpair [ 1 ] : level -= 1 if level == 0 : return i + 1 | Return the index of the closing bracket that matches the opening bracket at the start of the text . | 78 | 19 |
238,314 | def _opening_bracket_index ( self , text , bpair = ( '(' , ')' ) ) : level = 1 for i , char in enumerate ( reversed ( text [ : - 1 ] ) ) : if char == bpair [ 1 ] : level += 1 elif char == bpair [ 0 ] : level -= 1 if level == 0 : return len ( text ) - i - 2 | Return the index of the opening bracket that matches the closing bracket at the end of the text . | 86 | 19 |
238,315 | def _is_saccharide_arrow ( self , before , after ) : if ( before and after and before [ - 1 ] . isdigit ( ) and after [ 0 ] . isdigit ( ) and before . rstrip ( '0123456789' ) . endswith ( '(' ) and after . lstrip ( '0123456789' ) . startswith ( ')-' ) ) : return True else : return False | Return True if the arrow is in a chemical name . | 96 | 11 |
238,316 | def get_names ( cs ) : records = [ ] for c in cs : records . extend ( c . get ( 'names' , [ ] ) ) return records | Return list of every name . | 35 | 6 |
238,317 | def get_labels ( cs ) : records = [ ] for c in cs : records . extend ( c . get ( 'labels' , [ ] ) ) return records | Return list of every label . | 37 | 6 |
238,318 | def get_ids ( cs ) : records = [ ] for c in cs : records . append ( { k : c [ k ] for k in c if k in { 'names' , 'labels' } } ) return records | Return chemical identifier records . | 49 | 5 |
238,319 | def memoized_property ( fget ) : attr_name = '_{}' . format ( fget . __name__ ) @ functools . wraps ( fget ) def fget_memoized ( self ) : if not hasattr ( self , attr_name ) : setattr ( self , attr_name , fget ( self ) ) return getattr ( self , attr_name ) return property ( fget_memoized ) | Decorator to create memoized properties . | 100 | 9 |
238,320 | def memoize ( obj ) : cache = obj . cache = { } @ functools . wraps ( obj ) def memoizer ( * args , * * kwargs ) : if args not in cache : cache [ args ] = obj ( * args , * * kwargs ) return cache [ args ] return memoizer | Decorator to create memoized functions methods or classes . | 68 | 12 |
238,321 | def evaluate ( self , gold ) : tagged_sents = self . tag_sents ( [ w for ( w , t ) in sent ] for sent in gold ) gold_tokens = sum ( gold , [ ] ) test_tokens = sum ( tagged_sents , [ ] ) accuracy = float ( sum ( x == y for x , y in six . moves . zip ( gold_tokens , test_tokens ) ) ) / len ( test_tokens ) return accuracy | Evaluate the accuracy of this tagger using a gold standard corpus . | 110 | 15 |
238,322 | def predict ( self , features ) : scores = defaultdict ( float ) for feat in features : if feat not in self . weights : continue weights = self . weights [ feat ] for label , weight in weights . items ( ) : scores [ label ] += weight # Do a secondary alphabetic sort, for stability return max ( self . classes , key = lambda label : ( scores [ label ] , label ) ) | Dot - product the features and current weights and return the best label . | 86 | 15 |
238,323 | def update ( self , truth , guess , features ) : def upd_feat ( c , f , w , v ) : param = ( f , c ) self . _totals [ param ] += ( self . i - self . _tstamps [ param ] ) * w self . _tstamps [ param ] = self . i self . weights [ f ] [ c ] = w + v self . i += 1 if truth == guess : return None for f in features : weights = self . weights . setdefault ( f , { } ) upd_feat ( truth , f , weights . get ( truth , 0.0 ) , 1.0 ) upd_feat ( guess , f , weights . get ( guess , 0.0 ) , - 1.0 ) return None | Update the feature weights . | 166 | 5 |
238,324 | def average_weights ( self ) : for feat , weights in self . weights . items ( ) : new_feat_weights = { } for clas , weight in weights . items ( ) : param = ( feat , clas ) total = self . _totals [ param ] total += ( self . i - self . _tstamps [ param ] ) * weight averaged = round ( total / float ( self . i ) , 3 ) if averaged : new_feat_weights [ clas ] = averaged self . weights [ feat ] = new_feat_weights return None | Average weights from all iterations . | 122 | 6 |
238,325 | def save ( self , path ) : with io . open ( path , 'wb' ) as fout : return pickle . dump ( dict ( self . weights ) , fout ) | Save the pickled model weights . | 39 | 7 |
238,326 | def load ( self , path ) : with io . open ( path , 'rb' ) as fin : self . weights = pickle . load ( fin ) | Load the pickled model weights . | 33 | 7 |
238,327 | def train ( self , sentences , nr_iter = 5 ) : self . _make_tagdict ( sentences ) self . perceptron . classes = self . classes for iter_ in range ( nr_iter ) : c = 0 n = 0 for sentence in sentences : prev , prev2 = self . START context = [ t [ 0 ] for t in sentence ] for i , ( token , tag ) in enumerate ( sentence ) : guess = self . tagdict . get ( token ) if not guess : feats = self . _get_features ( i , context , prev , prev2 ) guess = self . perceptron . predict ( feats ) self . perceptron . update ( tag , guess , feats ) prev2 = prev prev = guess c += guess == tag n += 1 random . shuffle ( sentences ) log . debug ( 'Iter %s: %s/%s=%s' % ( iter_ , c , n , ( float ( c ) / n ) * 100 ) ) self . perceptron . average_weights ( ) | Train a model from sentences . | 220 | 6 |
238,328 | def save ( self , f ) : return pickle . dump ( ( self . perceptron . weights , self . tagdict , self . classes , self . clusters ) , f , protocol = pickle . HIGHEST_PROTOCOL ) | Save pickled model to file . | 51 | 7 |
238,329 | def load ( self , model ) : self . perceptron . weights , self . tagdict , self . classes , self . clusters = load_model ( model ) self . perceptron . classes = self . classes | Load pickled model . | 44 | 5 |
238,330 | def train ( self , sentences , model ) : trainer = pycrfsuite . Trainer ( verbose = True ) trainer . set_params ( self . params ) for sentence in sentences : tokens , labels = zip ( * sentence ) features = [ self . _get_features ( tokens , i ) for i in range ( len ( tokens ) ) ] trainer . append ( features , labels ) trainer . train ( model ) self . load ( model ) | Train the CRF tagger using CRFSuite . | 94 | 12 |
238,331 | def load ( self , model ) : self . _dawg . load ( find_data ( model ) ) self . _loaded_model = True | Load pickled DAWG from disk . | 32 | 9 |
238,332 | def build ( self , words ) : words = [ self . _normalize ( tokens ) for tokens in words ] self . _dawg = dawg . CompletionDAWG ( words ) self . _loaded_model = True | Construct dictionary DAWG from tokenized words . | 51 | 10 |
238,333 | def _normalize ( self , tokens ) : if self . case_sensitive : return ' ' . join ( self . lexicon [ t ] . normalized for t in tokens ) else : return ' ' . join ( self . lexicon [ t ] . lower for t in tokens ) | Normalization transform to apply to both dictionary words and input tokens . | 59 | 13 |
238,334 | def standardize_role ( role ) : role = role . lower ( ) if any ( c in role for c in { 'synthesis' , 'give' , 'yield' , 'afford' , 'product' , 'preparation of' } ) : return 'product' return role | Convert role text into standardized form . | 65 | 8 |
238,335 | def list ( ctx ) : log . debug ( 'chemdataextractor.data.list' ) click . echo ( 'Downloaded\tPackage' ) for package in PACKAGES : click . echo ( '%s\t%s' % ( package . local_exists ( ) , package . path ) ) | List active data packages . | 68 | 5 |
238,336 | def download ( ctx ) : log . debug ( 'chemdataextractor.data.download' ) count = 0 for package in PACKAGES : success = package . download ( ) if success : count += 1 click . echo ( 'Successfully downloaded %s new data packages (%s existing)' % ( count , len ( PACKAGES ) - count ) ) | Download data . | 74 | 3 |
238,337 | def find_data ( path , warn = True ) : full_path = os . path . join ( get_data_dir ( ) , path ) if warn and not os . path . isfile ( full_path ) : for package in PACKAGES : if path == package . path : log . warn ( '%s doesn\'t exist. Run `cde data download` to get it.' % path ) break return full_path | Return the absolute path to a data file within the data directory . | 92 | 13 |
238,338 | def load_model ( path ) : abspath = find_data ( path ) cached = _model_cache . get ( abspath ) if cached is not None : log . debug ( 'Using cached copy of %s' % path ) return cached log . debug ( 'Loading model %s' % path ) try : with io . open ( abspath , 'rb' ) as f : model = six . moves . cPickle . load ( f ) except IOError : raise ModelNotFoundError ( 'Could not load %s. Have you run `cde data download`?' % path ) _model_cache [ abspath ] = model return model | Load a model from a pickle file in the data directory . Cached so model is only loaded once . | 138 | 22 |
238,339 | def normalize ( self , text ) : text = super ( ChemNormalizer , self ) . normalize ( text ) # Normalize element spelling if self . chem_spell : text = re . sub ( r'sulph' , r'sulf' , text , flags = re . I ) text = re . sub ( r'aluminum' , r'aluminium' , text , flags = re . I ) text = re . sub ( r'cesium' , r'caesium' , text , flags = re . I ) return text | Normalize unicode hyphens whitespace and some chemistry terms and formatting . | 117 | 15 |
238,340 | def add ( self , text ) : # logging.debug('Adding to lexicon: %s' % text) if text not in self . lexemes : normalized = self . normalized ( text ) self . lexemes [ text ] = Lexeme ( text = text , normalized = normalized , lower = self . lower ( normalized ) , first = self . first ( normalized ) , suffix = self . suffix ( normalized ) , shape = self . shape ( normalized ) , length = self . length ( normalized ) , upper_count = self . upper_count ( normalized ) , lower_count = self . lower_count ( normalized ) , digit_count = self . digit_count ( normalized ) , is_alpha = self . is_alpha ( normalized ) , is_ascii = self . is_ascii ( normalized ) , is_digit = self . is_digit ( normalized ) , is_lower = self . is_lower ( normalized ) , is_upper = self . is_upper ( normalized ) , is_title = self . is_title ( normalized ) , is_punct = self . is_punct ( normalized ) , is_hyphenated = self . is_hyphenated ( normalized ) , like_url = self . like_url ( normalized ) , like_number = self . like_number ( normalized ) , cluster = self . cluster ( normalized ) ) | Add text to the lexicon . | 295 | 7 |
238,341 | def serialize ( self ) : data = { 'type' : self . __class__ . __name__ , 'caption' : self . caption . serialize ( ) , 'headings' : [ [ cell . serialize ( ) for cell in hrow ] for hrow in self . headings ] , 'rows' : [ [ cell . serialize ( ) for cell in row ] for row in self . rows ] , } return data | Convert Table element to python dictionary . | 95 | 8 |
238,342 | def merge ( self , other ) : log . debug ( 'Merging: %s and %s' % ( self . serialize ( ) , other . serialize ( ) ) ) for k in self . keys ( ) : for new_item in other [ k ] : if new_item not in self [ k ] : self [ k ] . append ( new_item ) log . debug ( 'Result: %s' % self . serialize ( ) ) return self | Merge data from another Compound into this Compound . | 100 | 12 |
238,343 | def merge_contextual ( self , other ) : # TODO: This is currently dependent on our data model? Make more robust to schema changes # Currently we assume all lists at Compound level, with 1 further potential nested level of lists for k in self . keys ( ) : # print('key: %s' % k) for item in self [ k ] : # print('item: %s' % item) for other_item in other . get ( k , [ ] ) : # Skip text properties (don't merge names, labels, roles) if isinstance ( other_item , six . text_type ) : continue for otherk in other_item . keys ( ) : if isinstance ( other_item [ otherk ] , list ) : if len ( other_item [ otherk ] ) > 0 and len ( item [ otherk ] ) > 0 : other_nested_item = other_item [ otherk ] [ 0 ] for othernestedk in other_nested_item . keys ( ) : for nested_item in item [ otherk ] : if not nested_item [ othernestedk ] : nested_item [ othernestedk ] = other_nested_item [ othernestedk ] elif not item [ otherk ] : item [ otherk ] = other_item [ otherk ] log . debug ( 'Result: %s' % self . serialize ( ) ) return self | Merge in contextual info from a template Compound . | 306 | 11 |
238,344 | def is_id_only ( self ) : for key , value in self . items ( ) : if key not in { 'names' , 'labels' , 'roles' } and value : return False if self . names or self . labels : return True return False | Return True if identifier information only . | 58 | 7 |
238,345 | def join ( tokens , start , result ) : texts = [ ] if len ( result ) > 0 : for e in result : for child in e . iter ( ) : if child . text is not None : texts . append ( child . text ) return [ E ( result [ 0 ] . tag , ' ' . join ( texts ) ) ] | Join tokens into a single string with spaces between . | 72 | 10 |
238,346 | def strip_stop ( tokens , start , result ) : for e in result : for child in e . iter ( ) : if child . text . endswith ( '.' ) : child . text = child . text [ : - 1 ] return result | Remove trailing full stop from tokens . | 53 | 7 |
238,347 | def fix_whitespace ( tokens , start , result ) : for e in result : for child in e . iter ( ) : child . text = child . text . replace ( ' , ' , ', ' ) for hyphen in HYPHENS : child . text = child . text . replace ( ' %s ' % hyphen , '%s' % hyphen ) child . text = re . sub ( r'- (.) -' , r'-\1-' , child . text ) return result | Fix whitespace around hyphens and commas . Can be used to remove whitespace tokenization artefacts . | 106 | 22 |
238,348 | def detect ( self , fstring , fname = None ) : if fname is not None and '.' in fname : extension = fname . rsplit ( '.' , 1 ) [ 1 ] if extension in { 'pdf' , 'html' , 'xml' } : return False return True | Have a stab at most files . | 64 | 7 |
238,349 | def _process_layout ( self , layout ) : # Here we just group text into paragraphs elements = [ ] for lt_obj in layout : if isinstance ( lt_obj , LTTextBox ) or isinstance ( lt_obj , LTTextLine ) : elements . append ( Paragraph ( lt_obj . get_text ( ) . strip ( ) ) ) elif isinstance ( lt_obj , LTFigure ) : # Recursive... elements . extend ( self . _process_layout ( lt_obj ) ) return elements | Process an LTPage layout and return a list of elements . | 119 | 13 |
238,350 | def get_encoding ( input_string , guesses = None , is_html = False ) : converted = UnicodeDammit ( input_string , override_encodings = [ guesses ] if guesses else [ ] , is_html = is_html ) return converted . original_encoding | Return the encoding of a byte string . Uses bs4 UnicodeDammit . | 61 | 16 |
238,351 | def levenshtein ( s1 , s2 , allow_substring = False ) : len1 , len2 = len ( s1 ) , len ( s2 ) lev = [ ] for i in range ( len1 + 1 ) : lev . append ( [ 0 ] * ( len2 + 1 ) ) for i in range ( len1 + 1 ) : lev [ i ] [ 0 ] = i for j in range ( len2 + 1 ) : lev [ 0 ] [ j ] = 0 if allow_substring else j for i in range ( len1 ) : for j in range ( len2 ) : lev [ i + 1 ] [ j + 1 ] = min ( lev [ i ] [ j + 1 ] + 1 , lev [ i + 1 ] [ j ] + 1 , lev [ i ] [ j ] + ( s1 [ i ] != s2 [ j ] ) ) return min ( lev [ len1 ] ) if allow_substring else lev [ len1 ] [ len2 ] | Return the Levenshtein distance between two strings . | 216 | 11 |
238,352 | def bracket_level ( text , open = { '(' , '[' , '{' } , close = { ')' , ']' , '}' } ) : level = 0 for c in text : if c in open : level += 1 elif c in close : level -= 1 return level | Return 0 if string contains balanced brackets or no brackets . | 63 | 11 |
238,353 | def list ( ctx ) : log . debug ( 'chemdataextractor.config.list' ) for k in config : click . echo ( '%s : %s' % ( k , config [ k ] ) ) | List all config values . | 48 | 5 |
238,354 | def train_crf ( ctx , input , output , clusters ) : click . echo ( 'chemdataextractor.crf.train' ) sentences = [ ] for line in input : sentence = [ ] for t in line . split ( ) : token , tag , iob = t . rsplit ( '/' , 2 ) sentence . append ( ( ( token , tag ) , iob ) ) if sentence : sentences . append ( sentence ) tagger = CrfCemTagger ( clusters = clusters ) tagger . train ( sentences , output ) | Train CRF CEM recognizer . | 118 | 8 |
238,355 | def sentences ( self ) : sents = [ ] spans = self . sentence_tokenizer . span_tokenize ( self . text ) for span in spans : sent = Sentence ( text = self . text [ span [ 0 ] : span [ 1 ] ] , start = span [ 0 ] , end = span [ 1 ] , word_tokenizer = self . word_tokenizer , lexicon = self . lexicon , abbreviation_detector = self . abbreviation_detector , pos_tagger = self . pos_tagger , ner_tagger = self . ner_tagger , parsers = self . parsers , document = self . document ) sents . append ( sent ) return sents | Return a list of Sentences that make up this text passage . | 152 | 13 |
238,356 | def records ( self ) : return ModelList ( * [ r for sent in self . sentences for r in sent . records ] ) | Return a list of records for this text passage . | 27 | 10 |
238,357 | def tokens ( self ) : spans = self . word_tokenizer . span_tokenize ( self . text ) toks = [ Token ( text = self . text [ span [ 0 ] : span [ 1 ] ] , start = span [ 0 ] + self . start , end = span [ 1 ] + self . start , lexicon = self . lexicon ) for span in spans ] return toks | Return a list of token Spans for this sentence . | 85 | 11 |
238,358 | def tags ( self ) : tags = self . pos_tags for i , tag in enumerate ( self . ner_tags ) : if tag is not None : tags [ i ] = tag return tags | Return combined POS and NER tags . | 42 | 8 |
238,359 | def records ( self ) : compounds = ModelList ( ) seen_labels = set ( ) # Ensure no control characters are sent to a parser (need to be XML compatible) tagged_tokens = [ ( CONTROL_RE . sub ( '' , token ) , tag ) for token , tag in self . tagged_tokens ] for parser in self . parsers : for record in parser . parse ( tagged_tokens ) : p = record . serialize ( ) if not p : # TODO: Potential performance issues? continue # Skip duplicate records if record in compounds : continue # Skip just labels that have already been seen (bit of a hack) if all ( k in { 'labels' , 'roles' } for k in p . keys ( ) ) and set ( record . labels ) . issubset ( seen_labels ) : continue seen_labels . update ( record . labels ) compounds . append ( record ) return compounds | Return a list of records for this sentence . | 202 | 9 |
238,360 | def prepare_gold ( ctx , annotations , gout ) : click . echo ( 'chemdataextractor.chemdner.prepare_gold' ) for line in annotations : pmid , ta , start , end , text , category = line . strip ( ) . split ( '\t' ) gout . write ( '%s\t%s:%s:%s\n' % ( pmid , ta , start , end ) ) | Prepare bc - evaluate gold file from annotations supplied by CHEMDNER . | 98 | 16 |
238,361 | def prepare_tokens ( ctx , input , annotations , tout , lout ) : click . echo ( 'chemdataextractor.chemdner.prepare_tokens' ) # Collect the annotations into a dict anndict = defaultdict ( list ) for line in annotations : pmid , ta , start , end , text , category = line . strip ( ) . split ( '\t' ) anndict [ ( pmid , ta ) ] . append ( ( int ( start ) , int ( end ) , text ) ) # Process the corpus for line in input : pmid , title , abstract = line . strip ( ) . split ( u'\t' ) for t , section , anns in [ ( Title ( title ) , 'T' , anndict . get ( ( pmid , u'T' ) , [ ] ) ) , ( Paragraph ( abstract ) , u'A' , anndict . get ( ( pmid , u'A' ) , [ ] ) ) ] : # Write our tokens with POS and IOB tags tagged = _prep_tags ( t , anns ) for i , sentence in enumerate ( tagged ) : tout . write ( u' ' . join ( [ u'/' . join ( [ token , tag , label ] ) for token , tag , label in sentence ] ) ) lout . write ( u' ' . join ( [ u'/' . join ( [ token , label ] ) for token , tag , label in sentence ] ) ) tout . write ( u'\n' ) lout . write ( u'\n' ) tout . write ( u'\n' ) lout . write ( u'\n' ) | Prepare tokenized and tagged corpus file from those supplied by CHEMDNER . | 371 | 17 |
238,362 | def _prep_tags ( t , annotations ) : tags = [ [ 'O' for _ in sent . tokens ] for sent in t . sentences ] for start , end , text in annotations : done_first = False for i , sent in enumerate ( t . sentences ) : for j , token in enumerate ( sent . tokens ) : if start <= token . start < end or start < token . end <= end : # Token start or end occurs within the annotation tags [ i ] [ j ] = 'I-CM' if done_first else 'B-CM' done_first = True tagged = [ [ ( token [ 0 ] , token [ 1 ] , tags [ i ] [ j ] ) for j , token in enumerate ( sentence . pos_tagged_tokens ) ] for i , sentence in enumerate ( t . sentences ) ] return tagged | Apply IOB chemical entity tags and POS tags to text . | 184 | 12 |
238,363 | def train_all ( ctx , output ) : click . echo ( 'chemdataextractor.pos.train_all' ) click . echo ( 'Output: %s' % output ) ctx . invoke ( train , output = '%s_wsj_nocluster.pickle' % output , corpus = 'wsj' , clusters = False ) ctx . invoke ( train , output = '%s_wsj.pickle' % output , corpus = 'wsj' , clusters = True ) ctx . invoke ( train , output = '%s_genia_nocluster.pickle' % output , corpus = 'genia' , clusters = False ) ctx . invoke ( train , output = '%s_genia.pickle' % output , corpus = 'genia' , clusters = True ) ctx . invoke ( train , output = '%s_wsj_genia_nocluster.pickle' % output , corpus = 'wsj+genia' , clusters = False ) ctx . invoke ( train , output = '%s_wsj_genia.pickle' % output , corpus = 'wsj+genia' , clusters = True ) | Train POS tagger on WSJ GENIA and both . With and without cluster features . | 263 | 18 |
238,364 | def evaluate_all ( ctx , model ) : click . echo ( 'chemdataextractor.pos.evaluate_all' ) click . echo ( 'Model: %s' % model ) ctx . invoke ( evaluate , model = '%s_wsj_nocluster.pickle' % model , corpus = 'wsj' , clusters = False ) ctx . invoke ( evaluate , model = '%s_wsj_nocluster.pickle' % model , corpus = 'genia' , clusters = False ) ctx . invoke ( evaluate , model = '%s_wsj.pickle' % model , corpus = 'wsj' , clusters = True ) ctx . invoke ( evaluate , model = '%s_wsj.pickle' % model , corpus = 'genia' , clusters = True ) ctx . invoke ( evaluate , model = '%s_genia_nocluster.pickle' % model , corpus = 'wsj' , clusters = False ) ctx . invoke ( evaluate , model = '%s_genia_nocluster.pickle' % model , corpus = 'genia' , clusters = False ) ctx . invoke ( evaluate , model = '%s_genia.pickle' % model , corpus = 'wsj' , clusters = True ) ctx . invoke ( evaluate , model = '%s_genia.pickle' % model , corpus = 'genia' , clusters = True ) ctx . invoke ( evaluate , model = '%s_wsj_genia_nocluster.pickle' % model , corpus = 'wsj' , clusters = False ) ctx . invoke ( evaluate , model = '%s_wsj_genia_nocluster.pickle' % model , corpus = 'genia' , clusters = False ) ctx . invoke ( evaluate , model = '%s_wsj_genia.pickle' % model , corpus = 'wsj' , clusters = True ) ctx . invoke ( evaluate , model = '%s_wsj_genia.pickle' % model , corpus = 'genia' , clusters = True ) | Evaluate POS taggers on WSJ and GENIA . | 473 | 13 |
238,365 | def train ( ctx , output , corpus , clusters ) : click . echo ( 'chemdataextractor.pos.train' ) click . echo ( 'Output: %s' % output ) click . echo ( 'Corpus: %s' % corpus ) click . echo ( 'Clusters: %s' % clusters ) wsj_sents = [ ] genia_sents = [ ] if corpus == 'wsj' or corpus == 'wsj+genia' : wsj_sents = list ( wsj_training . tagged_sents ( ) ) # For WSJ, remove all tokens with -NONE- tag for i , wsj_sent in enumerate ( wsj_sents ) : wsj_sents [ i ] = [ t for t in wsj_sent if not t [ 1 ] == '-NONE-' ] if corpus == 'genia' or corpus == 'wsj+genia' : genia_sents = list ( genia_training . tagged_sents ( ) ) # Translate GENIA for i , genia_sent in enumerate ( genia_sents ) : for j , ( token , tag ) in enumerate ( genia_sent ) : if tag == '(' : genia_sents [ i ] [ j ] = ( token , '-LRB-' ) # ( to -RLB- (also do for evaluation) elif tag == ')' : genia_sents [ i ] [ j ] = ( token , '-RRB-' ) # ) to -RRB- (also do for evaluation) elif tag == 'CT' : genia_sents [ i ] [ j ] = ( token , 'DT' ) # Typo? elif tag == 'XT' : genia_sents [ i ] [ j ] = ( token , 'DT' ) # Typo? elif tag == '-' : genia_sents [ i ] [ j ] = ( token , ':' ) # Single hyphen character for dash elif tag == 'N' : genia_sents [ i ] [ j ] = ( token , 'NN' ) # Typo? elif tag == 'PP' : genia_sents [ i ] [ j ] = ( token , 'PRP' ) # Typo? elif tag == '' and token == ')' : genia_sents [ i ] [ j ] = ( token , '-RRB-' ) # Typo? elif tag == '' and token == 'IFN-gamma' : genia_sents [ i ] [ j ] = ( token , 'NN' ) # Typo? elif '|' in tag : genia_sents [ i ] [ j ] = ( token , tag . split ( '|' ) [ 0 ] ) # If contains |, choose first part # Filter any tags not in the allowed tagset (Shouldn't be any left anyway) genia_sents [ i ] = [ t for t in genia_sent if t [ 1 ] in TAGS ] if corpus == 'wsj' : training_corpus = wsj_sents elif corpus == 'genia' : training_corpus = genia_sents elif corpus == 'wsj+genia' : training_corpus = wsj_sents + genia_sents else : raise click . 
ClickException ( 'Invalid corpus' ) tagger = ChemCrfPosTagger ( clusters = clusters ) tagger . train ( training_corpus , output ) | Train POS Tagger . | 784 | 5 |
238,366 | def evaluate ( ctx , model , corpus , clusters ) : click . echo ( 'chemdataextractor.pos.evaluate' ) if corpus == 'wsj' : evaluation = wsj_evaluation sents = list ( evaluation . tagged_sents ( ) ) for i , wsj_sent in enumerate ( sents ) : sents [ i ] = [ t for t in wsj_sent if not t [ 1 ] == '-NONE-' ] elif corpus == 'genia' : evaluation = genia_evaluation sents = list ( evaluation . tagged_sents ( ) ) # Translate GENIA bracket tags for i , genia_sent in enumerate ( sents ) : for j , ( token , tag ) in enumerate ( genia_sent ) : if tag == '(' : sents [ i ] [ j ] = ( token , '-LRB-' ) elif tag == ')' : sents [ i ] [ j ] = ( token , '-RRB-' ) else : raise click . ClickException ( 'Invalid corpus' ) tagger = ChemCrfPosTagger ( model = model , clusters = clusters ) accuracy = tagger . evaluate ( sents ) click . echo ( '%s on %s: %s' % ( model , evaluation , accuracy ) ) | Evaluate performance of POS Tagger . | 286 | 9 |
238,367 | def evaluate_perceptron ( ctx , model , corpus ) : click . echo ( 'chemdataextractor.pos.evaluate' ) if corpus == 'wsj' : evaluation = wsj_evaluation sents = list ( evaluation . tagged_sents ( ) ) for i , wsj_sent in enumerate ( sents ) : sents [ i ] = [ t for t in wsj_sent if not t [ 1 ] == u'-NONE-' ] elif corpus == 'genia' : evaluation = genia_evaluation sents = list ( evaluation . tagged_sents ( ) ) # Translate GENIA bracket tags for i , genia_sent in enumerate ( sents ) : for j , ( token , tag ) in enumerate ( genia_sent ) : if tag == u'(' : sents [ i ] [ j ] = ( token , u'-LRB-' ) elif tag == u')' : sents [ i ] [ j ] = ( token , u'-RRB-' ) else : raise click . ClickException ( 'Invalid corpus' ) tagger = ChemApPosTagger ( model = model ) accuracy = tagger . evaluate ( sents ) click . echo ( '%s on %s: %s' % ( model , evaluation , accuracy ) ) | Evaluate performance of Averaged Perceptron POS Tagger . | 285 | 15 |
238,368 | def tag ( ctx , input , output ) : log . info ( 'chemdataextractor.pos.tag' ) log . info ( 'Reading %s' % input . name ) doc = Document . from_file ( input ) for element in doc . elements : if isinstance ( element , Text ) : for sentence in element . sentences : output . write ( u' ' . join ( u'/' . join ( [ token , tag ] ) for token , tag in sentence . pos_tagged_tokens ) ) output . write ( u'\n' ) | Output POS - tagged tokens . | 122 | 6 |
238,369 | def make_request ( self , session , url , * * kwargs ) : log . debug ( 'Making request: GET %s %s' % ( url , kwargs ) ) return session . get ( url , * * kwargs ) | Make a HTTP GET request . | 54 | 6 |
238,370 | def make_request ( self , session , url , * * kwargs ) : log . debug ( 'Making request: POST %s %s' % ( url , kwargs ) ) return session . post ( url , * * kwargs ) | Make a HTTP POST request . | 54 | 6 |
238,371 | def run ( self , url ) : url = self . process_url ( url ) if not url : return response = self . make_request ( self . http , url ) selector = self . process_response ( response ) entities = [ ] for root in self . get_roots ( selector ) : entity = self . entity ( root ) entity = self . process_entity ( entity ) if entity : entities . append ( entity ) return EntityList ( * entities ) | Request URL scrape response and return an EntityList . | 97 | 10 |
238,372 | def clean_html ( self , html ) : result_type = type ( html ) if isinstance ( html , six . string_types ) : doc = html_fromstring ( html ) else : doc = copy . deepcopy ( html ) self ( doc ) if issubclass ( result_type , six . binary_type ) : return tostring ( doc , encoding = 'utf-8' ) elif issubclass ( result_type , six . text_type ) : return tostring ( doc , encoding = 'unicode' ) else : return doc | Apply Cleaner to HTML string or document and return a cleaned string or document . | 119 | 16 |
238,373 | def clean_markup ( self , markup , parser = None ) : result_type = type ( markup ) if isinstance ( markup , six . string_types ) : doc = fromstring ( markup , parser = parser ) else : doc = copy . deepcopy ( markup ) self ( doc ) if issubclass ( result_type , six . binary_type ) : return tostring ( doc , encoding = 'utf-8' ) elif issubclass ( result_type , six . text_type ) : return tostring ( doc , encoding = 'unicode' ) else : return doc | Apply Cleaner to markup string or document and return a cleaned string or document . | 126 | 16 |
238,374 | def floats ( s ) : try : return float ( s ) except ValueError : s = re . sub ( r'(\d)\s*\(\d+(\.\d+)?\)' , r'\1' , s ) # Remove bracketed numbers from end s = re . sub ( r'(\d)\s*±\s*\d+(\.\d+)?' , r'\1' , s ) # Remove uncertainties from end s = s . rstrip ( '\'"+-=<>/,.:;!?)]}…∼~≈×*_≥≤' ) # Remove trailing punctuation s = s . lstrip ( '\'"+=<>/([{∼~≈×*_≥≤£$€#§' ) # Remove leading punctuation s = s . replace ( ',' , '' ) # Remove commas s = '' . join ( s . split ( ) ) # Strip whitespace s = re . sub ( r'(\d)\s*[×x]\s*10\^?(-?\d)' , r'\1e\2' , s ) # Convert scientific notation return float ( s ) | Convert string to float . Handles more string formats that the standard python conversion . | 245 | 17 |
238,375 | def strip_querystring ( url ) : p = six . moves . urllib . parse . urlparse ( url ) return p . scheme + "://" + p . netloc + p . path | Remove the querystring from the end of a URL . | 43 | 11 |
238,376 | def extract_emails ( text ) : text = text . replace ( u'\u2024' , '.' ) emails = [ ] for m in EMAIL_RE . findall ( text ) : emails . append ( m [ 0 ] ) return emails | Return a list of email addresses extracted from the string . | 56 | 11 |
238,377 | def unapostrophe ( text ) : text = re . sub ( r'[%s]s?$' % '' . join ( APOSTROPHES ) , '' , text ) return text | Strip apostrophe and s from the end of a string . | 44 | 13 |
238,378 | def getLocalTime ( date , time , * args , * * kwargs ) : if time is not None : return getLocalDateAndTime ( date , time , * args , * * kwargs ) [ 1 ] | Get the time in the local timezone from date and time | 48 | 12 |
238,379 | def getLocalDateAndTime ( date , time , * args , * * kwargs ) : localDt = getLocalDatetime ( date , time , * args , * * kwargs ) if time is not None : return ( localDt . date ( ) , localDt . timetz ( ) ) else : return ( localDt . date ( ) , None ) | Get the date and time in the local timezone from date and optionally time | 82 | 15 |
238,380 | def getLocalDatetime ( date , time , tz = None , timeDefault = dt . time . max ) : localTZ = timezone . get_current_timezone ( ) if tz is None or tz == localTZ : localDt = getAwareDatetime ( date , time , tz , timeDefault ) else : # create in event's time zone eventDt = getAwareDatetime ( date , time , tz , timeDefault ) # convert to local time zone localDt = eventDt . astimezone ( localTZ ) if time is None : localDt = getAwareDatetime ( localDt . date ( ) , None , localTZ , timeDefault ) return localDt | Get a datetime in the local timezone from date and optionally time | 161 | 14 |
238,381 | def getAwareDatetime ( date , time , tz , timeDefault = dt . time . max ) : if time is None : time = timeDefault datetime = dt . datetime . combine ( date , time ) # arbitary rule to handle DST transitions: # if daylight savings causes an error then use standard time datetime = timezone . make_aware ( datetime , tz , is_dst = False ) return datetime | Get a datetime in the given timezone from date and optionally time . If time is not given it will default to timeDefault if that is given or if not then to the end of the day . | 96 | 41 |
238,382 | def _iso_num_weeks ( iso_year ) : year_start = _iso_year_start ( iso_year ) next_year_start = _iso_year_start ( iso_year + 1 ) year_num_weeks = ( ( next_year_start - year_start ) . days ) // 7 return year_num_weeks | Get the number of ISO - weeks in this year | 79 | 10 |
238,383 | def _iso_info ( iso_year , iso_week ) : prev_year_start = _iso_year_start ( iso_year - 1 ) year_start = _iso_year_start ( iso_year ) next_year_start = _iso_year_start ( iso_year + 1 ) first_day = year_start + dt . timedelta ( weeks = iso_week - 1 ) last_day = first_day + dt . timedelta ( days = 6 ) prev_year_num_weeks = ( ( year_start - prev_year_start ) . days ) // 7 year_num_weeks = ( ( next_year_start - year_start ) . days ) // 7 return ( first_day , last_day , prev_year_num_weeks , year_num_weeks ) | Give all the iso info we need from one calculation | 184 | 10 |
238,384 | def _iso_week_of_month ( date_value ) : weekday_of_first = date_value . replace ( day = 1 ) . weekday ( ) return ( date_value . day + weekday_of_first - 1 ) // 7 | 0 - starting index which ISO - week in the month this date is | 53 | 14 |
238,385 | def _ssweek_year_start ( ssweek_year ) : fifth_jan = dt . date ( ssweek_year , 1 , 5 ) delta = dt . timedelta ( fifth_jan . weekday ( ) + 1 ) return fifth_jan - delta | The gregorian calendar date of the first day of the given Sundaystarting - week year | 57 | 18 |
238,386 | def _ssweek_to_gregorian ( ssweek_year , ssweek_week , ssweek_day ) : year_start = _ssweek_year_start ( ssweek_year ) return year_start + dt . timedelta ( days = ssweek_day - 1 , weeks = ssweek_week - 1 ) | Gregorian calendar date for the given Sundaystarting - week year week and day | 72 | 15 |
238,387 | def _ssweek_num_weeks ( ssweek_year ) : year_start = _ssweek_year_start ( ssweek_year ) next_year_start = _ssweek_year_start ( ssweek_year + 1 ) year_num_weeks = ( ( next_year_start - year_start ) . days ) // 7 return year_num_weeks | Get the number of Sundaystarting - weeks in this year | 85 | 11 |
238,388 | def _ssweek_info ( ssweek_year , ssweek_week ) : prev_year_start = _ssweek_year_start ( ssweek_year - 1 ) year_start = _ssweek_year_start ( ssweek_year ) next_year_start = _ssweek_year_start ( ssweek_year + 1 ) first_day = year_start + dt . timedelta ( weeks = ssweek_week - 1 ) last_day = first_day + dt . timedelta ( days = 6 ) prev_year_num_weeks = ( ( year_start - prev_year_start ) . days ) // 7 year_num_weeks = ( ( next_year_start - year_start ) . days ) // 7 return ( first_day , last_day , prev_year_num_weeks , year_num_weeks ) | Give all the ssweek info we need from one calculation | 194 | 11 |
238,389 | def _gregorian_to_ssweek ( date_value ) : yearStart = _ssweek_year_start ( date_value . year ) weekNum = ( ( date_value - yearStart ) . days ) // 7 + 1 dayOfWeek = date_value . weekday ( ) + 1 return ( date_value . year , weekNum , dayOfWeek ) | Sundaystarting - week year week and day for the given Gregorian calendar date | 79 | 15 |
238,390 | def _ssweek_of_month ( date_value ) : weekday_of_first = ( date_value . replace ( day = 1 ) . weekday ( ) + 1 ) % 7 return ( date_value . day + weekday_of_first - 1 ) // 7 | 0 - starting index which Sundaystarting - week in the month this date is | 58 | 15 |
238,391 | def byweekday ( self ) : retval = [ ] if self . rule . _byweekday : retval += [ Weekday ( day ) for day in self . rule . _byweekday ] if self . rule . _bynweekday : retval += [ Weekday ( day , n ) for day , n in self . rule . _bynweekday ] return retval | The weekdays where the recurrence will be applied . In RFC5545 this is called BYDAY but is renamed by dateutil to avoid ambiguity . | 82 | 30 |
238,392 | def bymonthday ( self ) : retval = [ ] if self . rule . _bymonthday : retval += self . rule . _bymonthday if self . rule . _bynmonthday : retval += self . rule . _bynmonthday return retval | The month days where the recurrence will be applied . | 58 | 11 |
238,393 | def routeDefault ( self , request , year = None ) : eventsView = request . GET . get ( 'view' , self . default_view ) if eventsView in ( "L" , "list" ) : return self . serveUpcoming ( request ) elif eventsView in ( "W" , "weekly" ) : return self . serveWeek ( request , year ) else : return self . serveMonth ( request , year ) | Route a request to the default calendar view . | 92 | 9 |
238,394 | def routeByMonthAbbr ( self , request , year , monthAbbr ) : month = ( DatePictures [ 'Mon' ] . index ( monthAbbr . lower ( ) ) // 4 ) + 1 return self . serveMonth ( request , year , month ) | Route a request with a month abbreviation to the monthly view . | 57 | 13 |
238,395 | def serveMonth ( self , request , year = None , month = None ) : myurl = self . get_url ( request ) def myUrl ( urlYear , urlMonth ) : if 1900 <= urlYear <= 2099 : return myurl + self . reverse_subpage ( 'serveMonth' , args = [ urlYear , urlMonth ] ) today = timezone . localdate ( ) if year is None : year = today . year if month is None : month = today . month year = int ( year ) month = int ( month ) if year == today . year and month == today . month : weekNum = gregorian_to_week_date ( today ) [ 1 ] else : weekNum = gregorian_to_week_date ( dt . date ( year , month , 7 ) ) [ 1 ] weeklyUrl = myurl + self . reverse_subpage ( 'serveWeek' , args = [ year , weekNum ] ) listUrl = myurl + self . reverse_subpage ( 'serveUpcoming' ) prevMonth = month - 1 prevMonthYear = year if prevMonth == 0 : prevMonth = 12 prevMonthYear -= 1 nextMonth = month + 1 nextMonthYear = year if nextMonth == 13 : nextMonth = 1 nextMonthYear += 1 # TODO Consider changing to a TemplateResponse # https://stackoverflow.com/questions/38838601 return render ( request , "joyous/calendar_month.html" , { 'self' : self , 'page' : self , 'version' : __version__ , 'year' : year , 'month' : month , 'today' : today , 'yesterday' : today - dt . timedelta ( 1 ) , 'lastweek' : today - dt . timedelta ( 7 ) , 'prevMonthUrl' : myUrl ( prevMonthYear , prevMonth ) , 'nextMonthUrl' : myUrl ( nextMonthYear , nextMonth ) , 'prevYearUrl' : myUrl ( year - 1 , month ) , 'nextYearUrl' : myUrl ( year + 1 , month ) , 'weeklyUrl' : weeklyUrl , 'listUrl' : listUrl , 'thisMonthUrl' : myUrl ( today . year , today . month ) , 'monthName' : MONTH_NAMES [ month ] , 'weekdayAbbr' : weekday_abbr , 'events' : self . _getEventsByWeek ( request , year , month ) } ) | Monthly calendar view . | 536 | 5 |
238,396 | def serveWeek ( self , request , year = None , week = None ) : myurl = self . get_url ( request ) def myUrl ( urlYear , urlWeek ) : if ( urlYear < 1900 or urlYear > 2099 or urlYear == 2099 and urlWeek == 53 ) : return None if urlWeek == 53 and num_weeks_in_year ( urlYear ) == 52 : urlWeek = 52 return myurl + self . reverse_subpage ( 'serveWeek' , args = [ urlYear , urlWeek ] ) today = timezone . localdate ( ) thisYear , thisWeekNum , dow = gregorian_to_week_date ( today ) if year is None : year = thisYear if week is None : week = thisWeekNum year = int ( year ) week = int ( week ) firstDay , lastDay , prevYearNumWeeks , yearNumWeeks = week_info ( year , week ) if week == 53 and yearNumWeeks == 52 : raise Http404 ( "Only 52 weeks in {}" . format ( year ) ) eventsInWeek = self . _getEventsByDay ( request , firstDay , lastDay ) if firstDay . year >= 1900 : monthlyUrl = myurl + self . reverse_subpage ( 'serveMonth' , args = [ firstDay . year , firstDay . month ] ) else : monthlyUrl = myurl + self . reverse_subpage ( 'serveMonth' , args = [ 1900 , 1 ] ) listUrl = myurl + self . reverse_subpage ( 'serveUpcoming' ) prevWeek = week - 1 prevWeekYear = year if prevWeek == 0 : prevWeek = prevYearNumWeeks prevWeekYear -= 1 nextWeek = week + 1 nextWeekYear = year if nextWeek > yearNumWeeks : nextWeek = 1 nextWeekYear += 1 # TODO Consider changing to a TemplateResponse # https://stackoverflow.com/questions/38838601 return render ( request , "joyous/calendar_week.html" , { 'self' : self , 'page' : self , 'version' : __version__ , 'year' : year , 'week' : week , 'today' : today , 'yesterday' : today - dt . timedelta ( 1 ) , 'prevWeekUrl' : myUrl ( prevWeekYear , prevWeek ) , 'nextWeekUrl' : myUrl ( nextWeekYear , nextWeek ) , 'prevYearUrl' : myUrl ( year - 1 , week ) , 'nextYearUrl' : myUrl ( year + 1 , week ) , 'thisWeekUrl' : myUrl ( thisYear , thisWeekNum ) , 'monthlyUrl' : monthlyUrl , 'listUrl' : listUrl , 'weekName' : _ ( "Week {weekNum}" ) . 
format ( weekNum = week ) , 'weekdayAbbr' : weekday_abbr , 'events' : [ eventsInWeek ] } ) | Weekly calendar view . | 646 | 5 |
238,397 | def serveDay ( self , request , year = None , month = None , dom = None ) : myurl = self . get_url ( request ) today = timezone . localdate ( ) if year is None : year = today . year if month is None : month = today . month if dom is None : dom = today . day year = int ( year ) month = int ( month ) dom = int ( dom ) day = dt . date ( year , month , dom ) eventsOnDay = self . _getEventsOnDay ( request , day ) if len ( eventsOnDay . all_events ) == 1 : event = eventsOnDay . all_events [ 0 ] . page return redirect ( event . get_url ( request ) ) monthlyUrl = myurl + self . reverse_subpage ( 'serveMonth' , args = [ year , month ] ) weekNum = gregorian_to_week_date ( today ) [ 1 ] weeklyUrl = myurl + self . reverse_subpage ( 'serveWeek' , args = [ year , weekNum ] ) listUrl = myurl + self . reverse_subpage ( 'serveUpcoming' ) # TODO Consider changing to a TemplateResponse # https://stackoverflow.com/questions/38838601 return render ( request , "joyous/calendar_list_day.html" , { 'self' : self , 'page' : self , 'version' : __version__ , 'year' : year , 'month' : month , 'dom' : dom , 'day' : day , 'monthlyUrl' : monthlyUrl , 'weeklyUrl' : weeklyUrl , 'listUrl' : listUrl , 'monthName' : MONTH_NAMES [ month ] , 'weekdayName' : WEEKDAY_NAMES [ day . weekday ( ) ] , 'events' : eventsOnDay } ) | The events of the day list view . | 406 | 8 |
238,398 | def serveUpcoming ( self , request ) : myurl = self . get_url ( request ) today = timezone . localdate ( ) monthlyUrl = myurl + self . reverse_subpage ( 'serveMonth' , args = [ today . year , today . month ] ) weekNum = gregorian_to_week_date ( today ) [ 1 ] weeklyUrl = myurl + self . reverse_subpage ( 'serveWeek' , args = [ today . year , weekNum ] ) listUrl = myurl + self . reverse_subpage ( 'servePast' ) upcomingEvents = self . _getUpcomingEvents ( request ) paginator = Paginator ( upcomingEvents , self . EventsPerPage ) try : eventsPage = paginator . page ( request . GET . get ( 'page' ) ) except PageNotAnInteger : eventsPage = paginator . page ( 1 ) except EmptyPage : eventsPage = paginator . page ( paginator . num_pages ) # TODO Consider changing to a TemplateResponse # https://stackoverflow.com/questions/38838601 return render ( request , "joyous/calendar_list_upcoming.html" , { 'self' : self , 'page' : self , 'version' : __version__ , 'today' : today , 'weeklyUrl' : weeklyUrl , 'monthlyUrl' : monthlyUrl , 'listUrl' : listUrl , 'events' : eventsPage } ) | Upcoming events list view . | 317 | 6 |
238,399 | def serveMiniMonth ( self , request , year = None , month = None ) : if not request . is_ajax ( ) : raise Http404 ( "/mini/ is for ajax requests only" ) today = timezone . localdate ( ) if year is None : year = today . year if month is None : month = today . month year = int ( year ) month = int ( month ) # TODO Consider changing to a TemplateResponse # https://stackoverflow.com/questions/38838601 return render ( request , "joyous/includes/minicalendar.html" , { 'self' : self , 'page' : self , 'version' : __version__ , 'today' : today , 'year' : year , 'month' : month , 'calendarUrl' : self . get_url ( request ) , 'monthName' : MONTH_NAMES [ month ] , 'weekdayInfo' : zip ( weekday_abbr , weekday_name ) , 'events' : self . _getEventsByWeek ( request , year , month ) } ) | Serve data for the MiniMonth template tag . | 236 | 10 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.