diff --git a/.dockerignore b/.dockerignore index c712142f..96c8053c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,6 @@ # Git -#.git +.git +.github .gitignore # Logs log/* @@ -8,3 +9,9 @@ tmp/* # Editor temp files *.swp *.swo +coverage +create_permissions.log +# Ignore generated test data +test/data/dictionary.txt +test/data/ontology_files/repo/**/* +test/data/tmp/* diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml new file mode 100644 index 00000000..6105c1d8 --- /dev/null +++ b/.github/workflows/docker-image.yml @@ -0,0 +1,42 @@ +name: Docker Image CI + +on: + release: + types: [published] + +jobs: + push_to_registry: + name: Push Docker image to Docker Hub + runs-on: ubuntu-latest + steps: + - name: Check out the repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v4 + with: + images: bioportal/ncbo_cron + + - name: Build and push Docker image + uses: docker/build-push-action@v4 + with: + context: . + platforms: linux/amd64,linux/arm64 + build-args: | + RUBY_VERSION=2.7 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/ruby-unit-tests.yml b/.github/workflows/ruby-unit-tests.yml index cde331e3..b61ce745 100644 --- a/.github/workflows/ruby-unit-tests.yml +++ b/.github/workflows/ruby-unit-tests.yml @@ -7,18 +7,17 @@ on: jobs: test: strategy: + fail-fast: false matrix: - backend: ['ruby', 'ruby-agraph'] # ruby runs tests with 4store backend and ruby-agraph runs with AllegroGraph backend + backend: ['ncbo_cron', 'ncbo_cron-agraph'] # ncbo_cron runs tests with 4store backend and ncbo_cron-agraph runs with AllegroGraph backend runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: copy config.rb file from template run: cp config/config.test.rb config/config.rb - name: Build docker-compose - working-directory: ./test run: docker-compose build - name: Run unit tests - working-directory: ./test run: | ci_env=`bash <(curl -s https://codecov.io/env)` docker-compose run $ci_env -e CI --rm ${{ matrix.backend }} bundle exec rake test TESTOPTS='-v' diff --git a/.gitignore b/.gitignore index c98b8d52..3fdd0c6d 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,8 @@ config/config.rb config/appliance.rb config/config_*.rb config/*.p12 +config/*.json +data/ projectFilesBackup/ .ruby-version repo* diff --git a/Dockerfile b/Dockerfile index cd191621..dfc03492 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,4 +20,6 @@ ENV BUNDLE_PATH=/srv/ontoportal/bundle RUN bundle install COPY . 
/srv/ontoportal/ncbo_cron +RUN cp /srv/ontoportal/ncbo_cron/config/config.rb.sample /srv/ontoportal/ncbo_cron/config/config.rb + CMD ["/bin/bash"] diff --git a/Gemfile b/Gemfile index b771aaf8..c4fbe255 100644 --- a/Gemfile +++ b/Gemfile @@ -2,13 +2,11 @@ source 'https://rubygems.org' gemspec -gem 'faraday', '~> 1.9' gem 'ffi' -gem "google-apis-analytics_v3" +gem 'google-analytics-data' gem 'mail', '2.6.6' -gem 'minitest', '< 5.0' gem 'multi_json' -gem 'oj', '~> 2.0' +gem 'oj', '~> 3.0' gem 'parseconfig' gem 'pony' gem 'pry' @@ -28,8 +26,8 @@ gem 'ncbo_annotator', github: 'ontoportal-lirmm/ncbo_annotator', branch: 'master # Testing group :test do gem 'email_spec' + gem 'minitest', '< 5.0' gem 'simplecov' gem 'simplecov-cobertura' # for codecov.io gem 'test-unit-minitest' end - diff --git a/Gemfile.lock b/Gemfile.lock index 41535bd7..025d10f0 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -60,7 +60,7 @@ PATH ncbo_cron (0.0.1) dante goo - google-apis-analytics_v3 + google-analytics-data mlanett-redis-lock multi_json ncbo_annotator @@ -83,7 +83,6 @@ GEM connection_pool (2.4.1) cube-ruby (0.0.3) dante (0.2.0) - declarative (0.0.20) docile (1.4.0) domain_name (0.5.20190701) unf (>= 0.0.5, < 1.0.0) @@ -103,17 +102,9 @@ GEM faraday-rack (~> 1.0) faraday-retry (~> 1.0) ruby2_keywords (>= 0.0.4) - faraday-em_http (1.0.0) - faraday-em_synchrony (1.0.0) - faraday-excon (1.1.0) - faraday-httpclient (1.0.1) - faraday-multipart (1.0.4) - multipart-post (~> 2) - faraday-net_http (1.0.1) - faraday-net_http_persistent (1.2.0) - faraday-patron (1.0.0) - faraday-rack (1.0.0) - faraday-retry (1.0.3) + faraday-net_http (3.0.2) + faraday-retry (2.2.0) + faraday (~> 2.0) ffi (1.15.5) google-apis-analytics_v3 (0.13.0) google-apis-core (>= 0.11.0, < 2.a) @@ -129,15 +120,22 @@ GEM googleauth (1.7.0) faraday (>= 0.17.3, < 3.a) jwt (>= 1.4, < 3.0) - memoist (~> 0.16) multi_json (~> 1.11) os (>= 0.9, < 2.0) signet (>= 0.16, < 2.a) + grpc (1.58.0) + google-protobuf (~> 3.23) + googleapis-common-protos-types (~> 1.0) + grpc (1.58.0-x86_64-darwin) + google-protobuf (~> 3.23) + googleapis-common-protos-types (~> 1.0) + grpc (1.58.0-x86_64-linux) + google-protobuf (~> 3.23) + googleapis-common-protos-types (~> 1.0) htmlentities (4.3.4) http-accept (1.7.0) http-cookie (1.0.5) domain_name (~> 0.5) - httpclient (2.8.3) i18n (0.9.5) concurrent-ruby (~> 1.0) json (2.6.3) @@ -151,9 +149,8 @@ GEM systemu (~> 2.6.5) mail (2.6.6) mime-types (>= 1.16, < 4) - memoist (0.16.2) method_source (1.0.0) - mime-types (3.4.1) + mime-types (3.5.1) mime-types-data (~> 3.2015) mime-types-data (3.2023.0218.1) mini_mime (1.1.2) @@ -164,7 +161,7 @@ GEM multipart-post (2.3.0) net-http-persistent (2.9.4) netrc (0.11.0) - oj (2.18.5) + oj (3.16.1) omni_logger (0.1.4) logger os (1.1.4) @@ -196,8 +193,7 @@ GEM http-cookie (>= 1.0.2, < 2.0) mime-types (>= 1.16, < 4.0) netrc (~> 0.8) - retriable (3.1.2) - rexml (3.2.5) + rexml (3.2.6) rsolr (2.5.0) builder (>= 2.1.2) faraday (>= 0.9, < 3, != 2.0.0) @@ -206,7 +202,7 @@ GEM rubyzip (2.3.2) rufus-scheduler (2.0.24) tzinfo (>= 0.3.22) - signet (0.17.0) + signet (0.18.0) addressable (~> 2.8) faraday (>= 0.17.5, < 3.a) jwt (>= 1.5, < 3.0) @@ -228,7 +224,6 @@ GEM trailblazer-option (0.1.2) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - uber (0.1.0) unf (0.1.4) unf_ext unf_ext (0.0.8.2) @@ -242,16 +237,15 @@ PLATFORMS DEPENDENCIES cube-ruby email_spec - faraday (~> 1.9) ffi goo! - google-apis-analytics_v3 + google-analytics-data mail (= 2.6.6) minitest (< 5.0) multi_json ncbo_annotator! ncbo_cron! 
- oj (~> 2.0) + oj (~> 3.0) ontologies_linked_data! parseconfig pony diff --git a/bin/ncbo_cron b/bin/ncbo_cron index a7fbc686..397d726d 100755 --- a/bin/ncbo_cron +++ b/bin/ncbo_cron @@ -111,19 +111,9 @@ opt_parser = OptionParser.new do |opts| opts.on("--disable-update-check", "disable check for updated version of Ontoportal (for VMs)", "(default: #{options[:enable_update_check]})") do |v| options[:enable_update_check] = false end - - - - - opts.on("--disable-dictionary-generation", "disable mgrep dictionary generation job", "(default: #{options[:enable_dictionary_generation]})") do |v| - options[:enable_dictionary_generation] = false + opts.on("--enable-dictionary-generation-cron-job", "ENABLE mgrep dictionary generation JOB and DISABLE dictionary generation during ontology processing. If this is not passed in, dictionary is generated every time an ontology is processed.", "(default: Dictionary is generated on every ontology processing, CRON job is DISABLED)") do |v| + options[:enable_dictionary_generation_cron_job] = true end - - - - - - opts.on("--disable-obofoundry_sync", "disable OBO Foundry synchronization report", "(default: #{options[:enable_obofoundry_sync]})") do |v| options[:enable_obofoundry_sync] = false end @@ -166,18 +156,10 @@ opt_parser = OptionParser.new do |opts| opts.on("--obofoundry_sync SCHED", String, "cron schedule to run OBO Foundry synchronization report", "(default: #{options[:cron_obofoundry_sync]})") do |c| options[:cron_obofoundry_sync] = c end - - - - - opts.on("--dictionary-generation SCHED", String, "cron schedule to run mgrep dictionary generation job", "(default: #{options[:cron_dictionary_generation]})") do |c| - options[:cron_dictionary_generation] = c + opts.on("--dictionary-generation-cron-job SCHED", String, "cron schedule to run mgrep dictionary generation job (if enabled)", "(default: #{options[:cron_dictionary_generation_cron_job]})") do |c| + options[:cron_dictionary_generation_cron_job] = c end - - - - # Display the help screen, all programs are assumed to have this option. 
opts.on_tail('--help', 'Display this screen') do puts opts @@ -507,49 +489,27 @@ runner.execute do |opts| end end - - - - - - - - # temporary job to generate mgrep dictionary file + # optional job to generate mgrep dictionary file # separate from ontology processing due to # https://github.com/ncbo/ncbo_cron/issues/45 - - if options[:enable_dictionary_generation] + if options[:enable_dictionary_generation_cron_job] dictionary_generation_thread = Thread.new do dictionary_generation_options = options.dup - dictionary_generation_options[:job_name] = "ncbo_cron_dictionary_generation" + dictionary_generation_options[:job_name] = "ncbo_cron_dictionary_generation_cron_job" dictionary_generation_options[:scheduler_type] = :cron - dictionary_generation_options[:cron_schedule] = dictionary_generation_options[:cron_dictionary_generation] - logger.info "Setting up mgrep dictionary generation job with #{dictionary_generation_options[:cron_dictionary_generation]}"; logger.flush + dictionary_generation_options[:cron_schedule] = dictionary_generation_options[:cron_dictionary_generation_cron_job] + logger.info "Setting up mgrep dictionary generation job with #{dictionary_generation_options[:cron_dictionary_generation_cron_job]}"; logger.flush NcboCron::Scheduler.scheduled_locking_job(dictionary_generation_options) do - logger.info "Starting mgrep dictionary generation..."; logger.flush + logger.info "Starting mgrep dictionary generation CRON job..."; logger.flush t0 = Time.now annotator = Annotator::Models::NcboAnnotator.new annotator.generate_dictionary_file() - logger.info "mgrep dictionary generation job completed in #{Time.now - t0} sec."; logger.flush - logger.info "Finished mgrep dictionary generation"; logger.flush + logger.info "mgrep dictionary generation CRON job completed in #{Time.now - t0} sec."; logger.flush + logger.info "Finished mgrep dictionary generation CRON job"; logger.flush end end end - - - - - - - - - - - - - - # Print running child processes require 'sys/proctable' at_exit do @@ -573,12 +533,5 @@ runner.execute do |opts| mapping_counts_thread.join if mapping_counts_thread update_check_thread.join if update_check_thread obofoundry_sync_thread.join if obofoundry_sync_thread - - - - dictionary_generation_thread.join if dictionary_generation_thread - - - end diff --git a/bin/ncbo_ontology_annotate_generate_cache b/bin/ncbo_ontology_annotate_generate_cache index b030bafb..b341e5af 100755 --- a/bin/ncbo_ontology_annotate_generate_cache +++ b/bin/ncbo_ontology_annotate_generate_cache @@ -49,7 +49,7 @@ opt_parser = OptionParser.new do |opts| options[:generate_dictionary] = true end - options[:logfile] = "logs/annotator_cache.log" + options[:logfile] = STDOUT opts.on('-l', '--logfile FILE', "Write log to FILE (default is 'logs/annotator_cache.log').") do |filename| options[:logfile] = filename end diff --git a/bin/ncbo_ontology_archive_old_submissions b/bin/ncbo_ontology_archive_old_submissions index 7a8e5950..535c129e 100755 --- a/bin/ncbo_ontology_archive_old_submissions +++ b/bin/ncbo_ontology_archive_old_submissions @@ -11,52 +11,121 @@ require_relative '../lib/ncbo_cron' config_exists = File.exist?(File.expand_path('../../config/config.rb', __FILE__)) abort("Please create a config/config.rb file using the config/config.rb.sample as a template") unless config_exists require_relative '../config/config' - require 'optparse' -options = {} -options[:force_archiving] = false +options = { delete: false } opt_parser = OptionParser.new do |opts| - options[:ontology] = false - opts.on('-f', 
'--force-re-archiving', 'Force to re-archive already archived submissions.') do - options[:force_archiving] = true + # Set a banner, displayed at the top of the help screen. + opts.banner = "Usage: #{File.basename(__FILE__)} [options]" + + options[:logfile] = STDOUT + opts.on( '-l', '--logfile FILE', "Write log to FILE (default is STDOUT)" ) do |filename| + options[:logfile] = filename + end + + # Delete submission if it contains bad data + opts.on( '-d', '--delete', "Delete submissions that contain bad data" ) do + options[:delete] = true end # Display the help screen, all programs are assumed to have this option. - opts.on('-h', '--help', 'Display this screen') do + opts.on( '-h', '--help', 'Display this screen' ) do puts opts exit end end opt_parser.parse! - -logfile = 'archive_old_submissions.log' -if File.file?(logfile); - File.delete(logfile); -end +logfile = options[:logfile] +if File.file?(logfile); File.delete(logfile); end logger = Logger.new(logfile) -options = { process_rdf: false, index_search: false, index_commit: false, - run_metrics: false, reasoning: false, archive: true } +process_actions = { process_rdf: false, generate_labels: false, index_search: false, index_commit: false, + process_annotator: false, diff: false, run_metrics: false, archive: true } onts = LinkedData::Models::Ontology.all onts.each { |ont| ont.bring(:acronym, :submissions) } onts.sort! { |a, b| a.acronym <=> b.acronym } -force_archiving = options[:force_archiving] +bad_submissions = {} + onts.each do |ont| latest_sub = ont.latest_submission + unless latest_sub.nil? id = latest_sub.submissionId subs = ont.submissions - old_subs = subs.reject { |sub| sub.submissionId >= id } + + old_subs = subs.reject { |sub| + begin + sub.submissionId >= id + rescue => e + msg = "Invalid submission ID detected (String instead of Integer): #{ont.acronym}/#{sub.submissionId} - #{e.class}:\n#{e.backtrace.join("\n")}" + puts msg + logger.error(msg) + + if options[:delete] + sub.delete if options[:delete] + msg = "Deleted submission #{ont.acronym}/#{sub.submissionId} due to invalid Submission ID" + puts msg + logger.error(msg) + end + bad_submissions["#{ont.acronym}/#{sub.submissionId}"] = "Invalid Submission ID" + true + end + } old_subs.sort! { |a, b| a.submissionId <=> b.submissionId } old_subs.each do |sub| - if !sub.archived? || force_archiving - msg = "#{ont.acronym}: archive old submission with ID #{sub.submissionId}." + unless sub.archived? + msg = "#{ont.acronym}: found un-archived old submission with ID #{sub.submissionId}." puts msg logger.info msg - NcboCron::Models::OntologySubmissionParser.new.process_submission(logger, sub.id.to_s, options) + + begin + NcboCron::Models::OntologySubmissionParser.new.process_submission(logger, sub.id.to_s, process_actions) + rescue => e + if e.class == Goo::Base::NotValidException + if sub.valid? 
+ msg = "Error archiving submission #{ont.acronym}/#{sub.submissionId} - #{e.class}:\n#{e.backtrace.join("\n")}" + puts msg + logger.error(msg) + bad_submissions["#{ont.acronym}/#{sub.submissionId}"] = "Submission passes valid check but cannot be saved" + else + msg = "Error archiving submission #{ont.acronym}/#{sub.submissionId}:\n#{JSON.pretty_generate(sub.errors)}" + puts msg + logger.error(msg) + + if options[:delete] + sub.delete if options[:delete] + msg = "Deleted submission #{ont.acronym}/#{sub.submissionId} due to invalid data" + puts msg + logger.error(msg) + end + bad_submissions["#{ont.acronym}/#{sub.submissionId}"] = "Submission is not valid to be saved" + end + else + msg = "Error archiving submission #{ont.acronym}/#{sub.submissionId} - #{e.class}:\n#{e.backtrace.join("\n")}" + puts msg + logger.error(msg) + + if options[:delete] && (e.class == Net::HTTPBadResponse || e.class == Errno::ECONNREFUSED) + sub.delete + msg = "Deleted submission #{ont.acronym}/#{sub.submissionId} due to a non-working pull URL" + puts msg + logger.error(msg) + end + bad_submissions["#{ont.acronym}/#{sub.submissionId}"] = "#{e.class} - Runtime error" + end + end end end end end +msg = JSON.pretty_generate(bad_submissions) +puts +puts msg +logger.error(msg) + +msg = "Number of errored submissions: #{bad_submissions.length}" +puts msg +logger.error(msg) + + diff --git a/bin/ncbo_ontology_import b/bin/ncbo_ontology_import index db2e90c5..57d63aa1 100755 --- a/bin/ncbo_ontology_import +++ b/bin/ncbo_ontology_import @@ -20,27 +20,27 @@ require 'net/http' require 'optparse' ontologies_acronyms = '' ontology_source = '' -source_api = '' +source_apikey = '' username = '' opt_parser = OptionParser.new do |opts| opts.banner = 'Usage: ncbo_ontology_import [options]' - opts.on('-o', '--ontology ACRONYM', 'Ontologies acronyms which we want to import (separated by comma)') do |acronym| + opts.on('-o', '--ontologies ACRONYM1,ACRONYM2', 'Comma-separated list of ontologies to import') do |acronym| ontologies_acronyms = acronym end - opts.on('--from url', 'The ontoportal api url source of the ontology') do |url| + opts.on('--from URL', 'The ontoportal api url source of the ontology') do |url| ontology_source = url.to_s end - opts.on('--from-api api', 'An apikey to acces the ontoportal api') do |api| - source_api = api.to_s + opts.on('--from-apikey APIKEY', 'An apikey to acces the ontoportal api') do |apikey| + source_apikey = apikey.to_s end - opts.on('--admin-user username', 'The target admin user that will submit the ontology') do |user| + opts.on('--admin-user USERNAME', 'The target admin user that will submit the ontology') do |user| username = user.to_s end # Display the help screen, all programs are assumed to have this option. - opts.on( '-h', '--help', 'Display this screen') do + opts.on('-h', '--help', 'Display this screen') do puts opts exit end @@ -48,9 +48,8 @@ end opt_parser.parse! # URL of the API and APIKEY of the Ontoportal we want to import data FROM -SOURCE_API = ontology_source -SOURCE_APIKEY = source_api - +SOURCE_API = ontology_source +SOURCE_APIKEY = source_apikey # The username of the user that will have the administration rights on the ontology on the target portal TARGETED_PORTAL_USER = username @@ -58,17 +57,15 @@ TARGETED_PORTAL_USER = username # The list of acronyms of ontologies to import ONTOLOGIES_TO_IMPORT = ontologies_acronyms.split(',') || [] - def get_user(username) user = LinkedData::Models::User.find(username).first raise "The user #{username} does not exist" if user.nil? 
+ user.bring_remaining end - # A function to create a new ontology (if already Acronym already existing on the portal it will return HTTPConflict) def create_ontology(ont_info) - new_ontology = LinkedData::Models::Ontology.new new_ontology.acronym = ont_info['acronym'] @@ -97,23 +94,30 @@ def upload_submission(sub_info, ontology) # Build the json body # hasOntologyLanguage options: OWL, UMLS, SKOS, OBO # status: alpha, beta, production, retired - attr_to_reject = %w[id submissionStatus hasOntologyLanguage metrics ontology @id @type contact] - to_copy = sub_info.select do |k,v| + attr_to_reject = %w[id submissionStatus hasOntologyLanguage metrics ontology @id @type contact uploadFilePath diffFilePath] + to_copy = sub_info.select do |k, v| !v.nil? && !v.is_a?(Hash) && !v.to_s.empty? && !attr_to_reject.include?(k) end to_copy["ontology"] = ontology - to_copy["contact"] = [LinkedData::Models::Contact.where(email: USER.email).first] - to_copy["hasOntologyLanguage"] = LinkedData::Models::OntologyFormat.where(acronym: sub_info["hasOntologyLanguage"]).first + + contact = LinkedData::Models::Contact.where(email: USER.email).first + unless contact + contact = LinkedData::Models::Contact.new(name: USER.username, email: USER.email).save + puts "created a new contact; name: #{USER.username}, email: #{USER.email}" + end + + to_copy["contact"] = [contact] + to_copy["hasOntologyLanguage"] = LinkedData::Models::OntologyFormat.where(acronym: sub_info["hasOntologyLanguage"]).first to_copy.each do |key, value| attribute_settings = new_submission.class.attribute_settings(key.to_sym) if attribute_settings - if attribute_settings[:enforce]&.include?(:date_time) + if attribute_settings[:enforce]&.include?(:date_time) value = DateTime.parse(value) elsif attribute_settings[:enforce]&.include?(:uri) && attribute_settings[:enforce]&.include?(:list) value = value.map { |v| RDF::IRI.new(v) } - elsif attribute_settings[:enforce]&.include?(:uri) + elsif attribute_settings[:enforce]&.include?(:uri) value = RDF::IRI.new(value) end end @@ -124,12 +128,11 @@ def upload_submission(sub_info, ontology) new_submission end - USER = get_user username -#get apikey for admin user +# get apikey for admin user TARGET_APIKEY = USER.apikey -SOURCE_APIKEY == '' && abort('--from-api has to be set') +SOURCE_APIKEY == '' && abort('--from-apikey has to be set') SOURCE_API == '' && abort('--from has to be set') def result_log(ressource, errors) @@ -143,10 +146,11 @@ end # Go through all ontologies acronym and get their latest_submission informations ONTOLOGIES_TO_IMPORT.each do |ont| sub_info = JSON.parse(Net::HTTP.get(URI.parse("#{SOURCE_API}/ontologies/#{ont}/latest_submission?apikey=#{SOURCE_APIKEY}&display=all"))) - puts "Import #{ont} " , + puts "Import #{ont} ", "From #{SOURCE_API}" # if the ontology is already created then it will return HTTPConflict, no consequences raise "The ontology #{ont} does not exist" if sub_info['ontology'].nil? + new_ontology = create_ontology(sub_info['ontology']) errors = nil if new_ontology.valid? @@ -159,6 +163,7 @@ ONTOLOGIES_TO_IMPORT.each do |ont| new_ontology ||= LinkedData::Models::Ontology.where(acronym: ont).first new_submission = upload_submission(sub_info, new_ontology) + if new_submission.valid? 
new_submission.save errors = nil @@ -167,6 +172,3 @@ ONTOLOGIES_TO_IMPORT.each do |ont| end result_log(sub_info["id"], errors) end - - - diff --git a/bin/ncbo_ontology_process b/bin/ncbo_ontology_process index 05a1169b..e9b68182 100755 --- a/bin/ncbo_ontology_process +++ b/bin/ncbo_ontology_process @@ -31,9 +31,14 @@ opt_parser = OptionParser.new do |opts| end options[:tasks] = NcboCron::Models::OntologySubmissionParser::ACTIONS - opts.on('-t', '--tasks process_rdf,index_search,run_metrics', "Optional comma-separated list of processing tasks to perform. Default: #{NcboCron::Models::OntologySubmissionParser::ACTIONS.keys.join(',')}") do |tasks| - t = tasks.split(",").map {|t| t.strip.sub(/^:/, '').to_sym} - options[:tasks].each {|k, _| options[:tasks][k] = false unless t.include?(k)} + opts.on('-t', '--tasks process_rdf,generate_labels=false,index_search,run_metrics', "Optional comma-separated list of processing tasks to perform (or exclude). Default: #{NcboCron::Models::OntologySubmissionParser::ACTIONS.keys.join(',')}") do |tasks| + tasks_obj = {} + tasks.split(',').each { |t| + t_arr = t.gsub(/\s+/, '').gsub(/^:/, '').split('=') + tasks_obj[t_arr[0].to_sym] = (t_arr.length <= 1 || t_arr[1].downcase === 'true') + } + tasks_obj[:generate_labels] = true if tasks_obj[:process_rdf] && !tasks_obj.has_key?(:generate_labels) + options[:tasks].each {|k, _| options[:tasks][k] = false unless tasks_obj[k]} end options[:logfile] = STDOUT diff --git a/config/config.rb.sample b/config/config.rb.sample index b954b77d..ddd8d0d0 100644 --- a/config/config.rb.sample +++ b/config/config.rb.sample @@ -1,164 +1,100 @@ -# Sample for Bioportal (with the good subdomain created: sparql.bioportal.lirmm.fr, data.bioportal.lirmm.fr) +# This file is designed to be used for unit testing with docker-compose -$SITE_URL = "bioportal.lirmm.fr" +GOO_BACKEND_NAME = ENV.include?("GOO_BACKEND_NAME") ? ENV["GOO_BACKEND_NAME"] : "4store" +GOO_HOST = ENV.include?("GOO_HOST") ? ENV["GOO_HOST"] : "localhost" +GOO_PATH_DATA = ENV.include?("GOO_PATH_DATA") ? ENV["GOO_PATH_DATA"] : "/data/" +GOO_PATH_QUERY = ENV.include?("GOO_PATH_QUERY") ? ENV["GOO_PATH_QUERY"] : "/sparql/" +GOO_PATH_UPDATE = ENV.include?("GOO_PATH_UPDATE") ? ENV["GOO_PATH_UPDATE"] : "/update/" +GOO_PORT = ENV.include?("GOO_PORT") ? ENV["GOO_PORT"] : 9000 +MGREP_HOST = ENV.include?("MGREP_HOST") ? ENV["MGREP_HOST"] : "localhost" +MGREP_PORT = ENV.include?("MGREP_PORT") ? ENV["MGREP_PORT"] : 55555 +MGREP_DICT_PATH = ENV.include?("MGREP_DICT_PATH") ? ENV["MGREP_DICT_PATH"] : "./test/data/dictionary.txt" +REDIS_GOO_CACHE_HOST = ENV.include?("REDIS_GOO_CACHE_HOST") ? ENV["REDIS_GOO_CACHE_HOST"] : "localhost" +REDIS_HTTP_CACHE_HOST = ENV.include?("REDIS_HTTP_CACHE_HOST") ? ENV["REDIS_HTTP_CACHE_HOST"] : "localhost" +REDIS_PERSISTENT_HOST = ENV.include?("REDIS_PERSISTENT_HOST") ? ENV["REDIS_PERSISTENT_HOST"] : "localhost" +REDIS_PORT = ENV.include?("REDIS_PORT") ? ENV["REDIS_PORT"] : 6379 +REPORT_PATH = ENV.include?("REPORT_PATH") ? ENV["REPORT_PATH"] : "./test/tmp/ontologies_report.json" +REPOSITORY_FOLDER = ENV.include?("REPOSITORY_FOLDER") ? ENV["REPOSITORY_FOLDER"] : "./test/data/ontology_files/repo" +REST_URL_PREFIX = ENV.include?("REST_URL_PREFIX") ? ENV["REST_URL_PREFIX"] : "http://localhost:9393" +SOLR_PROP_SEARCH_URL = ENV.include?("SOLR_PROP_SEARCH_URL") ? ENV["SOLR_PROP_SEARCH_URL"] : "http://localhost:8983/solr/prop_search_core1" +SOLR_TERM_SEARCH_URL = ENV.include?("SOLR_TERM_SEARCH_URL") ? 
ENV["SOLR_TERM_SEARCH_URL"] : "http://localhost:8983/solr/term_search_core1" -begin - LinkedData.config do |config| - config.repository_folder = "/srv/ncbo/repository" - config.goo_host = "localhost" - config.goo_port = 8081 - config.search_server_url = "http://localhost:8082/solr/term_search_core1" - config.property_search_server_url = "http://localhost:8082/solr/prop_search_core1" - config.rest_url_prefix = "http://data.#{$SITE_URL}/" - config.replace_url_prefix = true - config.id_url_prefix = "http://data.bioontology.org/" - config.enable_security = true # enable private ontologies hiding - config.apikey = "" - config.ui_host = "#{$SITE_URL}" - config.sparql_endpoint_url = "http://sparql.#{$SITE_URL}/test" - config.enable_monitoring = false - config.cube_host = "localhost" - config.enable_slices = true - config.enable_resource_index = false - - # Used to define other bioportal that can be mapped to - # Example to map to ncbo bioportal : {"ncbo" => {"api" => "http://data.bioontology.org", "ui" => "http://bioportal.bioontology.org", "apikey" => ""} - # Then create the mapping using the following class in JSON : "http://purl.bioontology.org/ontology/MESH/C585345": "ncbo:MESH" - # Where "ncbo" is the namespace used as key in the interportal_hash - config.interportal_hash = {"ncbo" => {"api" => "http://data.bioontology.org", "ui" => "http://bioportal.bioontology.org", "apikey" => ""}, - "agroportal" => {"api" => "http://data.agroportal.lirmm.fr", "ui" => "http://agroportal.lirmm.fr", "apikey" => ""}} - - # Caches - config.http_redis_host = "localhost" - config.http_redis_port = 6380 - config.enable_http_cache = true - config.goo_redis_host = "localhost" - config.goo_redis_port = 6382 - - # Email notifications - config.enable_notifications = true - config.email_sender = "notifications@#{$SITE_URL}" # Default sender for emails - config.email_override = "override@example.org" # all email gets sent here. Disable with email_override_disable. 
- config.email_disable_override = true - config.smtp_host = "smtp.lirmm.fr" - config.smtp_port = 25 - config.smtp_auth_type = :none # :none, :plain, :login, :cram_md5 - config.smtp_domain = "lirmm.fr" - # Emails of the instance administrators to get mail notifications when new user or new ontology - config.admin_emails = ["my.mail@example.org"] - - # PURL server config parameters - config.enable_purl = false - config.purl_host = "purl.example.org" - config.purl_port = 80 - config.purl_username = "admin" - config.purl_password = "password" - config.purl_maintainers = "admin" - config.purl_target_url_prefix = "http://example.org" - - # Ontology Google Analytics Redis - # disabled - config.ontology_analytics_redis_host = "localhost" - config.enable_ontology_analytics = true - config.ontology_analytics_redis_port = 6379 +LinkedData.config do |config| + config.goo_backend_name = GOO_BACKEND_NAME.to_s + config.goo_host = GOO_HOST.to_s + config.goo_port = GOO_PORT.to_i + config.goo_path_query = GOO_PATH_QUERY.to_s + config.goo_path_data = GOO_PATH_DATA.to_s + config.goo_path_update = GOO_PATH_UPDATE.to_s + config.goo_redis_host = REDIS_GOO_CACHE_HOST.to_s + config.goo_redis_port = REDIS_PORT.to_i + config.http_redis_host = REDIS_HTTP_CACHE_HOST.to_s + config.http_redis_port = REDIS_PORT.to_i + config.ontology_analytics_redis_host = REDIS_PERSISTENT_HOST.to_s + config.ontology_analytics_redis_port = REDIS_PORT.to_i + config.repository_folder = REPOSITORY_FOLDER.to_s + config.search_server_url = SOLR_TERM_SEARCH_URL.to_s + config.property_search_server_url = SOLR_PROP_SEARCH_URL.to_s +# config.replace_url_prefix = false +# config.rest_url_prefix = REST_URL_PREFIX.to_s + # Used to define other bioportal that can be mapped to + # Example to map to ncbo bioportal : {"ncbo" => {"api" => "http://data.bioontology.org", "ui" => "http://bioportal.bioontology.org", "apikey" => ""} + # Then create the mapping using the following class in JSON : "http://purl.bioontology.org/ontology/MESH/C585345": "ncbo:MESH" + # Where "ncbo" is the namespace used as key in the interportal_hash + config.interportal_hash = {"ncbo" => {"api" => "http://data.bioontology.org", "ui" => "http://bioportal.bioontology.org", "apikey" => ""}, + "agroportal" => {"api" => "http://data.agroportal.lirmm.fr", "ui" => "http://agroportal.lirmm.fr", "apikey" => ""}} + + # Email notifications. + config.enable_notifications = true + config.email_sender = "sender@domain.com" # Default sender for emails + config.email_override = "test@domain.com" # By default, all email gets sent here. Disable with email_override_disable. + config.smtp_host = "smtp-unencrypted.stanford.edu" + config.smtp_user = nil + config.smtp_password = nil + config.smtp_auth_type = :none + config.smtp_domain = "localhost.localhost" end rescue NameError puts "(CNFG) >> LinkedData not available, cannot load config" end -begin - Annotator.config do |config| - config.mgrep_dictionary_file = "/srv/mgrep/dictionary/dictionary.txt" - config.stop_words_default_file = "/srv/ncbo/ncbo_cron/config/default_stop_words.txt" - config.mgrep_host = "localhost" - config.mgrep_port = 55555 - config.mgrep_alt_host = "localhost" - config.mgrep_alt_port = 55555 - config.annotator_redis_host = "localhost" - config.annotator_redis_port = 6379 - config.annotator_redis_prefix = "" - config.annotator_redis_alt_prefix = "c2" - config.enable_recognizer_param = true - # This setting allows you to ask for other recognizer in URL params (if installed and class with "annotate_direct" created). 
Example: ?recognizer=alvis or mallet +Annotator.config do |config| + config.mgrep_host ||= "localhost" + config.annotator_redis_host = REDIS_PERSISTENT_HOST.to_s + config.annotator_redis_port = REDIS_PORT.to_i + config.mgrep_host = MGREP_HOST.to_s + config.mgrep_port = MGREP_PORT.to_i + config.mgrep_dictionary_file = MGREP_DICT_PATH.to_s end rescue NameError puts "(CNFG) >> Annotator not available, cannot load config" end -begin - OntologyRecommender.config do |config| -end -rescue NameError - puts "(CNFG) >> OntologyRecommender not available, cannot load config" -end +NcboCron.config do |config| + config.redis_host = REDIS_PERSISTENT_HOST.to_s + config.redis_port = REDIS_PORT.to_i + # Ontologies Report config + config.ontology_report_path = REPORT_PATH -begin - LinkedData::OntologiesAPI.config do |config| - config.enable_unicorn_workerkiller = true - config.enable_throttling = false - config.enable_monitoring = false - config.cube_host = "localhost" - config.http_redis_host = "localhost" - config.http_redis_port = 6380 - config.ontology_rank = "" -end -rescue NameError - puts "(CNFG) >> OntologiesAPI not available, cannot load config" -end + # do not daemonize in docker + config.daemonize = false -begin - NcboCron.config do |config| - config.redis_host = Annotator.settings.annotator_redis_host - config.redis_port = Annotator.settings.annotator_redis_port - # If no URL has been specified when reindexing ontologies, use the following - config.search_index_all_url = "http://localhost:8082/solr/term_search_core2" - config.property_search_index_all_url = "http://localhost:8082/solr/prop_search_core2" + config.search_index_all_url = "http://localhost:8983/solr/term_search_core2" + config.property_search_index_all_url = "http://localhost:8983/solr/prop_search_core2" - # Minutes between every process new ontologies check - config.minutes_between = 3 - - # Schedules: run every 4 hours, starting at 00:30 - config.cron_schedule = "30 */4 * * *" - # Pull schedule: run daily at 6 a.m. (18:00) - config.pull_schedule = "00 18 * * *" - # Pull long schedule for ontology that are pulled less frequently: run weekly on monday at 11 a.m. (23:00) - config.pull_schedule_long = "00 23 * * 1" - config.pull_long_ontologies = ["BIOREFINERY", "TRANSMAT", "GO"] - # Delete class graphs of archive submissions: run twice per week on tuesday and friday at 10 a.m. (22:00) - config.cron_flush = "00 22 * * 2,5" - # Remove graphs from deleted ontologies when flushing class graphs - config.remove_zombie_graphs = true - # Warmup long time running queries: run every 3 hours (beginning at 00:00) - config.cron_warmq = "00 */3 * * *" - # Create mapping counts schedule: run twice per week on Wednesday and Saturday at 12:30AM - config.cron_mapping_counts = "30 0 * * 3,6" - - config.enable_ontologies_report = true - # Ontologies report generation schedule: run daily at 1:30 a.m. - config.cron_ontologies_report = "30 1 * * *" - # Ontologies Report file location - config.ontology_report_path = "/srv/ncbo/reports/ontologies_report.json" - - # Ontology analytics refresh schedule: run daily at 4:30 a.m. 
- config.cron_ontology_analytics = "30 4 * * *" - config.enable_ontology_analytics = true - config.analytics_service_account_email_address = "account-1@bioportal.iam.gserviceaccount.com" - config.analytics_path_to_key_file = "/srv/bioportal-ff92c5b03b63.p12" # you have to get this file from Google - config.analytics_profile_id = "ga:111823321" # replace with your ga view id - config.analytics_app_name = "bioportal" - config.analytics_app_version = "1.0.0" - config.analytics_start_date = "2015-11-13" - # To filter connexions coming from Stanford - config.analytics_filter_str = "ga:networkLocation!@stanford;ga:networkLocation!@amazon" + # Google Analytics GA4 config + config.analytics_path_to_key_file = "config/your_analytics_key.json" + config.analytics_property_id = "123456789" + # path to the Universal Analytics data, which stopped collecting on June 1st, 2023 + config.analytics_path_to_ua_data_file = "data/your_ua_data.json" + # path to the file that will hold your Google Analytics data + # this is in addition to storing it in Redis + config.analytics_path_to_ga_data_file = "data/your_ga_data.json" - # this is a Base64.encode64 encoded personal access token - # you need to run Base64.decode64 on it before using it in your code - # this is a workaround because Github does not allow storing access tokens in a repo - config.git_repo_access_token = "YOUR GITHUB REPO PERSONAL ACCESS TOKEN, encoded using Base64" - end -rescue NameError - #binding.pry - puts "(CNFG) >> NcboCron not available, cannot load config" + # this is a Base64.encode64 encoded personal access token + # you need to run Base64.decode64 on it before using it in your code + # this is a workaround because Github does not allow storing access tokens in a repo + config.git_repo_access_token = "YOUR GITHUB REPO PERSONAL ACCESS TOKEN, encoded using Base64" end Goo.use_cache = true diff --git a/config/config.test.rb b/config/config.test.rb index 0729a4b0..84a621ac 100644 --- a/config/config.test.rb +++ b/config/config.test.rb @@ -1,33 +1,42 @@ # This file is designed to be used for unit testing with docker-compose -# -GOO_PATH_QUERY = ENV.include?("GOO_PATH_QUERY") ? ENV["GOO_PATH_QUERY"] : "/sparql/" -GOO_PATH_DATA = ENV.include?("GOO_PATH_DATA") ? ENV["GOO_PATH_DATA"] : "/data/" -GOO_PATH_UPDATE = ENV.include?("GOO_PATH_UPDATE") ? ENV["GOO_PATH_UPDATE"] : "/update/" -GOO_BACKEND_NAME = ENV.include?("GOO_BACKEND_NAME") ? ENV["GOO_BACKEND_NAME"] : "localhost" -GOO_PORT = ENV.include?("GOO_PORT") ? ENV["GOO_PORT"] : 9000 -GOO_HOST = ENV.include?("GOO_HOST") ? ENV["GOO_HOST"] : "localhost" -REDIS_HOST = ENV.include?("REDIS_HOST") ? ENV["REDIS_HOST"] : "localhost" -REDIS_PORT = ENV.include?("REDIS_PORT") ? ENV["REDIS_PORT"] : 6379 -MGREP_HOST = ENV.include?("MGREP_HOST") ? ENV["MGREP_HOST"] : "localhost" -MGREP_PORT = ENV.include?("MGREP_PORT") ? ENV["MGREP_PORT"] : 55555 -SOLR_TERM_SEARCH_URL = ENV.include?("SOLR_TERM_SEARCH_URL") ? ENV["SOLR_TERM_SEARCH_URL"] : "http://localhost:8983/solr/term_search_core1" -SOLR_PROP_SEARCH_URL = ENV.include?("SOLR_PROP_SEARCH_URL") ? ENV["SOLR_PROP_SEARCH_URL"] : "http://localhost:8983/solr/prop_search_core1" + +GOO_BACKEND_NAME = ENV.include?("GOO_BACKEND_NAME") ? ENV["GOO_BACKEND_NAME"] : "4store" +GOO_HOST = ENV.include?("GOO_HOST") ? ENV["GOO_HOST"] : "localhost" +GOO_PATH_DATA = ENV.include?("GOO_PATH_DATA") ? ENV["GOO_PATH_DATA"] : "/data/" +GOO_PATH_QUERY = ENV.include?("GOO_PATH_QUERY") ? ENV["GOO_PATH_QUERY"] : "/sparql/" +GOO_PATH_UPDATE = ENV.include?("GOO_PATH_UPDATE") ? 
ENV["GOO_PATH_UPDATE"] : "/update/" +GOO_PORT = ENV.include?("GOO_PORT") ? ENV["GOO_PORT"] : 9000 +MGREP_HOST = ENV.include?("MGREP_HOST") ? ENV["MGREP_HOST"] : "localhost" +MGREP_PORT = ENV.include?("MGREP_PORT") ? ENV["MGREP_PORT"] : 55555 +MGREP_DICT_PATH = ENV.include?("MGREP_DICT_PATH") ? ENV["MGREP_DICT_PATH"] : "./test/data/dictionary.txt" +REDIS_GOO_CACHE_HOST = ENV.include?("REDIS_GOO_CACHE_HOST") ? ENV["REDIS_GOO_CACHE_HOST"] : "localhost" +REDIS_HTTP_CACHE_HOST = ENV.include?("REDIS_HTTP_CACHE_HOST") ? ENV["REDIS_HTTP_CACHE_HOST"] : "localhost" +REDIS_PERSISTENT_HOST = ENV.include?("REDIS_PERSISTENT_HOST") ? ENV["REDIS_PERSISTENT_HOST"] : "localhost" +REDIS_PORT = ENV.include?("REDIS_PORT") ? ENV["REDIS_PORT"] : 6379 +REPORT_PATH = ENV.include?("REPORT_PATH") ? ENV["REPORT_PATH"] : "./test/tmp/ontologies_report.json" +REPOSITORY_FOLDER = ENV.include?("REPOSITORY_FOLDER") ? ENV["REPOSITORY_FOLDER"] : "./test/data/ontology_files/repo" +REST_URL_PREFIX = ENV.include?("REST_URL_PREFIX") ? ENV["REST_URL_PREFIX"] : "http://localhost:9393" +SOLR_PROP_SEARCH_URL = ENV.include?("SOLR_PROP_SEARCH_URL") ? ENV["SOLR_PROP_SEARCH_URL"] : "http://localhost:8983/solr/prop_search_core1" +SOLR_TERM_SEARCH_URL = ENV.include?("SOLR_TERM_SEARCH_URL") ? ENV["SOLR_TERM_SEARCH_URL"] : "http://localhost:8983/solr/term_search_core1" LinkedData.config do |config| + config.goo_backend_name = GOO_BACKEND_NAME.to_s config.goo_host = GOO_HOST.to_s config.goo_port = GOO_PORT.to_i - config.goo_backend_name = GOO_BACKEND_NAME.to_s config.goo_path_query = GOO_PATH_QUERY.to_s config.goo_path_data = GOO_PATH_DATA.to_s config.goo_path_update = GOO_PATH_UPDATE.to_s - config.goo_redis_host = REDIS_HOST.to_s + config.goo_redis_host = REDIS_GOO_CACHE_HOST.to_s config.goo_redis_port = REDIS_PORT.to_i - config.http_redis_host = REDIS_HOST.to_s + config.http_redis_host = REDIS_HTTP_CACHE_HOST.to_s config.http_redis_port = REDIS_PORT.to_i - config.ontology_analytics_redis_host = REDIS_HOST.to_s + config.ontology_analytics_redis_host = REDIS_PERSISTENT_HOST.to_s config.ontology_analytics_redis_port = REDIS_PORT.to_i + config.repository_folder = REPOSITORY_FOLDER.to_s config.search_server_url = SOLR_TERM_SEARCH_URL.to_s config.property_search_server_url = SOLR_PROP_SEARCH_URL.to_s +# config.replace_url_prefix = false +# config.rest_url_prefix = REST_URL_PREFIX.to_s # Email notifications. 
config.enable_notifications = true config.email_sender = "sender@domain.com" # Default sender for emails @@ -40,15 +49,21 @@ end Annotator.config do |config| - config.annotator_redis_host = REDIS_HOST.to_s - config.annotator_redis_port = REDIS_PORT.to_i - config.mgrep_host = MGREP_HOST.to_s - config.mgrep_port = MGREP_PORT.to_i - config.mgrep_dictionary_file = "./test/data/dictionary.txt" + config.annotator_redis_host = REDIS_PERSISTENT_HOST.to_s + config.annotator_redis_port = REDIS_PORT.to_i + config.mgrep_host = MGREP_HOST.to_s + config.mgrep_port = MGREP_PORT.to_i + config.mgrep_dictionary_file = MGREP_DICT_PATH.to_s end +# LinkedData::OntologiesAPI.config do |config| +# config.http_redis_host = REDIS_HTTP_CACHE_HOST.to_s +# config.http_redis_port = REDIS_PORT.to_i +# end +# NcboCron.config do |config| - config.redis_host = REDIS_HOST.to_s + config.daemonize = false + config.redis_host = REDIS_PERSISTENT_HOST.to_s config.redis_port = REDIS_PORT.to_i - config.ontology_report_path = "./test/ontologies_report.json" + config.ontology_report_path = REPORT_PATH end diff --git a/dip.yml b/dip.yml new file mode 100644 index 00000000..3bbe4444 --- /dev/null +++ b/dip.yml @@ -0,0 +1,54 @@ +version: '7.1' + +# Define default environment variables to pass +# to Docker Compose +#environment: +# RAILS_ENV: development + +compose: + files: + - docker-compose.yml + # project_name: ncbo_cron + +interaction: + # This command spins up a ncbo_cron container with the required dependencies (solr, 4store, etc), + # and opens a terminal within it. + runner: + description: Open a Bash shell within a ncbo_cron container (with dependencies up) + service: ncbo_cron + command: /bin/bash + + # Run a container without any dependent services + bash: + description: Run an arbitrary script within a container (or open a shell without deps) + service: ncbo_cron + command: /bin/bash + compose_run_options: [ no-deps ] + + # A shortcut to run Bundler commands + bundle: + description: Run Bundler commands within ncbo_cron container (with dependencies up) + service: ncbo_cron + command: bundle + + # A shortcut to run unit tests + test: + description: Run unit tests with 4store triplestore + service: ncbo_cron + command: bundle exec rake test TESTOPTS='-v' + + test-ag: + description: Run unit tests with AllegroGraph triplestore + service: ncbo_cron-agraph + command: bundle exec rake test TESTOPTS='-v' + + 'redis-cli': + description: Run Redis console + service: redis-ut + command: redis-cli -h redis-ut + +#provision: + #- dip compose down --volumes + #- dip compose up -d solr 4store + #- dip bundle install + #- dip bash -c bin/setup diff --git a/test/docker-compose.yml b/docker-compose.yml similarity index 50% rename from test/docker-compose.yml rename to docker-compose.yml index db957907..0045ce12 100644 --- a/test/docker-compose.yml +++ b/docker-compose.yml @@ -1,43 +1,47 @@ x-app: &app - build: - context: ../.
- args: - RUBY_VERSION: '2.7' - # Increase the version number in the image tag every time Dockerfile or its arguments is changed - image: ncbo_cron-dev:0.0.1 - environment: &env - # default bundle config resolves to /usr/local/bundle/config inside of the container - # we are setting it to local app directory if we need to use 'bundle config local' - BUNDLE_APP_CONFIG: /srv/ontoportal/ncbo_cron/.bundle - BUNDLE_PATH: /srv/ontoportal/bundle - COVERAGE: 'true' # enable simplecov code coverage - REDIS_HOST: redis-ut - REDIS_PORT: 6379 - SOLR_TERM_SEARCH_URL: http://solr-ut:8983/solr/term_search_core1 - SOLR_PROP_SEARCH_URL: http://solr-ut:8983/solr/prop_search_core1 - MGREP_HOST: mgrep-ut - MGREP_PORT: 55555 - stdin_open: true - tty: true - command: /bin/bash - volumes: - # bundle volume for hosting gems installed by bundle; it speeds up gem install in local development - - bundle:/srv/ontoportal/bundle - - ../.:/srv/ontoportal/ncbo_cron - # mount directory containing development version of the gems if you need to use 'bundle config local' - #- /Users/alexskr/ontoportal:/Users/alexskr/ontoportal - depends_on: &depends_on - solr-ut: - condition: service_healthy - redis-ut: - condition: service_healthy - mgrep-ut: - condition: service_healthy - + build: + context: . + args: + RUBY_VERSION: '2.7' + # Increase the version number in the image tag every time Dockerfile or its arguments are changed + image: ncbo_cron:0.0.1 + environment: &env + BUNDLE_PATH: /srv/ontoportal/bundle + # default bundle config resolves to /usr/local/bundle/config inside of the container + # we are setting it to local app directory if we need to use 'bundle config local' + BUNDLE_APP_CONFIG: /srv/ontoportal/ncbo_cron/.bundle + COVERAGE: 'true' + GOO_REDIS_HOST: redis-ut + REDIS_GOO_CACHE_HOST: redis-ut + REDIS_HTTP_CACHE_HOST: redis-ut + REDIS_PERSISTENT_HOST: redis-ut + REDIS_PORT: 6379 + SOLR_TERM_SEARCH_URL: http://solr-ut:8983/solr/term_search_core1 + SOLR_PROP_SEARCH_URL: http://solr-ut:8983/solr/prop_search_core1 + MGREP_HOST: mgrep-ut + MGREP_PORT: 55555 + stdin_open: true + tty: true + command: "bundle exec rackup -o 0.0.0.0 --port 9393" + ports: + - 9393:9393 + volumes: + # bundle volume for hosting gems installed by bundle; it helps in local development with gem updates + - bundle:/srv/ontoportal/bundle + # ncbo_cron code + - .:/srv/ontoportal/ncbo_cron + # mount directory containing development version of the gems if you need to use 'bundle config local' + #- /Users/alexskr/ontoportal:/Users/alexskr/ontoportal + depends_on: &depends_on + solr-ut: + condition: service_healthy + redis-ut: + condition: service_healthy + mgrep-ut: + condition: service_healthy services: - # environment wtih 4store backend - ruby: + ncbo_cron: <<: *app environment: <<: *env @@ -54,8 +58,7 @@ services: 4store-ut: condition: service_started - # environment with AllegroGraph backend - ruby-agraph: + ncbo_cron-agraph: <<: *app environment: <<: *env @@ -65,16 +68,15 @@ services: GOO_PATH_QUERY: /repositories/bioportal_test GOO_PATH_DATA: /repositories/bioportal_test/statements GOO_PATH_UPDATE: /repositories/bioportal_test/statements - # profiles: - #- agraph + profiles: + - agraph depends_on: <<: *depends_on agraph-ut: - condition: service_started + condition: service_healthy redis-ut: image: redis - command: ["redis-server", "--save", "", "--appendonly", "no"] healthcheck: test: redis-cli ping interval: 10s @@ -83,6 +85,8 @@ services: 4store-ut: image: bde2020/4store + platform: linux/amd64 + #volume: fourstore:/var/lib/4store command: >
bash -c "4s-backend-setup --segments 4 ontoportal_kb && 4s-backend ontoportal_kb @@ -91,7 +95,7 @@ services: - 4store solr-ut: - image: ontoportal/solr-ut:0.1 + image: ontoportal/solr-ut:0.0.2 healthcheck: test: ["CMD-SHELL", "curl -sf http://localhost:8983/solr/term_search_core1/admin/ping?wt=json | grep -iq '\"status\":\"OK\"}' || exit 1"] start_period: 3s @@ -100,7 +104,8 @@ services: retries: 5 mgrep-ut: - image: ontoportal/mgrep-ncbo:0.1 + image: ontoportal/mgrep:0.0.1 + platform: linux/amd64 healthcheck: test: ["CMD", "nc", "-z", "-v", "localhost", "55555"] start_period: 3s @@ -109,7 +114,9 @@ services: retries: 5 agraph-ut: - image: franzinc/agraph:v7.3.0 + #image: franzinc/agraph:v7.3.1 + image: ontoportal/agraph:v7.3.1-patch1 + platform: linux/amd64 environment: - AGRAPH_SUPER_USER=test - AGRAPH_SUPER_PASSWORD=xyzzy @@ -122,8 +129,14 @@ services: ; agtool users add anonymous ; agtool users grant anonymous root:bioportal_test:rw ; tail -f /agraph/data/agraph.log" - # profiles: - #- agraph + healthcheck: + test: ["CMD-SHELL", "agtool storage-report bioportal_test || exit 1"] + start_period: 10s + interval: 60s + timeout: 5s + retries: 3 + profiles: + - agraph volumes: bundle: diff --git a/lib/ncbo_cron/config.rb b/lib/ncbo_cron/config.rb index 798768b2..f7e4acad 100644 --- a/lib/ncbo_cron/config.rb +++ b/lib/ncbo_cron/config.rb @@ -42,16 +42,8 @@ def config(&block) @settings.enable_spam_deletion ||= true # enable update check (vor VMs) @settings.enable_update_check ||= true - - - - # enable mgrep dictionary generation job - @settings.enable_dictionary_generation ||= true - - - - + @settings.enable_dictionary_generation_cron_job ||= false # UMLS auto-pull @settings.pull_umls_url ||= "" @@ -94,17 +86,9 @@ def config(&block) @settings.cron_obofoundry_sync ||= "0 8 * * 1,2,3,4,5" # 00 3 * * * - run daily at 3:00AM @settings.cron_update_check ||= "00 3 * * *" - - - - # mgrep dictionary generation schedule # 30 3 * * * - run daily at 3:30AM - @settings.cron_dictionary_generation ||= "30 3 * * *" - - - - + @settings.cron_dictionary_generation_cron_job ||= "30 3 * * *" @settings.log_level ||= :info unless (@settings.log_path && File.exists?(@settings.log_path)) diff --git a/lib/ncbo_cron/ontology_analytics.rb b/lib/ncbo_cron/ontology_analytics.rb index 097821fe..3a91b813 100644 --- a/lib/ncbo_cron/ontology_analytics.rb +++ b/lib/ncbo_cron/ontology_analytics.rb @@ -1,124 +1,214 @@ require 'logger' -require 'google/apis/analytics_v3' -require 'google/api_client/auth/key_utils' +require 'json' +require 'benchmark' +require 'google/analytics/data' + module NcboCron module Models class OntologyAnalytics - ONTOLOGY_ANALYTICS_REDIS_FIELD = "ontology_analytics" + ONTOLOGY_ANALYTICS_REDIS_FIELD = 'ontology_analytics' + UA_START_DATE = '2013-10-01' + GA4_START_DATE = '2023-06-01' def initialize(logger) @logger = logger end def run - redis = Redis.new(:host => NcboCron.settings.redis_host, :port => NcboCron.settings.redis_port) + redis = Redis.new(:host => LinkedData.settings.ontology_analytics_redis_host, :port => LinkedData.settings.ontology_analytics_redis_port) ontology_analytics = fetch_ontology_analytics + File.open(NcboCron.settings.analytics_path_to_ga_data_file, 'w') do |f| + f.write(ontology_analytics.to_json) + end redis.set(ONTOLOGY_ANALYTICS_REDIS_FIELD, Marshal.dump(ontology_analytics)) end def fetch_ontology_analytics - google_client = authenticate_google - aggregated_results = Hash.new - start_year = Date.parse(NcboCron.settings.analytics_start_date).year || 2013 - ont_acronyms = 
LinkedData::Models::Ontology.where.include(:acronym).all.map {|o| o.acronym} - # ont_acronyms = ["NCIT", "ONTOMA", "CMPO", "AEO", "SNOMEDCT"] - filter_str = (NcboCron.settings.analytics_filter_str.nil? || NcboCron.settings.analytics_filter_str.empty?) ? "" : ";#{NcboCron.settings.analytics_filter_str}" - - # If the user add filter through the configuration file - if !NcboCron.settings.analytics_filter_str.nil? && NcboCron.settings.analytics_filter_str != "" - analytics_filter = ";" + NcboCron.settings.analytics_filter_str - else - analytics_filter = "" - end + @logger.info "Starting Google Analytics refresh..." + @logger.flush + full_data = nil - ont_acronyms.each do |acronym| + time = Benchmark.realtime do max_results = 10000 - num_results = 10000 - start_index = 1 - results = nil - - loop do - results = google_client.get_ga_data( - ids = NcboCron.settings.analytics_profile_id, - start_date = NcboCron.settings.analytics_start_date, - end_date = Date.today.to_s, - metrics = 'ga:pageviews', - { - dimensions: 'ga:pagePath,ga:year,ga:month', - filters: "ga:pagePath=~^(\\/ontologies\\/#{acronym})(\\/?\\?{0}|\\/?\\?{1}.*)$#{filter_str}", - start_index: start_index, - max_results: max_results - } - ) - results.rows ||= [] - start_index += max_results - num_results = results.rows.length - @logger.info "Acronym: #{acronym}, Results: #{num_results}, Start Index: #{start_index}" - @logger.flush - - results.rows.each do |row| - if aggregated_results.has_key?(acronym) - # year - if aggregated_results[acronym].has_key?(row[1].to_i) - # month - if aggregated_results[acronym][row[1].to_i].has_key?(row[2].to_i) - aggregated_results[acronym][row[1].to_i][row[2].to_i] += row[3].to_i + aggregated_results = Hash.new + + @logger.info "Fetching all ontology acronyms from backend..." + @logger.flush + ont_acronyms = LinkedData::Models::Ontology.where.include(:acronym).all.map {|o| o.acronym} + # ont_acronyms = ["NCIT", "SNOMEDCT", "MEDDRA"] + + @logger.info "Authenticating with the Google Analytics Endpoint..." + @logger.flush + google_client = authenticate_google + + date_range = Google::Analytics::Data::V1beta::DateRange.new( + start_date: GA4_START_DATE, + end_date: Date.today.to_s + ) + metrics_page_views = Google::Analytics::Data::V1beta::Metric.new( + name: "screenPageViews" + ) + dimension_path = Google::Analytics::Data::V1beta::Dimension.new( + name: "pagePath" + ) + dimension_year = Google::Analytics::Data::V1beta::Dimension.new( + name: "year" + ) + dimension_month = Google::Analytics::Data::V1beta::Dimension.new( + name: "month" + ) + string_filter = Google::Analytics::Data::V1beta::Filter::StringFilter.new( + match_type: Google::Analytics::Data::V1beta::Filter::StringFilter::MatchType::FULL_REGEXP + ) + filter = Google::Analytics::Data::V1beta::Filter.new( + field_name: "pagePath", + string_filter: string_filter + ) + filter_expression = Google::Analytics::Data::V1beta::FilterExpression.new( + filter: filter + ) + order_year = Google::Analytics::Data::V1beta::OrderBy::DimensionOrderBy.new( + dimension_name: "year" + ) + orderby_year = Google::Analytics::Data::V1beta::OrderBy.new( + desc: false, + dimension: order_year + ) + order_month = Google::Analytics::Data::V1beta::OrderBy::DimensionOrderBy.new( + dimension_name: "month" + ) + orderby_month = Google::Analytics::Data::V1beta::OrderBy.new( + desc: false, + dimension: order_month + ) + @logger.info "Fetching GA4 analytics for all ontologies..." 
+ @logger.flush + + ont_acronyms.each do |acronym| + start_index = 0 + string_filter.value = "^(\\/ontologies\\/#{acronym})(\\/?\\?{0}|\\/?\\?{1}.*)$" + + loop do + request = Google::Analytics::Data::V1beta::RunReportRequest.new( + property: "properties/#{NcboCron.settings.analytics_property_id}", + metrics: [metrics_page_views], + dimension_filter: filter_expression, + dimensions: [dimension_path, dimension_year, dimension_month], + date_ranges: [date_range], + order_bys: [orderby_year, orderby_month], + offset: start_index, + limit: max_results + ) + response = google_client.run_report request + + response.rows ||= [] + start_index += max_results + num_results = response.rows.length + @logger.info "Acronym: #{acronym}, Results: #{num_results}, Start Index: #{start_index}" + @logger.flush + + response.rows.each do |row| + row_h = row.to_h + year_month_hits = row_h[:dimension_values].map.with_index { + |v, i| i > 0 ? v[:value].to_i.to_s : row_h[:metric_values][0][:value].to_i + }.rotate(1) + + if aggregated_results.has_key?(acronym) + # year + if aggregated_results[acronym].has_key?(year_month_hits[0]) + # month + if aggregated_results[acronym][year_month_hits[0]].has_key?(year_month_hits[1]) + aggregated_results[acronym][year_month_hits[0]][year_month_hits[1]] += year_month_hits[2] + else + aggregated_results[acronym][year_month_hits[0]][year_month_hits[1]] = year_month_hits[2] + end else - aggregated_results[acronym][row[1].to_i][row[2].to_i] = row[3].to_i + aggregated_results[acronym][year_month_hits[0]] = Hash.new + aggregated_results[acronym][year_month_hits[0]][year_month_hits[1]] = year_month_hits[2] end else - aggregated_results[acronym][row[1].to_i] = Hash.new - aggregated_results[acronym][row[1].to_i][row[2].to_i] = row[3].to_i + aggregated_results[acronym] = Hash.new + aggregated_results[acronym][year_month_hits[0]] = Hash.new + aggregated_results[acronym][year_month_hits[0]][year_month_hits[1]] = year_month_hits[2] end - else - aggregated_results[acronym] = Hash.new - aggregated_results[acronym][row[1].to_i] = Hash.new - aggregated_results[acronym][row[1].to_i][row[2].to_i] = row[3].to_i end - end + break if num_results < max_results + end # loop + end # ont_acronyms + @logger.info "Refresh complete, merging GA4 and UA data..." + @logger.flush + full_data = merge_ga4_ua_data(aggregated_results) + @logger.info "Merged" + @logger.flush + end # Benchmark.realtime + @logger.info "Completed Google Analytics refresh in #{(time/60).round(1)} minutes." + @logger.flush + full_data + end - if num_results < max_results - # fill up non existent years - (start_year..Date.today.year).each do |y| - aggregated_results[acronym] = Hash.new if aggregated_results[acronym].nil? 
- aggregated_results[acronym][y] = Hash.new unless aggregated_results[acronym].has_key?(y) - end - # fill up non existent months with zeros - (1..12).each { |n| aggregated_results[acronym].values.each { |v| v[n] = 0 unless v.has_key?(n) } } - break + def merge_ga4_ua_data(ga4_data) + ua_data_file = File.read(NcboCron.settings.analytics_path_to_ua_data_file) + ua_data = JSON.parse(ua_data_file) + ua_ga4_intersecting_year = Date.parse(GA4_START_DATE).year.to_s + ua_ga4_intersecting_month = Date.parse(GA4_START_DATE).month.to_s + + # add up hits for June of 2023 (the only intersecting month between UA and GA4) + ua_data.each do |acronym, _| + if ga4_data.has_key?(acronym) + if ga4_data[acronym][ua_ga4_intersecting_year].has_key?(ua_ga4_intersecting_month) + ua_data[acronym][ua_ga4_intersecting_year][ua_ga4_intersecting_month] += + ga4_data[acronym][ua_ga4_intersecting_year][ua_ga4_intersecting_month] + # delete data for June of 2023 from ga4_data to avoid overwriting when merging + ga4_data[acronym][ua_ga4_intersecting_year].delete(ua_ga4_intersecting_month) end end end + # merge ua and ga4 data + merged_data = ua_data.deep_merge(ga4_data) + # fill missing years and months + fill_missing_data(merged_data) + # sort acronyms, years and months + sort_ga_data(merged_data) + end - @logger.info "Completed ontology analytics refresh..." - @logger.flush + def fill_missing_data(ga_data) + # fill up non existent years + start_year = Date.parse(UA_START_DATE).year + + ga_data.each do |acronym, _| + (start_year..Date.today.year).each do |y| + ga_data[acronym] = Hash.new if ga_data[acronym].nil? + ga_data[acronym][y.to_s] = Hash.new unless ga_data[acronym].has_key?(y.to_s) + end + # fill up non existent months with zeros + (1..12).each { |n| ga_data[acronym].values.each { |v| v[n.to_s] = 0 unless v.has_key?(n.to_s) } } + end + end - aggregated_results + def sort_ga_data(ga_data) + ga_data.transform_values { |value| + value.transform_values { |val| + val.sort_by { |key, _| key.to_i }.to_h + }.sort_by { |k, _| k.to_i }.to_h + }.sort.to_h end def authenticate_google - Google::Apis::ClientOptions.default.application_name = NcboCron.settings.analytics_app_name - Google::Apis::ClientOptions.default.application_version = NcboCron.settings.analytics_app_version - # enable google api call retries in order to - # minigate analytics processing failure due to ocasional google api timeouts and other outages - Google::Apis::RequestOptions.default.retries = 5 - # uncoment to enable logging for debugging purposes - # Google::Apis.logger.level = Logger::DEBUG - # Google::Apis.logger = @logger - client = Google::Apis::AnalyticsV3::AnalyticsService.new - key = Google::APIClient::KeyUtils::load_from_pkcs12(NcboCron.settings.analytics_path_to_key_file, 'notasecret') - client.authorization = Signet::OAuth2::Client.new( - :token_credential_uri => 'https://accounts.google.com/o/oauth2/token', - :audience => 'https://accounts.google.com/o/oauth2/token', - :scope => 'https://www.googleapis.com/auth/analytics.readonly', - :issuer => NcboCron.settings.analytics_service_account_email_address, - :signing_key => key - ).tap { |auth| auth.fetch_access_token! } - client + Google::Analytics::Data.analytics_data do |config| + config.credentials = NcboCron.settings.analytics_path_to_key_file + end end - end + end # class + + end +end + +class ::Hash + def deep_merge(second) + merger = proc { |key, v1, v2| Hash === v1 && Hash === v2 ? 
v1.merge(v2, &merger) : v2 }
+    self.merge(second, &merger)
   end
 end
 
@@ -127,7 +217,8 @@ def authenticate_google
 # require 'ncbo_annotator'
 # require 'ncbo_cron/config'
 # require_relative '../../config/config'
-# ontology_analytics_log_path = File.join("logs", "ontology-analytics.log")
-# ontology_analytics_logger = Logger.new(ontology_analytics_log_path)
+# # ontology_analytics_log_path = File.join("logs", "ontology-analytics.log")
+# # ontology_analytics_logger = Logger.new(ontology_analytics_log_path)
+# ontology_analytics_logger = Logger.new(STDOUT)
 # NcboCron::Models::OntologyAnalytics.new(ontology_analytics_logger).run
-# ./bin/ncbo_cron --disable-processing true --disable-pull true --disable-flush true --disable-warmq true --disable-ontologies-report true --disable-mapping-counts true --disable-spam-deletion true --ontology-analytics '14 * * * *'
+# # ./bin/ncbo_cron --disable-processing true --disable-pull true --disable-flush true --disable-warmq true --disable-ontologies-report true --disable-mapping-counts true --disable-spam-deletion true --ontology-analytics '14 * * * *'
diff --git a/lib/ncbo_cron/ontology_submission_parser.rb b/lib/ncbo_cron/ontology_submission_parser.rb
index b0a3309c..3c908442 100644
--- a/lib/ncbo_cron/ontology_submission_parser.rb
+++ b/lib/ncbo_cron/ontology_submission_parser.rb
@@ -11,6 +11,7 @@ class OntologySubmissionParser
   ACTIONS = {
     :process_rdf => true,
     :extract_metadata => true,
+    :generate_labels => true,
     :index_search => true,
     :index_properties => true,
     :run_metrics => true,
@@ -182,7 +183,7 @@ def process_submission(logger, submission_id, actions=ACTIONS)
 
       # Check to make sure the file has been downloaded
      if sub.pullLocation && (!sub.uploadFilePath || !File.exist?(sub.uploadFilePath))
-        multi_logger.debug "Pull location found, but no file in the upload file path. Retrying download."
+        multi_logger.debug "Pull location found (#{sub.pullLocation}), but no file in the upload file path (#{sub.uploadFilePath}). Retrying download."
file, filename = sub.download_ontology_file file_location = sub.class.copy_file_repository(sub.ontology.acronym, sub.submissionId, file, filename) file_location = "../" + file_location if file_location.start_with?(".") # relative path fix @@ -237,10 +238,11 @@ def process_annotator(logger, sub) begin annotator = Annotator::Models::NcboAnnotator.new annotator.create_term_cache_for_submission(logger, sub) - # commenting this action out for now due to a problem with hgetall in redis + # this action only occurs if the CRON dictionary generation job is disabled + # if the CRON dictionary generation job is running, + # the dictionary will NOT be generated on each ontology parsing # see https://github.com/ncbo/ncbo_cron/issues/45 for details - # mgrep dictionary generation will occur as a separate CRON task - # annotator.generate_dictionary_file() + annotator.generate_dictionary_file() unless NcboCron.settings.enable_dictionary_generation_cron_job rescue Exception => e logger.error(e.message + "\n" + e.backtrace.join("\n\t")) logger.flush() diff --git a/lib/ncbo_cron/spam_deletion.rb b/lib/ncbo_cron/spam_deletion.rb index 8db5568b..e2ec64f8 100644 --- a/lib/ncbo_cron/spam_deletion.rb +++ b/lib/ncbo_cron/spam_deletion.rb @@ -25,8 +25,18 @@ def initialize(logger=nil) end def run - auth_token = Base64.decode64(NcboCron.settings.git_repo_access_token) + auth_token = NcboCron.settings.git_repo_access_token res = `curl --header 'Authorization: token #{auth_token}' --header 'Accept: application/vnd.github.v3.raw' --location #{FULL_FILE_PATH}` + + begin + error_json = JSON.parse(res) + msg = "\nError while fetching the SPAM user list from #{FULL_FILE_PATH}: #{error_json}" + @logger.error(msg) + puts msg + exit + rescue JSON::ParserError + @logger.info("Successfully downloaded the SPAM user list from #{FULL_FILE_PATH}") + end usernames = res.split(",").map(&:strip) delete_spam(usernames) end diff --git a/ncbo_cron.gemspec b/ncbo_cron.gemspec index 821881d1..c8faa03d 100644 --- a/ncbo_cron.gemspec +++ b/ncbo_cron.gemspec @@ -8,7 +8,7 @@ Gem::Specification.new do |gem| gem.summary = %q{} gem.homepage = "https://github.com/ncbo/ncbo_cron" - gem.files = `git ls-files`.split($\) + gem.files = Dir['**/*'] gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) } gem.test_files = gem.files.grep(%r{^(test|spec|features)/}) gem.name = "ncbo_cron" @@ -16,7 +16,7 @@ Gem::Specification.new do |gem| gem.add_dependency("dante") gem.add_dependency("goo") - gem.add_dependency("google-apis-analytics_v3") + gem.add_dependency("google-analytics-data") gem.add_dependency("mlanett-redis-lock") gem.add_dependency("multi_json") gem.add_dependency("ncbo_annotator") diff --git a/rakelib/purl_management.rake b/rakelib/purl_management.rake new file mode 100644 index 00000000..58cfadd7 --- /dev/null +++ b/rakelib/purl_management.rake @@ -0,0 +1,28 @@ +# Task for updating and adding missing purl for all ontologies +# +desc 'Purl Utilities' +namespace :purl do + require 'bundler/setup' + # Configure the process for the current cron configuration. 
+ require_relative '../lib/ncbo_cron' + config_exists = File.exist?(File.expand_path('../../config/config.rb', __FILE__)) + abort('Please create a config/config.rb file using the config/config.rb.sample as a template') unless config_exists + require_relative '../config/config' + + desc 'update purl for all ontologies' + task :update_all do + purl_client = LinkedData::Purl::Client.new + LinkedData::Models::Ontology.all.each do |ont| + ont.bring(:acronym) + acronym = ont.acronym + + if purl_client.purl_exists(acronym) + puts "#{acronym} exists" + purl_client.fix_purl(acronym) + else + puts "#{acronym} DOES NOT exist" + purl_client.create_purl(acronym) + end + end + end +end
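+
+# Usage sketch: assuming the project Rakefile loads tasks from rakelib/ (rake's
+# default behavior) and a valid config/config.rb is in place, the task defined
+# above can be run from the repository root with:
+#
+#   bundle exec rake purl:update_all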