diff --git a/etc/db/storm_be_ISAM_mysql_update_from_1.0.0_to_1.1.0.sql b/etc/db/storm_be_ISAM_mysql_update_from_1.0.0_to_1.1.0.sql
deleted file mode 100644
index ca1bcdc73..000000000
--- a/etc/db/storm_be_ISAM_mysql_update_from_1.0.0_to_1.1.0.sql
+++ /dev/null
@@ -1,27 +0,0 @@
---
--- Update StoRM tape recall database from 1.0.0 to 1.1.0
---
-
-DELETE FROM storm_be_ISAM.db_version;
-INSERT INTO storm_be_ISAM.db_version (major,minor,revision,description) VALUES (1,1,0,'27 May 2011');
-
-DROP TABLE IF EXISTS storm_be_ISAM.storage_file;
-
-ALTER TABLE storm_be_ISAM.tape_recall
- ADD `groupTaskId` CHAR(36) NOT NULL,
- ADD `inProgressTime` datetime,
- ADD `finalStatusTime` datetime,
- ADD INDEX groupTaskId_index (groupTaskId);
-
-ALTER TABLE storm_be_ISAM.storage_space
- ADD `USED_SIZE` bigint(20) NOT NULL default '-1',
- ADD `BUSY_SIZE` bigint(20) NOT NULL default '-1',
- ADD `UNAVAILABLE_SIZE` bigint(20) NOT NULL default '-1',
- ADD `AVAILABLE_SIZE` bigint(20) NOT NULL default '-1',
- ADD `RESERVED_SIZE` bigint(20) NOT NULL default '-1',
- ADD `UPDATE_TIME` TIMESTAMP NOT NULL default '1970-01-02 00:00:00',
- MODIFY COLUMN `CREATED` TIMESTAMP NOT NULL default CURRENT_TIMESTAMP,
- ADD INDEX ALIAS_index (ALIAS),
- ADD INDEX TOKEN_index (SPACE_TOKEN);
-
-
diff --git a/etc/db/storm_database_config.sh b/etc/db/storm_database_config.sh
deleted file mode 100644
index ee4722aa1..000000000
--- a/etc/db/storm_database_config.sh
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/bin/bash
-#
-# The following environment variables are used by this script and can be set outside:
-# STORM_MYSQL_HOSTNAME (hostname.domain which runs mysql)
-# STORM_DBSCRIPT_DIR (directory containing the StoRM scripts for the StoRM DB)
-# MYSQL_PASSWORD (mysql root password)
-
-
-################################ Set environment variables ####################
-STORM_DB_NAME=storm_db
-if [ -z "$STORM_MYSQL_HOSTNAME" ]; then
- STORM_MYSQL_HOSTNAME=`hostname`
- # extract the short name (i.e. stop at the first dot)
- STORM_MYSQL_HOSTNAME_SHORT=`expr "$STORM_MYSQL_HOSTNAME" : '\([^.]*\)'`
-fi
-
-if [ -z "$STORM_DBSCRIPT_DIR" ]; then
- STORM_DBSCRIPT_DIR=/etc/storm/backend-server/db
-fi
-
-if [ -z "$MYSQL_PASSWORD" ]; then
- MYSQL_PASSWORD=storm
-fi
-
-
-############################### Function definition ###########################
-function get_stormdb_version () {
- local MYSQL_OPTS="-h $STORM_MYSQL_HOSTNAME -u root ${MYSQL_PWD_OPTION} "
- local STORMDB_VERSION_MAJOR=`mysql $MYSQL_OPTS -s -e"use storm_db;select major from db_version;"`
- local STORMDB_VERSION_MINOR=`mysql $MYSQL_OPTS -s -e"use storm_db;select minor from db_version;"`
- local STORMDB_VERSION_REVISION=`mysql $MYSQL_OPTS -s -e"use storm_db;select revision from db_version;"`
- STORMDB_VERSION="$STORMDB_VERSION_MAJOR.$STORMDB_VERSION_MINOR.$STORMDB_VERSION_REVISION"
-}
-
-function get_stormbeISAM_version () {
- local MYSQL_OPTS="-h $STORM_MYSQL_HOSTNAME -u root ${MYSQL_PWD_OPTION} "
- local STORMBEISAM_VERSION_MAJOR=`mysql $MYSQL_OPTS -s -e"use storm_be_ISAM;select major from db_version;"`
- local STORMBEISAM_VERSION_MINOR=`mysql $MYSQL_OPTS -s -e"use storm_be_ISAM;select minor from db_version;"`
- local STORMBEISAM_VERSION_REVISION=`mysql $MYSQL_OPTS -s -e"use storm_be_ISAM;select revision from db_version;"`
- STORMBEISAM_VERSION="$STORMBEISAM_VERSION_MAJOR.$STORMBEISAM_VERSION_MINOR.$STORMBEISAM_VERSION_REVISION"
-}
-
-function set_transition_script_filename () {
- if [ -n "$STORMDB_VERSION" ]; then
-
- tmp=`ls $STORM_DBSCRIPT_DIR/storm_mysql_update_from_${STORMDB_VERSION}* 2>&1`
-
- if [ $? -eq 0 ]; then
- TRANSITION_SCRIPT_FILENAME=$tmp
- else
- TRANSITION_SCRIPT_FILENAME=script_not_found # foo value, just a filename that doesn't exist
- fi
- else
- TRANSITION_SCRIPT_FILENAME=script_not_found # foo value, just a filename that doesn't exist
- fi
-}
-
-function set_stormbeISAM_transition_script_filename () {
- if [ -n "$STORMBEISAM_VERSION" ]; then
-
- tmp=`ls $STORM_DBSCRIPT_DIR/storm_be_ISAM_mysql_update_from_${STORMBEISAM_VERSION}* 2>&1`
-
- if [ $? -eq 0 ]; then
- TRANSITION_SCRIPT_FILENAME=$tmp
- else
- TRANSITION_SCRIPT_FILENAME=script_not_found # foo value, just a filename that doesn't exist
- fi
- else
- TRANSITION_SCRIPT_FILENAME=script_not_found # foo value, just a filename that doesn't exist
- fi
-}
-
-function create_new_storm_db () {
- echo "Creating new db..."
- mysql -u root $MYSQL_PWD_OPTION < $STORM_DBSCRIPT_DIR/storm_mysql_tbl.sql
- tmp=`mktemp /tmp/sql.XXXXXX`
-
- sed s/__HOST__/${STORM_MYSQL_HOSTNAME_SHORT}/g $STORM_DBSCRIPT_DIR/storm_mysql_grant.sql | \
- sed s/__STORMUSER__/${STORM_DB_USER}/g | \
- sed s/__HOSTDOMAIN__/${STORM_MYSQL_HOSTNAME}/g > $tmp
-
- mysql -u root $MYSQL_PWD_OPTION < $tmp
- rm -f $tmp
- echo "Created new DB"
-}
-
-function update_storm_db () {
- get_stormdb_version
- set_transition_script_filename
- while [ "$TRANSITION_SCRIPT_FILENAME" != script_not_found ]
- do
- if [ -e "$TRANSITION_SCRIPT_FILENAME" ]; then
- mysql -u root $MYSQL_PWD_OPTION < $TRANSITION_SCRIPT_FILENAME
- fi
- get_stormdb_version
- set_transition_script_filename
- # After running the script the DB version should be changed, if not then
- # there is nothing else to do and the DB is up to date.
- done
- echo "Update done!"
-}
-
-function update_storm_be_ISAM () {
- get_stormbeISAM_version
- set_stormbeISAM_transition_script_filename
- while [ "$TRANSITION_SCRIPT_FILENAME" != script_not_found ]
- do
- if [ -e "$TRANSITION_SCRIPT_FILENAME" ]; then
- mysql -u root $MYSQL_PWD_OPTION < $TRANSITION_SCRIPT_FILENAME
- fi
- get_stormbeISAM_version
- set_stormbeISAM_transition_script_filename
- # After running the script the DB version should be changed, if not then
- # there is nothing else to do and the DB is up to date.
- done
- echo "Update done!"
-}
-
-################################## Main #######################################
-# check for the existence of mysql
-which mysql > /dev/null 2> /dev/null
-if [ "$?" -ne 0 ] # check "which" exit status
-then
- echo "Error: mysql not found (install mysql or add it to the PATH environment variable)."
- exit 1
-fi
-
-#echo "*** WARNING: When you are asked for a password, it's the 'root' MySQL user password. ***"
-# check if mysql need a root password
-mysql -u root -e ";" 2>/dev/null
-if [ "$?" -ne 0 ]; then # the exit status is not zero
- MYSQL_PWD_OPTION="-p$MYSQL_PASSWORD";
-else # the exit status is zero, i.e. no passwd
- MYSQL_PWD_OPTION=""
-fi
-
-# check that the storm database exists
-mysql -h $STORM_MYSQL_HOSTNAME -u root ${MYSQL_PWD_OPTION} -e"use ${STORM_DB_NAME};" > /dev/null 2> /dev/null
-if [ "$?" -ne 0 ]; then
- create_new_storm_db
-else
- update_storm_db
- update_storm_be_ISAM
-fi
-
-exit 0
-
diff --git a/etc/db/storm_mysql_grant.sql b/etc/db/storm_mysql_grant.sql
deleted file mode 100644
index bfad075f5..000000000
--- a/etc/db/storm_mysql_grant.sql
+++ /dev/null
@@ -1,28 +0,0 @@
---###################################################
---# Copyright (c) 2008 on behalf of the INFN CNAF
---# The Italian National Institute for Nuclear Physics (INFN),
---# All rights reserved.
---#
---# createrole sql script for a database
---#
---# author: luca.magnoni@cnaf.infn.it
---# contributes: flavia.donno@cern.ch
---# changelog: Added grant permission on storm_be_ISAM database.
---#
---#
---###################################################
-USE mysql;
-GRANT ALL PRIVILEGES ON storm_db.* TO __STORMUSER__ IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION;
-GRANT ALL PRIVILEGES ON storm_db.* TO __STORMUSER__@'localhost' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION;
-GRANT ALL PRIVILEGES ON storm_db.* TO __STORMUSER__@'__HOST__' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION;
-GRANT ALL PRIVILEGES ON storm_db.* TO __STORMUSER__@'__HOSTDOMAIN__' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION;
-
-GRANT ALL PRIVILEGES ON storm_be_ISAM.* TO __STORMUSER__ IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION;
-GRANT ALL PRIVILEGES ON storm_be_ISAM.* TO __STORMUSER__@'localhost' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION;
-GRANT ALL PRIVILEGES ON storm_be_ISAM.* TO __STORMUSER__@'__HOST__' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION;
-GRANT ALL PRIVILEGES ON storm_be_ISAM.* TO __STORMUSER__@'__HOSTDOMAIN__' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION;
-
-
-FLUSH PRIVILEGES;
-
-
diff --git a/etc/db/storm_mysql_tbl.sql b/etc/db/storm_mysql_tbl.sql
deleted file mode 100644
index 15d237862..000000000
--- a/etc/db/storm_mysql_tbl.sql
+++ /dev/null
@@ -1,461 +0,0 @@
---###################################################
---#
---# Copyright (c) 2008 on behalf of the INFN CNAF
---# The Italian National Institute for Nuclear Physics (INFN),
---# All rights reserved.
---#
---# create StoRM databases
---#
---# author: luca.magnoni@cnaf.infn.it
---# changelog: Add "ON DELETE CASCADE" for requestDirOption.
---#
---###################################################
-
-CREATE DATABASE IF NOT EXISTS storm_db;
-
-USE storm_db;
-
-CREATE TABLE IF NOT EXISTS db_version (
- ID int NOT NULL auto_increment,
- major int,
- minor int,
- revision int,
- description VARCHAR(100),
- primary key (ID)
-) engine=InnoDB;
-
-DELETE FROM storm_db.db_version;
-INSERT INTO storm_db.db_version (major,minor,revision,description) VALUES (1,7,2,'10 Mar 2015');
-
-CREATE TABLE IF NOT EXISTS request_queue (
- ID int not null auto_increment,
- config_FileStorageTypeID CHAR(1),
- config_AccessPatternID CHAR(1),
- config_ConnectionTypeID CHAR(1),
- config_OverwriteID CHAR(1),
- config_RequestTypeID VARCHAR(3) not null,
- client_dn VARCHAR(255) BINARY,
- u_token VARCHAR(255) BINARY,
- retrytime int,
- pinLifetime int,
- s_token VARCHAR(255) BINARY,
- status int not null,
- errstring VARCHAR(255),
- r_token VARCHAR(255) BINARY,
- remainingTotalTime int NOT NULL DEFAULT -1,
- fileLifetime int,
- nbreqfiles int,
- numOfCompleted int,
- numOfWaiting int,
- numOfFailed int,
- timeStamp datetime not null,
- proxy blob,
- deferredStartTime int,
- remainingDeferredStartTime int,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_Get (
- ID int not null auto_increment,
- request_DirOptionID int,
- request_queueID int,
- sourceSURL text not null,
- normalized_sourceSURL_StFN text,
- sourceSURL_uniqueID int,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS status_Get (
- ID int not null auto_increment,
- statusCode int not null,
- explanation VARCHAR(255),
- fileSize bigint,
- estimatedWaitTime int,
- remainingPinTime int,
- transferURL text,
- request_GetID int not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_Put (
- ID int not null auto_increment,
- request_queueID int not null,
- targetSURL text not null,
- expectedFileSize bigint,
- normalized_targetSURL_StFN text,
- targetSURL_uniqueID int,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS status_Put (
- ID int not null auto_increment,
- statusCode int not null,
- explanation VARCHAR(255),
- fileSize bigint,
- estimatedWaitTime int,
- remainingPinTime int,
- remainingFileTime int,
- transferURL text,
- request_PutID int not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_BoL (
- ID int not null auto_increment,
- sourceSURL text not null,
- request_DirOptionID int,
- request_queueID int,
- normalized_sourceSURL_StFN text,
- sourceSURL_uniqueID int,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS status_BoL (
- ID int not null auto_increment,
- request_BoLID int,
- statusCode int not null,
- explanation VARCHAR(255),
- fileSize bigint,
- estimatedWaitTime int,
- remainingPinTime int,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_Copy (
- ID int not null auto_increment,
- request_queueID int,
- request_DirOptionID int,
- sourceSURL text not null,
- targetSURL text not null,
- normalized_sourceSURL_StFN text,
- sourceSURL_uniqueID int,
- normalized_targetSURL_StFN text,
- targetSURL_uniqueID int,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS status_Copy (
- ID int not null auto_increment,
- statusCode int not null,
- explanation VARCHAR(255),
- fileSize bigint,
- estimatedWaitTime int,
- remainingFileTime int,
- request_CopyID int not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_ExtraInfo (
- ID int not null auto_increment,
- request_queueID int,
- status_GetID int,
- request_queueID2 int,
- status_PutID int,
- ei_key VARCHAR(255) not null,
- ei_value VARCHAR(255),
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_RetentionPolicyInfo (
- ID int not null auto_increment,
- request_queueID int not null,
- config_RetentionPolicyID CHAR(1),
- config_AccessLatencyID CHAR(1),
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_ClientNetworks (
- ID int not null auto_increment,
- network VARCHAR(255) not null,
- request_queueID int, primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_TransferProtocols (
- ID int not null auto_increment,
- request_queueID int,
- config_ProtocolsID VARCHAR(30),
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_DirOption (
- ID int not null auto_increment,
- isSourceADirectory tinyint(1) default 0 not null,
- allLevelRecursive tinyint(1) default 0,
- numOfLevels int default 1,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS request_VOMSAttributes (
- ID int not null auto_increment,
- request_queueID int,
- vo VARCHAR(255) not null,
- voms_group text,
- voms_role text,
- voms_capability text,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS volatile (
- ID int not null auto_increment,
- file text not null,
- start datetime not null,
- fileLifetime int not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS jit (
- ID int not null auto_increment,
- file text not null,
- acl int not null,
- uid int not null,
- start datetime not null,
- pinLifetime int not null,
- gid int not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS config_Protocols (
- ID VARCHAR(30) not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS config_RetentionPolicy (
- ID CHAR(1) not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS config_AccessLatency (
- ID CHAR(1) not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS config_FileStorageType (
- ID CHAR(1) not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS config_AccessPattern (
- ID CHAR(1) not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS config_ConnectionType (
- ID CHAR(1) not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS config_Overwrite (
- ID CHAR(1) not null,
- primary key (ID)) engine=InnoDB;
-
-CREATE TABLE IF NOT EXISTS config_RequestType (
- ID VARCHAR(3) not null,
- primary key (ID)) engine=InnoDB;
-
-
-
-ALTER TABLE request_queue
- add index FK_request_qu_2651 (config_FileStorageTypeID),
- add constraint FK_request_qu_2651 foreign key (config_FileStorageTypeID) references config_FileStorageType (ID);
-
-ALTER TABLE request_queue
- add index FK_request_qu_4029 (config_AccessPatternID),
- add constraint FK_request_qu_4029 foreign key (config_AccessPatternID) references config_AccessPattern (ID);
-
-ALTER TABLE request_queue
- add index FK_request_qu_8833 (config_ConnectionTypeID),
- add constraint FK_request_qu_8833 foreign key (config_ConnectionTypeID) references config_ConnectionType (ID);
-
-ALTER TABLE request_queue
- add index FK_request_qu_8815 (config_OverwriteID),
- add constraint FK_request_qu_8815 foreign key (config_OverwriteID) references config_Overwrite (ID);
-
-ALTER TABLE request_queue
- add index FK_request_qu_375 (config_RequestTypeID),
- add constraint FK_request_qu_375 foreign key (config_RequestTypeID) references config_RequestType (ID);
-
-CREATE INDEX r_token_index ON request_queue (r_token(8));
-CREATE INDEX status_index on request_queue (status);
-
-ALTER TABLE request_Get
- add index FK_request_Ge_9630 (request_DirOptionID),
- add constraint FK_request_Ge_9630 foreign key (request_DirOptionID) references request_DirOption (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_Get
- add index FK_request_Ge_3811 (request_queueID),
- add constraint FK_request_Ge_3811 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE;
-
-CREATE INDEX index_sourceSURL_uniqueID on request_Get (sourceSURL_uniqueID);
-
-ALTER TABLE status_Get
- add index FK_status_Get_4853 (request_GetID),
- add constraint FK_status_Get_4853 foreign key (request_GetID) references request_Get (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_Put
- add index FK_request_Pu_4665 (request_queueID),
- add constraint FK_request_Pu_4665 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE;
-
-CREATE INDEX index_targetSURL on request_Put (targetSURL(255));
-CREATE INDEX index_targetSURL_uniqueID on request_Put (targetSURL_uniqueID);
-
-ALTER TABLE status_Put
- add index FK_status_Put_3223 (request_PutID),
- add constraint FK_status_Put_3223 foreign key (request_PutID) references request_Put (ID) ON DELETE CASCADE;
-
-CREATE INDEX statusCode_index on status_Put (statusCode);
-CREATE INDEX statusCodeGet_index on status_Get (statusCode);
-CREATE INDEX transferURL_index ON status_Put (transferURL(255));
-
-ALTER TABLE request_BoL
- add index FK_request_Bo_4166 (request_DirOptionID),
- add constraint FK_request_Bo_4166 foreign key (request_DirOptionID) references request_DirOption (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_BoL
- add index FK_request_Bo_8346 (request_queueID),
- add constraint FK_request_Bo_8346 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE;
-
-CREATE INDEX index_sourceSURL_uniqueID on request_BoL (sourceSURL_uniqueID);
-
-ALTER TABLE status_BoL
- add index FK_status_BoL_1747 (request_BoLID),
- add constraint FK_status_BoL_1747 foreign key (request_BoLID) references request_BoL (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_Copy
- add index FK_request_Co_6810 (request_queueID),
- add constraint FK_request_Co_6810 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_Copy
- add index FK_request_Co_2630 (request_DirOptionID),
- add constraint FK_request_Co_2630 foreign key (request_DirOptionID) references request_DirOption (ID) ON DELETE CASCADE;
-
-CREATE INDEX index_sourceSURL_uniqueID on request_Copy (sourceSURL_uniqueID);
-CREATE INDEX index_targetSURL_uniqueID on request_Copy (targetSURL_uniqueID);
-
-ALTER TABLE status_Copy
- add index FK_status_Cop_447 (request_CopyID),
- add constraint FK_status_Cop_447 foreign key (request_CopyID) references request_Copy (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_ExtraInfo
- add index FK_request_Ex_2570 (request_queueID),
- add constraint FK_request_Ex_2570 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_ExtraInfo
- add index FK_request_Ex_9422 (status_GetID),
- add constraint FK_request_Ex_9422 foreign key (status_GetID) references status_Get (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_ExtraInfo
- add index FK_request_Ex_9425 (request_queueID2),
- add constraint FK_request_Ex_9425 foreign key (request_queueID2) references request_queue (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_ExtraInfo
- add index FK_request_Ex_8646 (status_PutID),
- add constraint FK_request_Ex_8646 foreign key (status_PutID) references status_Put (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_RetentionPolicyInfo
- add index FK_request_Re_5291 (request_queueID),
- add constraint FK_request_Re_5291 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_RetentionPolicyInfo
- add index FK_request_Re_503 (config_RetentionPolicyID),
- add constraint FK_request_Re_503 foreign key (config_RetentionPolicyID) references config_RetentionPolicy (ID);
-
-ALTER TABLE request_RetentionPolicyInfo
- add index FK_request_Re_2860 (config_AccessLatencyID),
- add constraint FK_request_Re_2860 foreign key (config_AccessLatencyID) references config_AccessLatency (ID);
-
-ALTER TABLE request_ClientNetworks
- add index FK_request_Cl_4686 (request_queueID),
- add constraint FK_request_Cl_4686 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_TransferProtocols
- add index FK_request_Tr_6848 (request_queueID),
- add constraint FK_request_Tr_6848 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE;
-
-ALTER TABLE request_TransferProtocols
- add index FK_request_Tr_8127 (config_ProtocolsID),
- add constraint FK_request_Tr_8127 foreign key (config_ProtocolsID) references config_Protocols (ID);
-
-ALTER TABLE request_VOMSAttributes
- add index FK_request_VO_5290 (request_queueID),
- add constraint FK_request_VO_5290 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE;
-
-CREATE INDEX file_index ON volatile (file(255));
-
-REPLACE INTO config_Protocols (ID) VALUES ('file');
-REPLACE INTO config_Protocols (ID) VALUES ('gsiftp');
-REPLACE INTO config_Protocols (ID) VALUES ('rfio');
-REPLACE INTO config_Protocols (ID) VALUES ('root');
-REPLACE INTO config_Protocols (ID) VALUES ('http');
-REPLACE INTO config_Protocols (ID) VALUES ('https');
-REPLACE INTO config_Protocols (ID) VALUES ('xroot');
-
-REPLACE INTO config_Overwrite (ID) VALUES ('N');
-REPLACE INTO config_Overwrite (ID) VALUES ('A');
-REPLACE INTO config_Overwrite (ID) VALUES ('D');
-
-REPLACE INTO config_FileStorageType (ID) VALUES ('V');
-REPLACE INTO config_FileStorageType (ID) VALUES ('P');
-REPLACE INTO config_FileStorageType (ID) VALUES ('D');
-
-REPLACE INTO config_RequestType (ID) VALUES ('BOL');
-REPLACE INTO config_RequestType (ID) VALUES ('PTG');
-REPLACE INTO config_RequestType (ID) VALUES ('PTP');
-REPLACE INTO config_RequestType (ID) VALUES ('COP');
-
-REPLACE INTO config_RetentionPolicy (ID) VALUES ('R');
-REPLACE INTO config_RetentionPolicy (ID) VALUES ('C');
-REPLACE INTO config_RetentionPolicy (ID) VALUES ('O');
-
-REPLACE INTO config_AccessLatency (ID) VALUES ('O');
-REPLACE INTO config_AccessLatency (ID) VALUES ('N');
-
---
--- StoRM Backend DATABASE
--- storm_be_ISAM
---
-
-CREATE DATABASE IF NOT EXISTS storm_be_ISAM;
-USE storm_be_ISAM;
-
-CREATE TABLE IF NOT EXISTS db_version (
- ID int NOT NULL auto_increment,
- major int,
- minor int,
- revision int,
- description VARCHAR(100),
- primary key (ID)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-DELETE FROM storm_be_ISAM.db_version;
-INSERT INTO storm_be_ISAM.db_version (major,minor,revision,description) VALUES (1,1,0,'27 May 2011');
-
---
--- Table structure for table `storage_space`
---
-CREATE TABLE IF NOT EXISTS `storage_space` (
- `SS_ID` bigint(20) NOT NULL auto_increment,
- `USERDN` VARCHAR(150) NOT NULL default '',
- `VOGROUP` VARCHAR(20) NOT NULL default '',
- `ALIAS` VARCHAR(100) default NULL,
- `SPACE_TOKEN` VARCHAR(100) BINARY NOT NULL default '',
- `CREATED` TIMESTAMP NOT NULL default CURRENT_TIMESTAMP,
- `TOTAL_SIZE` bigint(20) NOT NULL default '0',
- `GUAR_SIZE` bigint(20) NOT NULL default '0',
- `FREE_SIZE` bigint(20) default NULL default '-1',
- `SPACE_FILE` VARCHAR(145) NOT NULL default '',
- `STORAGE_INFO` VARCHAR(255) default NULL,
- `LIFETIME` bigint(20) default NULL,
- `SPACE_TYPE` VARCHAR(10) NOT NULL default '',
- `USED_SIZE` bigint(20) NOT NULL default '-1',
- `BUSY_SIZE` bigint(20) NOT NULL default '-1',
- `UNAVAILABLE_SIZE` bigint(20) NOT NULL default '-1',
- `AVAILABLE_SIZE` bigint(20) NOT NULL default '-1',
- `RESERVED_SIZE` bigint(20) NOT NULL default '-1',
- `UPDATE_TIME` TIMESTAMP NOT NULL default '1970-01-02 00:00:00',
- PRIMARY KEY (`SS_ID`),
- INDEX ALIAS_index (`ALIAS`),
- INDEX TOKEN_index (`SPACE_TOKEN`),
- KEY `SPACE_NAME` (`SPACE_TOKEN`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-
-
---
--- Table structure for table `tape_recall`
---
-CREATE TABLE IF NOT EXISTS tape_recall (
- taskId CHAR(36) NOT NULL,
- requestToken VARCHAR(255) BINARY NOT NULL,
- requestType CHAR(4),
- fileName text not null,
- pinLifetime int,
- status int,
- voName VARCHAR(255) BINARY,
- userID VARCHAR(255) BINARY,
- retryAttempt int,
- timeStamp datetime not null,
- deferredStartTime datetime not null,
- groupTaskId CHAR(36) NOT NULL,
- inProgressTime datetime,
- finalStatusTime datetime,
- primary key (taskId , requestToken)) ENGINE=InnoDB;
-
-ALTER TABLE tape_recall
- ADD INDEX deferredStartTime (deferredStartTime),
- ADD INDEX groupTaskId_index (groupTaskId);
\ No newline at end of file
diff --git a/etc/db/storm_mysql_update_from_1.7.0_to_1.7.1.sql b/etc/db/storm_mysql_update_from_1.7.0_to_1.7.1.sql
deleted file mode 100644
index de72ab900..000000000
--- a/etc/db/storm_mysql_update_from_1.7.0_to_1.7.1.sql
+++ /dev/null
@@ -1,4 +0,0 @@
-DELETE FROM storm_db.db_version;
-INSERT INTO storm_db.db_version (major,minor,revision,description) VALUES (1,7,1,'27 Jan 2015');
-
-INSERT INTO storm_db.config_Protocols VALUES ('xroot');
\ No newline at end of file
diff --git a/etc/db/storm_mysql_update_from_1.7.1_to_1.7.2.sql b/etc/db/storm_mysql_update_from_1.7.1_to_1.7.2.sql
deleted file mode 100644
index 017115a14..000000000
--- a/etc/db/storm_mysql_update_from_1.7.1_to_1.7.2.sql
+++ /dev/null
@@ -1,5 +0,0 @@
-DELETE FROM storm_db.db_version;
-INSERT INTO storm_db.db_version (major,minor,revision,description) VALUES (1,7,2,'10 Mar 2015');
-
-CREATE INDEX statusCodeGet_index on storm_db.status_Get (statusCode);
-
diff --git a/etc/storm.properties.template b/etc/storm.properties.template
index 43bde6974..a4dd5358e 100644
--- a/etc/storm.properties.template
+++ b/etc/storm.properties.template
@@ -15,7 +15,7 @@
# ============================
# StoRM Service DNS
# ============================
-# hostname with which the service is published
+# Hostname with which the SRM service is published in the BDII
storm.service.FE-public.hostname =
@@ -49,10 +49,18 @@ storm.service.SURL.default-ports =
# FE/BE communication RDBMS
# ============================
#
-# Parameters to connect to the DB used as channel for the requests.
-storm.service.request-db.host =
-storm.service.request-db.username =
-storm.service.request-db.passwd =
+# Parameters to connect to the DB used as communication channel for requests.
+# Deprecated since v1.12.0:
+# storm.service.request-db.host =
+# storm.service.request-db.username =
+# storm.service.request-db.passwd =
+# storm.service.request-db.properties =
+# Added v1.12.0
+storm.service.db.host =
+storm.service.db.username =
+storm.service.db.password =
+storm.service.db.port =
+storm.service.db.properties =
#############################################
############ PROFILE PARAMETERS ############
@@ -104,8 +112,24 @@ default.storagetype = P
# ============================
# BE-private RDBMS
# ============================
-persistence.internal-db.connection-pool.maxActive = 50
-persistence.internal-db.connection-pool.maxWait = 50
+
+# Removed by 1.12.0
+# persistence.internal-db.connection-pool.maxActive = 50
+# persistence.internal-db.connection-pool.maxWait = 50
+# persistence.internal-db.connection-pool.size = 50
+
+# Added with 1.12.0:
+
+# Maximum time in milliseconds that the pool waits for a connection to become available before failing; a negative value (e.g. -1) means wait indefinitely.
+storm.service.db.pool.maxWaitMillis = -1
+storm.service.db.pool.testOnBorrow = true
+storm.service.db.pool.testWhileIdle = true
+
+storm.service.db.pool.stormdb.maxTotal = 500
+storm.service.db.pool.stormdb.minIdle = 50
+
+storm.service.db.pool.stormbeisam.maxTotal = 200
+storm.service.db.pool.stormbeisam.minIdle = 10
# ============================
@@ -132,8 +156,8 @@ scheduler.chunksched.copy.queueSize=500
# ============================
# ASYNCH PICKER Component parameters
# ============================
-asynch.db.ReconnectPeriod=18000
-asynch.db.DelayPeriod=30
+# asynch.db.ReconnectPeriod=18000 (removed since v1.12.0)
+# asynch.db.DelayPeriod=30 (removed since v1.12.0)
asynch.PickingInitialDelay=1
# Polling time in seconds for pick up new requests from DB
asynch.PickingTimeInterval=2
@@ -211,7 +235,15 @@ purge.size=800
#Time after that the GC consider a _terminated_ request as garbage
#Default: 21600s (6h)
expired.request.time=21600
-
+#
+#Time after which the GC considers an _in-progress_ PtP request as terminated
+#Deprecates "expired.inprogress.time"
+#Default: 2592000s (720h)
+expired.inprogress.ptp.time=2592000
+#
+#Time after which the GC considers an _in-progress_ BoL request as terminated
+#Default: 2592000s (720h)
+expired.inprogress.bol.time=2592000
# ==========================================================
# Expired-Put-Requests-Agent parameters
@@ -229,6 +261,9 @@ transit.delay = 10
# Skip ACL setup for PtG requests
ptg.skip-acl-setup = false
+# Skip ACL setup for PtP requests
+ptp.skip-acl-setup = false
+
# The caching policy for successful name lookups from the name service.
# The value is specified as integer to indicate the number of seconds to cache the successful lookup.
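The new storm.service.db.pool.* keys follow Apache Commons DBCP2 / commons-pool2 naming (maxTotal, minIdle, maxWaitMillis, testOnBorrow, testWhileIdle). A minimal sketch of how such properties could be wired into a DBCP2 data source follows; the use of DBCP2 and the helper class are assumptions for illustration, not code from this changeset.

import java.util.Properties;

import org.apache.commons.dbcp2.BasicDataSource;

// Hypothetical wiring of storm.service.db.* into a DBCP2 pool (storm_db shown;
// the stormbeisam pool would differ only in property prefix and database name).
public class StormDbPoolSketch {

  public static BasicDataSource fromProperties(Properties p) {
    BasicDataSource ds = new BasicDataSource();
    ds.setUrl("jdbc:mysql://" + p.getProperty("storm.service.db.host", "localhost")
        + ":" + p.getProperty("storm.service.db.port", "3306") + "/storm_db");
    ds.setUsername(p.getProperty("storm.service.db.username"));
    ds.setPassword(p.getProperty("storm.service.db.password"));
    // Pool-wide settings
    ds.setMaxWaitMillis(Long.parseLong(
        p.getProperty("storm.service.db.pool.maxWaitMillis", "-1")));
    ds.setTestOnBorrow(Boolean.parseBoolean(
        p.getProperty("storm.service.db.pool.testOnBorrow", "true")));
    ds.setTestWhileIdle(Boolean.parseBoolean(
        p.getProperty("storm.service.db.pool.testWhileIdle", "true")));
    // Per-database sizing
    ds.setMaxTotal(Integer.parseInt(
        p.getProperty("storm.service.db.pool.stormdb.maxTotal", "500")));
    ds.setMinIdle(Integer.parseInt(
        p.getProperty("storm.service.db.pool.stormdb.minIdle", "50")));
    return ds;
  }
}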
diff --git a/pom.xml b/pom.xml
index de61913c6..af651a4fb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -29,7 +29,7 @@
2.25.1
1.1
8.1.9.v20130131
- 3.3.0
+ 5.14.0
20080701
4.13.2
1.2.3
@@ -56,7 +56,7 @@
11
- 1.0.7
+ 2.0.0
UTF-8
@@ -158,7 +158,6 @@
maven-assembly-plugin
${plugin.assembly.version}
- storm-backend-server
false
${project.build.directory}
@@ -307,6 +306,12 @@
       <version>${jnaVersion}</version>
     </dependency>
+    <dependency>
+      <groupId>net.java.dev.jna</groupId>
+      <artifactId>jna-platform</artifactId>
+      <version>${jnaVersion}</version>
+    </dependency>
     <dependency>
       <groupId>org.codehaus.jettison</groupId>
       <artifactId>jettison</artifactId>
@@ -490,4 +495,4 @@
-</project>
\ No newline at end of file
+</project>
diff --git a/src/main/assemblies/assembly.xml b/src/main/assemblies/assembly.xml
index 48771a721..69a77b817 100644
--- a/src/main/assemblies/assembly.xml
+++ b/src/main/assemblies/assembly.xml
@@ -47,19 +47,6 @@
       <outputDirectory>etc/storm/backend-server</outputDirectory>
     </fileSet>
-    <fileSet>
-      <directory>etc/db</directory>
-      <includes>
-        <include>storm_be_ISAM_mysql_update_from_1.0.0_to_1.1.0.sql</include>
-        <include>storm_mysql_grant.sql</include>
-        <include>storm_mysql_tbl.sql</include>
-        <include>storm_mysql_update_from_1.7.0_to_1.7.1.sql</include>
-        <include>storm_mysql_update_from_1.7.1_to_1.7.2.sql</include>
-      </includes>
-      <outputDirectory>etc/storm/backend-server/db</outputDirectory>
-    </fileSet>
     <fileSet>
       <directory>src</directory>
@@ -84,12 +71,6 @@
-    <fileSet>
-      <outputDirectory>etc/storm/backend-server/db</outputDirectory>
-      <directoryMode>0755</directoryMode>
-    </fileSet>
     <fileSet>
       <outputDirectory>usr/share/java/storm-backend-server</outputDirectory>
diff --git a/src/main/java/it/grid/storm/Main.java b/src/main/java/it/grid/storm/Main.java
index 7e636ad5a..f5f439756 100644
--- a/src/main/java/it/grid/storm/Main.java
+++ b/src/main/java/it/grid/storm/Main.java
@@ -6,20 +6,57 @@
import static java.lang.System.exit;
+import java.io.IOException;
+
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.commons.configuration.ConfigurationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.xml.sax.SAXException;
+import it.grid.storm.config.StormConfiguration;
+import it.grid.storm.namespace.Namespace;
+import it.grid.storm.namespace.NamespaceException;
+import it.grid.storm.startup.Bootstrap;
import it.grid.storm.startup.BootstrapException;
public class Main {
private static final Logger log = LoggerFactory.getLogger(Main.class);
+ public static final String DEFAULT_CONFIG_DIR = "/etc/storm/backend-server";
+ public static final String DEFAULT_CONFIG_FILE = DEFAULT_CONFIG_DIR + "/storm.properties";
+ public static final String DEFAULT_NAMESPACE_FILE = DEFAULT_CONFIG_DIR + "/namespace.xml";
+ public static final String DEFAULT_NAMESPACE_SCHEMA_FILE =
+ DEFAULT_CONFIG_DIR + "/namespace-1.5.0.xsd";
+ public static final String DEFAULT_LOGGING_FILE = DEFAULT_CONFIG_DIR + "/logging.xml";
+
private Main() {}
public static void main(String[] args) {
- StoRM storm = new StoRM();
+ log.info("Configure logging from {} ...", DEFAULT_LOGGING_FILE);
+ Bootstrap.configureLogging(DEFAULT_LOGGING_FILE);
+
+
+ log.info("Load configuration from {} ...", DEFAULT_CONFIG_FILE);
+ try {
+ StormConfiguration.init(DEFAULT_CONFIG_FILE);
+ } catch (IOException | ConfigurationException e) {
+ log.error(e.getMessage(), e);
+ exit(1);
+ }
+
+ log.info("Load namespace from {} ...", DEFAULT_NAMESPACE_FILE);
+ try {
+ Namespace.init(DEFAULT_NAMESPACE_FILE, true);
+ } catch (RuntimeException | NamespaceException | ConfigurationException | ParserConfigurationException | SAXException | IOException e) {
+ log.error(e.getMessage(), e);
+ exit(1);
+ }
+
+ StoRM storm = new StoRM(StormConfiguration.getInstance(), Namespace.getInstance());
try {
storm.init();
diff --git a/src/main/java/it/grid/storm/StoRM.java b/src/main/java/it/grid/storm/StoRM.java
index 76d441167..e0238456a 100644
--- a/src/main/java/it/grid/storm/StoRM.java
+++ b/src/main/java/it/grid/storm/StoRM.java
@@ -19,35 +19,37 @@
import it.grid.storm.asynch.AdvancedPicker;
import it.grid.storm.catalogs.ReservedSpaceCatalog;
-import it.grid.storm.catalogs.StoRMDataSource;
-import it.grid.storm.catalogs.timertasks.ExpiredPutRequestsAgent;
+import it.grid.storm.catalogs.executors.RequestFinalizerService;
+import it.grid.storm.catalogs.timertasks.RequestsGarbageCollector;
import it.grid.storm.check.CheckManager;
import it.grid.storm.check.CheckResponse;
import it.grid.storm.check.CheckStatus;
import it.grid.storm.check.SimpleCheckManager;
-import it.grid.storm.config.Configuration;
-import it.grid.storm.health.HealthDirector;
+import it.grid.storm.check.sanity.filesystem.SupportedFSType;
+import it.grid.storm.config.StormConfiguration;
+import it.grid.storm.health.HealthMonitor;
import it.grid.storm.info.du.DiskUsageService;
import it.grid.storm.metrics.StormMetricsReporter;
-import it.grid.storm.namespace.NamespaceDirector;
-import it.grid.storm.namespace.NamespaceInterface;
+import it.grid.storm.namespace.Namespace;
+import it.grid.storm.namespace.NamespaceException;
+import it.grid.storm.namespace.model.Property;
+import it.grid.storm.namespace.model.Property.SizeUnitType;
+import it.grid.storm.namespace.model.Quota;
import it.grid.storm.namespace.model.VirtualFS;
import it.grid.storm.rest.RestServer;
+import it.grid.storm.space.SpaceHelper;
+import it.grid.storm.space.gpfsquota.GPFSFilesetQuotaInfo;
import it.grid.storm.space.gpfsquota.GPFSQuotaManager;
+import it.grid.storm.space.gpfsquota.GetGPFSFilesetQuotaInfoCommand;
+import it.grid.storm.srm.types.TSizeInBytes;
+import it.grid.storm.srm.types.TSpaceToken;
import it.grid.storm.startup.Bootstrap;
import it.grid.storm.startup.BootstrapException;
import it.grid.storm.synchcall.SimpleSynchcallDispatcher;
+import it.grid.storm.util.GPFSSizeHelper;
import it.grid.storm.xmlrpc.StoRMXmlRpcException;
import it.grid.storm.xmlrpc.XMLRPCHttpServer;
-/**
- * This class represents a StoRM as a whole: it sets the configuration file which contains
- * properties necessary for other classes of StoRM, it sets up logging, as well as the advanced
- * picker.
- *
- * @author EGRID - ICTP Trieste; INFN - CNAF Bologna @date March 28th, 2005 @version 7.0
- */
-
public class StoRM {
private static final Logger log = LoggerFactory.getLogger(StoRM.class);
@@ -56,54 +58,70 @@ public class StoRM {
private XMLRPCHttpServer xmlrpcServer;
// Timer object in charge to call periodically the Space Garbage Collector
- private final Timer gc = new Timer();
+ private final Timer gc;
private TimerTask cleaningTask;
- private boolean isSpaceGCRunning = false;
+ private boolean isSpaceGCRunning;
/*
- * Timer object in charge of transit expired put requests from SRM_SPACE_AVAILABLE to
- * SRM_FILE_LIFETIME_EXPIRED and from SRM_REQUEST_INPROGRESS to SRM_FAILURE
+ * Agent in charge of transit expired ptg/ptp/bol requests to final statuses
*/
- private final Timer transiter = new Timer();
- private TimerTask expiredAgent;
- private boolean isExpiredAgentRunning = false;
-
- private boolean isDiskUsageServiceEnabled = false;
+ private RequestFinalizerService expiredAgent;
+ private boolean isExpiredAgentRunning;
+
+ /* Requests Garbage Collector */
+ private final Timer rgc;
+ private TimerTask rgcTask;
+ private boolean isRequestGCRunning;
+
+ private boolean isDiskUsageServiceEnabled;
private DiskUsageService duService;
+ private boolean isPickerRunning;
+ private boolean isXmlrpcServerRunning;
+
+ private boolean isRestServerRunning;
+ private RestServer restServer;
+
+ private final StormConfiguration config;
private final ReservedSpaceCatalog spaceCatalog;
+ private final Namespace namespace;
- private boolean isPickerRunning = false;
- private boolean isXmlrpcServerRunning = false;
+ public StoRM(StormConfiguration config, Namespace namespace) {
- private boolean isRestServerRunning = false;
- private RestServer restServer;
+ this.config = config;
+ this.namespace = namespace;
+ this.spaceCatalog = ReservedSpaceCatalog.getInstance();
- private final Configuration config;
+ this.picker = new AdvancedPicker();
+ this.isPickerRunning = false;
- public StoRM() {
+ this.isXmlrpcServerRunning = false;
- config = Configuration.getInstance();
- picker = new AdvancedPicker();
- spaceCatalog = new ReservedSpaceCatalog();
+ this.isRestServerRunning = false;
+ this.gc = new Timer();
+ this.isSpaceGCRunning = false;
+ this.isExpiredAgentRunning = false;
+
+ this.rgc = new Timer();
+ this.isRequestGCRunning = false;
+
+ this.isDiskUsageServiceEnabled = false;
}
public void init() throws BootstrapException {
configureIPv6();
- configureLogging();
+ handleTotalOnlineSizeFromGPFSQuota();
+
+ updateSA();
configureSecurity();
configureMetricsReporting();
- configureStoRMDataSource();
-
- loadNamespaceConfiguration();
-
- HealthDirector.initializeDirector(false);
+ HealthMonitor.init();
loadPathAuthzDBConfiguration();
@@ -126,11 +144,84 @@ private void configureIPv6() {
log.info("java.net.preferIPv6Addresses is {}", System.getProperty("java.net.preferIPv6Addresses"));
}
- private void configureLogging() {
+ private void handleTotalOnlineSizeFromGPFSQuota() {
+
+ namespace.getAllDefinedVFS().forEach(storageArea -> {
+ if (SupportedFSType.parseFS(storageArea.getFSType()) == SupportedFSType.GPFS) {
+ Quota quota = storageArea.getCapabilities().getQuota();
+ if (quota != null && quota.getEnabled()) {
+
+ GPFSFilesetQuotaInfo quotaInfo = getGPFSQuotaInfo(storageArea);
+ if (quotaInfo != null) {
+ updateTotalOnlineSizeFromGPFSQuota(storageArea, quotaInfo);
+ }
+ }
+ }
+ });
+ }
+
+ private GPFSFilesetQuotaInfo getGPFSQuotaInfo(VirtualFS storageArea) {
+
+ GetGPFSFilesetQuotaInfoCommand cmd = new GetGPFSFilesetQuotaInfoCommand(storageArea);
+
+ try {
+ return cmd.call();
+ } catch (Throwable t) {
+ log.warn(
+ "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml "
+ + "for Storage Area {}. Reason: {}",
+ storageArea.getAliasName(), t.getMessage());
+ return null;
+ }
+ }
+
+ private void updateTotalOnlineSizeFromGPFSQuota(VirtualFS storageArea,
+ GPFSFilesetQuotaInfo quotaInfo) {
+
+ long gpfsTotalOnlineSize = GPFSSizeHelper.getBytesFromKIB(quotaInfo.getBlockSoftLimit());
+ Property newProperties = Property.from(storageArea.getProperties());
+ try {
+ newProperties.setTotalOnlineSize(SizeUnitType.BYTE.getTypeName(), gpfsTotalOnlineSize);
+ storageArea.setProperties(newProperties);
+ log.warn("TotalOnlineSize as specified in namespace.xml will be ignored "
+ + "since quota is enabled on the GPFS {} Storage Area.", storageArea.getAliasName());
+ } catch (NamespaceException e) {
+ log.warn(
+ "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml "
+ + "for Storage Area {}.",
+ storageArea.getAliasName(), e);
+ }
+ }
+
+ private void updateSA() {
+
+ SpaceHelper spaceHelp = new SpaceHelper();
+ log.debug("Updating Space Catalog with Storage Area defined within NAMESPACE");
+    namespace.getAllDefinedVFS().forEach(vfs -> {
+
+ String vfsAliasName = vfs.getAliasName();
+ log.debug(" Considering VFS : {}", vfsAliasName);
+ String aliasName = vfs.getSpaceTokenDescription();
+ if (aliasName == null) {
+ // Found a VFS without the optional element Space Token Description
+ log.debug(
+ "XMLNamespaceParser.UpdateSA() : Found a VFS ('{}') without space-token-description. "
+ + "Skipping the Update of SA",
+ vfsAliasName);
+ } else {
+ TSizeInBytes onlineSize = vfs.getProperties().getTotalOnlineSize();
+ String spaceFileName = vfs.getRootPath();
+ TSpaceToken spaceToken = spaceHelp.createVOSA_Token(aliasName, onlineSize, spaceFileName);
+ vfs.setSpaceToken(spaceToken);
+
+ log.debug(" Updating SA ('{}'), token:'{}', onlineSize:'{}', spaceFileName:'{}'", aliasName,
+ spaceToken, onlineSize, spaceFileName);
+ }
+
+ });
+ spaceHelp.purgeOldVOSA_token();
+ log.debug("Updating Space Catalog... DONE!!");
- String configurationDir = config.configurationDir();
- String logFile = configurationDir + "logging.xml";
- Bootstrap.configureLogging(logFile);
}
private void configureSecurity() {
@@ -155,12 +246,6 @@ private void configureMetricsReporting() {
}
- private void loadNamespaceConfiguration() {
-
- NamespaceDirector.initializeDirector();
-
- }
-
private void loadPathAuthzDBConfiguration() throws BootstrapException {
String pathAuthzDBFileName = config.configurationDir() + "path-authz.db";
@@ -214,11 +299,6 @@ private void performSanityChecks() throws BootstrapException {
}
- private void configureStoRMDataSource() {
-
- StoRMDataSource.init();
- }
-
/**
* Method used to start the picker.
*/
@@ -284,11 +364,11 @@ public synchronized void stopXmlRpcServer() {
private void configureRestService() {
- int restServicePort = Configuration.getInstance().getRestServicesPort();
- boolean isTokenEnabled = Configuration.getInstance().getXmlRpcTokenEnabled();
- String token = Configuration.getInstance().getXmlRpcToken();
- int maxThreads = Configuration.getInstance().getRestServicesMaxThreads();
- int maxQueueSize = Configuration.getInstance().getRestServicesMaxQueueSize();
+ int restServicePort = StormConfiguration.getInstance().getRestServicesPort();
+ boolean isTokenEnabled = StormConfiguration.getInstance().getXmlRpcTokenEnabled();
+ String token = StormConfiguration.getInstance().getXmlRpcToken();
+ int maxThreads = StormConfiguration.getInstance().getRestServicesMaxThreads();
+ int maxQueueSize = StormConfiguration.getInstance().getRestServicesMaxQueueSize();
restServer = new RestServer(restServicePort, maxThreads, maxQueueSize, isTokenEnabled, token);
}
@@ -388,14 +468,6 @@ public synchronized boolean spaceGCIsRunning() {
return isSpaceGCRunning;
}
- /**
- * Starts the internal timer needed to periodically check and transit requests whose pinLifetime
- * has expired and are in SRM_SPACE_AVAILABLE, to SRM_FILE_LIFETIME_EXPIRED. Moreover, the
- * physical file corresponding to the SURL gets removed; then any JiT entry gets removed, except
- * those on traverse for the parent directory; finally any volatile entry gets removed too. This
- * internal timer also transit requests whose status is still SRM_REQUEST_INPROGRESS after a
- * configured period to SRM_FAILURE.
- */
public synchronized void startExpiredAgent() {
if (isExpiredAgentRunning) {
@@ -403,16 +475,8 @@ public synchronized void startExpiredAgent() {
return;
}
- /* Delay time before starting cleaning thread! Set to 1 minute */
- final long delay = config.getTransitInitialDelay() * 1000L;
- /* Period of execution of cleaning! Set to 1 hour */
- final long period = config.getTransitTimeInterval() * 1000L;
- /* Expiration time before starting move in-progress requests to failure */
- final long inProgressExpirationTime = config.getInProgressPutRequestExpirationTime();
-
log.debug("Starting Expired Agent.");
- expiredAgent = new ExpiredPutRequestsAgent(inProgressExpirationTime);
- transiter.scheduleAtFixedRate(expiredAgent, delay, period);
+ expiredAgent = new RequestFinalizerService(config);
isExpiredAgentRunning = true;
log.debug("Expired Agent started.");
}
@@ -426,7 +490,7 @@ public synchronized void stopExpiredAgent() {
log.debug("Stopping Expired Agent.");
if (expiredAgent != null) {
- expiredAgent.cancel();
+ expiredAgent.stop();
}
log.debug("Expired Agent stopped.");
isExpiredAgentRunning = false;
@@ -441,7 +505,7 @@ private void configureDiskUsageService() {
isDiskUsageServiceEnabled = config.getDiskUsageServiceEnabled();
- NamespaceInterface namespace = NamespaceDirector.getNamespace();
+ Namespace namespace = Namespace.getInstance();
List quotaEnabledVfs = namespace.getVFSWithQuotaEnabled();
List sas = namespace.getAllDefinedVFS()
.stream()
@@ -496,6 +560,40 @@ public synchronized void stopDiskUsageService() {
}
}
+ public synchronized void startRequestGarbageCollector() {
+
+ if (isRequestGCRunning) {
+ log.debug("Requests Garbage Collector is already running.");
+ return;
+ }
+
+ /* Delay time before starting cleaning thread */
+ final long delay = config.getCleaningInitialDelay() * 1000L;
+ /* Period of execution of cleaning */
+ final long period = config.getCleaningTimeInterval() * 1000L;
+
+ log.debug("Starting Requests Garbage Collector .");
+ rgcTask = new RequestsGarbageCollector(rgc, period);
+ rgc.schedule(rgcTask, delay);
+ isRequestGCRunning = true;
+ log.debug("Requests Garbage Collector started.");
+ }
+
+ public synchronized void stopRequestGarbageCollector() {
+
+ if (!isRequestGCRunning) {
+ log.debug("Requests Garbage Collector is not running.");
+ return;
+ }
+
+ log.debug("Stopping Requests Garbage Collector.");
+ if (rgcTask != null) {
+ rgcTask.cancel();
+ }
+ log.debug("Requests Garbage Collector stopped.");
+ isRequestGCRunning = false;
+ }
+
public void startServices() throws Exception {
startPicker();
@@ -503,6 +601,7 @@ public void startServices() throws Exception {
startRestServer();
startSpaceGC();
startExpiredAgent();
+ startRequestGarbageCollector();
startDiskUsageService();
}
@@ -513,6 +612,7 @@ public void stopServices() {
stopRestServer();
stopSpaceGC();
stopExpiredAgent();
+ stopRequestGarbageCollector();
stopDiskUsageService();
GPFSQuotaManager.INSTANCE.shutdown();
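The Requests Garbage Collector is armed with a one-shot rgc.schedule(rgcTask, delay), while the task itself receives the Timer and the period; this suggests the task re-arms itself after each run, since a java.util.TimerTask instance cannot be scheduled twice. A minimal sketch of that pattern follows, assuming this is how RequestsGarbageCollector behaves; the class below is illustrative, not the StoRM implementation.

import java.util.Timer;
import java.util.TimerTask;

class SelfReschedulingGc extends TimerTask {

  private final Timer timer;
  private final long periodMillis;

  SelfReschedulingGc(Timer timer, long periodMillis) {
    this.timer = timer;
    this.periodMillis = periodMillis;
  }

  @Override
  public void run() {
    try {
      purgeGarbageRequests(); // placeholder for one collection round
    } finally {
      // Re-arm with a fresh instance: a TimerTask cannot be scheduled twice.
      timer.schedule(new SelfReschedulingGc(timer, periodMillis), periodMillis);
    }
  }

  private void purgeGarbageRequests() {
    // intentionally empty in this sketch
  }
}

Started the same way startRequestGarbageCollector() does it: timer.schedule(new SelfReschedulingGc(timer, period), delay).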
diff --git a/src/main/java/it/grid/storm/acl/AclManager.java b/src/main/java/it/grid/storm/acl/AclManager.java
index 42b5a831c..5a1f89e36 100644
--- a/src/main/java/it/grid/storm/acl/AclManager.java
+++ b/src/main/java/it/grid/storm/acl/AclManager.java
@@ -8,11 +8,6 @@
import it.grid.storm.filesystem.LocalFile;
import it.grid.storm.griduser.LocalUser;
-/**
- * @author Michele Dibenedetto
- *
- */
-
public interface AclManager {
/**
@@ -24,7 +19,7 @@ public interface AclManager {
* a not existent file
*/
FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
+ FilesystemPermission permission);
/**
* @param localFile an existent file if received null parameters or the LocalFile object refers to
@@ -36,7 +31,7 @@ FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser localUs
* a not existent file
*/
FilesystemPermission grantUserPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
+ FilesystemPermission permission);
/**
* @param localFile an existent file
@@ -45,8 +40,7 @@ FilesystemPermission grantUserPermission(LocalFile localFile, LocalUser localUse
* @throws IllegalArgumentException if received null parameters or the LocalFile object refers to
* a not existent file
*/
- FilesystemPermission removeGroupPermission(LocalFile localFile, LocalUser localUser)
- throws IllegalArgumentException;
+ FilesystemPermission removeGroupPermission(LocalFile localFile, LocalUser localUser);
/**
* @param localFile an existent file
@@ -55,8 +49,7 @@ FilesystemPermission removeGroupPermission(LocalFile localFile, LocalUser localU
* @throws IllegalArgumentException if received null parameters or the LocalFile object refers to
* a not existent file
*/
- FilesystemPermission removeUserPermission(LocalFile localFile, LocalUser localUser)
- throws IllegalArgumentException;
+ FilesystemPermission removeUserPermission(LocalFile localFile, LocalUser localUser);
/**
* @param localFile an existent file
@@ -67,7 +60,7 @@ FilesystemPermission removeUserPermission(LocalFile localFile, LocalUser localUs
* a not existent file
*/
FilesystemPermission revokeGroupPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
+ FilesystemPermission permission);
/**
* @param localFile an existent file
@@ -78,7 +71,7 @@ FilesystemPermission revokeGroupPermission(LocalFile localFile, LocalUser localU
* a not existent file
*/
FilesystemPermission revokeUserPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
+ FilesystemPermission permission);
/**
* @param localFile an existent file
@@ -89,7 +82,7 @@ FilesystemPermission revokeUserPermission(LocalFile localFile, LocalUser localUs
* a not existent file
*/
FilesystemPermission setGroupPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
+ FilesystemPermission permission);
/**
* @param localFile an existent file
@@ -100,14 +93,14 @@ FilesystemPermission setGroupPermission(LocalFile localFile, LocalUser localUser
* a not existent file
*/
FilesystemPermission setUserPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
+ FilesystemPermission permission);
/**
* @param localFile an existent file
* @throws IllegalArgumentException if received null parameters or the LocalFile object refers to
* a not existent file
*/
- void removeHttpsPermissions(LocalFile localFile) throws IllegalArgumentException;
+ void removeHttpsPermissions(LocalFile localFile);
/**
* @param localFile an existent file
@@ -117,7 +110,7 @@ FilesystemPermission setUserPermission(LocalFile localFile, LocalUser localUser,
* a not existent file
*/
void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
+ FilesystemPermission permission);
/**
* @param localFile an existent file
@@ -137,7 +130,7 @@ void grantHttpsServiceGroupPermission(LocalFile localFile, FilesystemPermission
* a not existent file
*/
void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
+ FilesystemPermission permission);
/**
* @param localFile an existent file
@@ -146,8 +139,7 @@ void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser,
* @throws IllegalArgumentException if received null parameters or the LocalFile object refers to
* a not existent file
*/
- void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission)
- throws IllegalArgumentException;
+ void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission);
/**
* @param oldLocalFile an existent source file
@@ -155,7 +147,6 @@ void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission p
* @throws IllegalArgumentException if received null parameters or the LocalFile objects refers to
* not existent files
*/
- void moveHttpsPermissions(LocalFile oldLocalFile, LocalFile newLocalFile)
- throws IllegalArgumentException;
+ void moveHttpsPermissions(LocalFile oldLocalFile, LocalFile newLocalFile);
}
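Dropping the throws IllegalArgumentException clauses above changes nothing for callers: IllegalArgumentException is unchecked (a RuntimeException subclass), so the declarations were documentary only and no call site needs a try/catch. A minimal illustration:

// Compiles without any throws clause: unchecked exceptions propagate freely.
class ThrowsClauseDemo {

  void requireNonNull(Object o) {
    if (o == null) {
      throw new IllegalArgumentException("argument must not be null");
    }
  }
}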
diff --git a/src/main/java/it/grid/storm/acl/NoAclManager.java b/src/main/java/it/grid/storm/acl/NoAclManager.java
new file mode 100644
index 000000000..0351ebfa4
--- /dev/null
+++ b/src/main/java/it/grid/storm/acl/NoAclManager.java
@@ -0,0 +1,187 @@
+package it.grid.storm.acl;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import it.grid.storm.filesystem.FilesystemPermission;
+import it.grid.storm.filesystem.LocalFile;
+import it.grid.storm.griduser.LocalUser;
+
+public class NoAclManager implements AclManager {
+
+ @Override
+ public FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser localUser,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to grant group permission on null local file");
+ checkNotNull(localUser, "Unable to grant group permission on null local user");
+ checkNotNull(permission, "Unable to grant group permission with null permission");
+ checkArgument(localFile.exists(),
+ "Unable to grant group permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+
+ return localFile.getEffectiveGroupPermission(localUser);
+ }
+
+ @Override
+ public FilesystemPermission grantUserPermission(LocalFile localFile, LocalUser localUser,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to grant user permission on null local file");
+ checkNotNull(localUser, "Unable to grant user permission on null local user");
+ checkNotNull(permission, "Unable to grant user permission with null permission");
+ checkArgument(localFile.exists(),
+ "Unable to grant user permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+
+ return localFile.getEffectiveUserPermission(localUser);
+ }
+
+ @Override
+ public FilesystemPermission removeGroupPermission(LocalFile localFile, LocalUser localUser) {
+
+ checkNotNull(localFile, "Unable to remove group permission on null local file");
+ checkNotNull(localUser, "Unable to remove group permission on null local user");
+ checkArgument(localFile.exists(),
+ "Unable to remove group permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+
+ return localFile.getEffectiveGroupPermission(localUser);
+ }
+
+ @Override
+ public FilesystemPermission removeUserPermission(LocalFile localFile, LocalUser localUser) {
+
+ checkNotNull(localFile, "Unable to remove user permission on null local file");
+ checkNotNull(localUser, "Unable to remove user permission on null local user");
+ checkArgument(localFile.exists(),
+ "Unable to remove user permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+
+ return localFile.getEffectiveUserPermission(localUser);
+ }
+
+ @Override
+ public FilesystemPermission revokeGroupPermission(LocalFile localFile, LocalUser localUser,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to revoke group permission on null local file");
+ checkNotNull(localUser, "Unable to revoke group permission on null local user");
+ checkNotNull(permission, "Unable to revoke group permission with null permission");
+ checkArgument(localFile.exists(),
+ "Unable to revoke group permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+
+ return localFile.getEffectiveGroupPermission(localUser);
+ }
+
+ @Override
+ public FilesystemPermission revokeUserPermission(LocalFile localFile, LocalUser localUser,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to revoke user permission on null local file");
+ checkNotNull(localUser, "Unable to revoke user permission on null local user");
+ checkNotNull(permission, "Unable to revoke user permission with null permission");
+ checkArgument(localFile.exists(),
+ "Unable to revoke user permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+
+ return localFile.getEffectiveUserPermission(localUser);
+ }
+
+ @Override
+ public FilesystemPermission setGroupPermission(LocalFile localFile, LocalUser localUser,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to set group permission on null local file");
+ checkNotNull(localUser, "Unable to set group permission on null local user");
+ checkNotNull(permission, "Unable to set group permission with null permission");
+ checkArgument(localFile.exists(),
+ "Unable to set group permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+
+ return localFile.getEffectiveGroupPermission(localUser);
+ }
+
+ @Override
+ public FilesystemPermission setUserPermission(LocalFile localFile, LocalUser localUser,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to set user permission on null local file");
+ checkNotNull(localUser, "Unable to set user permission on null local user");
+ checkNotNull(permission, "Unable to set user permission with null permission");
+ checkArgument(localFile.exists(), "Unable to set user permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+
+ return localFile.getEffectiveUserPermission(localUser);
+ }
+
+ @Override
+ public void removeHttpsPermissions(LocalFile localFile) {
+
+ checkNotNull(localFile, "Unable to remove https permission on null local file");
+ checkArgument(localFile.exists(),
+ "Unable to remove httès permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+ }
+
+ @Override
+ public void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to grant https user permission on null local file");
+ checkNotNull(localUser, "Unable to grant https user permission on null local user");
+ checkNotNull(permission, "Unable to grant https user permission with null permission");
+ checkArgument(localFile.exists(),
+ "Unable to grant https user permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+ }
+
+ @Override
+ public void grantHttpsServiceGroupPermission(LocalFile localFile,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to grant https service group permission on null local file");
+ checkNotNull(permission, "Unable to grant https service group permission with null permission");
+ checkArgument(localFile.exists(),
+ "Unable to grant https service group permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+ }
+
+ @Override
+ public void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to grant https group permission on null local file");
+ checkNotNull(localUser, "Unable to grant https group permission on null local user");
+ checkNotNull(permission, "Unable to grant https group permission with null permission");
+ checkArgument(localFile.exists(),
+ "Unable to grant https group permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+ }
+
+ @Override
+ public void grantHttpsServiceUserPermission(LocalFile localFile,
+ FilesystemPermission permission) {
+
+ checkNotNull(localFile, "Unable to grant https service user permission on null local file");
+ checkNotNull(permission, "Unable to grant https service user permission with null permission");
+ checkArgument(localFile.exists(),
+ "Unable to grant https service user permission on a non existent local file: "
+ + localFile.getAbsolutePath());
+ }
+
+ @Override
+ public void moveHttpsPermissions(LocalFile oldLocalFile, LocalFile newLocalFile) {
+
+ checkNotNull(oldLocalFile, "Unable to move https permission on null local source file");
+ checkNotNull(newLocalFile, "Unable to move https permission on null local destination file");
+ checkArgument(oldLocalFile.exists(),
+ "Unable to move https permission on a non existent source local file: "
+ + oldLocalFile.getAbsolutePath());
+ checkArgument(newLocalFile.exists(),
+ "Unable to move https permission on a non existent destination local file: "
+ + newLocalFile.getAbsolutePath());
+ }
+
+}
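
Each method above validates its inputs with Guava's Preconditions before touching the filesystem. A minimal, self-contained sketch of that validation pattern follows (it assumes Guava is on the classpath; the class and method names are illustrative, not part of StoRM):

    import static com.google.common.base.Preconditions.checkArgument;
    import static com.google.common.base.Preconditions.checkNotNull;

    import java.io.File;

    public class PreconditionsSketch {

      // Fails fast with NullPointerException / IllegalArgumentException,
      // mirroring the checkNotNull/checkArgument calls above.
      public static File requireExisting(File file) {
        checkNotNull(file, "Unable to operate on a null file");
        checkArgument(file.exists(),
            "Unable to operate on a non existent file: " + file.getAbsolutePath());
        return file;
      }

      public static void main(String[] args) {
        requireExisting(new File("/tmp")); // passes on most systems
      }
    }
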
diff --git a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java
index 121a32d38..896f196a0 100644
--- a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java
+++ b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java
@@ -5,8 +5,8 @@
package it.grid.storm.asynch;
import it.grid.storm.catalogs.RequestSummaryCatalog;
-import it.grid.storm.catalogs.RequestSummaryData;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.CrusherScheduler;
import it.grid.storm.scheduler.SchedulerException;
import it.grid.storm.scheduler.SchedulerStatus;
@@ -40,10 +40,10 @@ public class AdvancedPicker {
private TimerTask retrievingTask = null;
/* delay time before starting retriever thread, in mssec */
- private final long delay = Configuration.getInstance().getPickingInitialDelay() * 1000;
+ private final long delay = StormConfiguration.getInstance().getPickingInitialDelay() * 1000;
/* period of execution of retrieving, in mssec */
- private final long period = Configuration.getInstance().getPickingTimeInterval() * 1000;
+ private final long period = StormConfiguration.getInstance().getPickingTimeInterval() * 1000;
/* boolean that indicates there is a token to abort! */
private boolean abort = false;
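
AdvancedPicker derives both intervals from StormConfiguration and, judging by the retrievingTask field, feeds them to a java.util.Timer. A minimal sketch of that scheduling pattern, with hard-coded stand-ins for the configured delay and period:

    import java.util.Timer;
    import java.util.TimerTask;

    public class PickerSchedulingSketch {

      public static void main(String[] args) {
        long delay = 1 * 1000L;  // initial delay before the first run, in msec
        long period = 2 * 1000L; // interval between subsequent runs, in msec

        TimerTask retrievingTask = new TimerTask() {
          @Override
          public void run() {
            System.out.println("picking pending requests...");
          }
        };
        // Fixed-rate execution: each run is scheduled relative to the start
        // of the previous one, as with the picker's retriever thread.
        new Timer("picker-sketch").scheduleAtFixedRate(retrievingTask, delay, period);
      }
    }
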
diff --git a/src/main/java/it/grid/storm/asynch/BoL.java b/src/main/java/it/grid/storm/asynch/BoL.java
index 1bd36fcb6..65827d126 100644
--- a/src/main/java/it/grid/storm/asynch/BoL.java
+++ b/src/main/java/it/grid/storm/asynch/BoL.java
@@ -4,24 +4,27 @@
*/
package it.grid.storm.asynch;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import it.grid.storm.authz.AuthzDirector;
import it.grid.storm.authz.SpaceAuthzInterface;
import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.catalogs.BoLData;
-import it.grid.storm.catalogs.RequestData;
+import it.grid.storm.catalogs.TapeRecallCatalog;
import it.grid.storm.catalogs.surl.SURLStatusManager;
import it.grid.storm.catalogs.surl.SURLStatusManagerFactory;
-import it.grid.storm.common.types.SizeUnit;
import it.grid.storm.ea.StormEA;
import it.grid.storm.filesystem.FSException;
import it.grid.storm.filesystem.LocalFile;
import it.grid.storm.griduser.AbstractGridUser;
import it.grid.storm.griduser.GridUserInterface;
import it.grid.storm.namespace.InvalidSURLException;
-import it.grid.storm.namespace.NamespaceDirector;
+import it.grid.storm.namespace.Namespace;
import it.grid.storm.namespace.NamespaceException;
import it.grid.storm.namespace.StoRI;
import it.grid.storm.namespace.UnapprochableSurlException;
+import it.grid.storm.persistence.model.BoLData;
+import it.grid.storm.persistence.model.RequestData;
import it.grid.storm.scheduler.Chooser;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.Streets;
@@ -31,17 +34,13 @@
import it.grid.storm.srm.types.TSizeInBytes;
import it.grid.storm.srm.types.TSpaceToken;
import it.grid.storm.srm.types.TStatusCode;
-import it.grid.storm.tape.recalltable.TapeRecallCatalog;
import it.grid.storm.tape.recalltable.model.TapeRecallStatus;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
* Class that represents a chunk of an srmBringOnLine request: it handles a single file of a
- * multifile/directory-expansion request. StoRM then sends the chunk to a chunk-scheduler. Security
+ * multi-file/directory expansion request. StoRM then sends the chunk to a chunk-scheduler. Security
* checks performed as follows: both in the JiT and AoT approach, policies are checked to see if the
- * Griduser has read rights on the requested SURL. If the AuthorisationCollector replies with an
+ * grid-user has read rights on the requested SURL. If the AuthorisationCollector replies with an
* isDeny, then the request fails with SRM_AUTHORIZATION_FAILURE status. If the
* AuthorisationCollector replies with isIndeterminate, then the request fails with SRM_FAILURE and
* explanation string "Failure in PolicySource prevented PolicyCollector from establishing access
@@ -56,21 +55,21 @@
* the grid credentials get mapped; the TURL finally gets constructed. If the local file does not
* exist the request fails with SRM_INVALID_PATH and corresponding explanation string; if the user
* cannot be mapped locally, the request fails with SRM_FAILURE and an explanation String which
- * includes the DN used for maping; if there are internal problems constructing the TURL again the
+ * includes the DN used for mapping; if there are internal problems constructing the TURL again the
* request fails with SRM_FAILURE. Appropriate error messages get logged. (2) Traverse permissions
* get set on all parent directories to allow access to the file. The operation may fail for several
* reasons: the file or any of the parent directories may have been removed resulting in
- * SRM_INVALID_PATH; StoRM cannot set the requested permissions because a filesystem mask does not
- * allow the permissions to be set up; StoRM may be configured for the wrong filesystem; StoRM has
- * not got the right permissions to manipulate the ACLs on the filesystem; StoRM may have
- * encountered an unexpected error when working with the filesystem. In all these circumstances, the
+ * SRM_INVALID_PATH; StoRM cannot set the requested permissions because a file-system mask does not
+ * allow the permissions to be set up; StoRM may be configured for the wrong file-system; StoRM has
+ * not got the right permissions to manipulate the ACLs on the file-system; StoRM may have
+ * encountered an unexpected error when working with the file-system. In all these circumstances, the
* status changes to SRM_FAILURE, together with an appropriate explanation String, and a respective
* log message. (3) The file size is determined. The operation may fail and hence the request too
* gets failed, in the following circumstances: the file somehow does not exist, the path to the
- * file is not found, an error while communicating with the underlaying FileSystem, or a JVM
+ * file is not found, an error while communicating with the underlying FileSystem, or a JVM
* SecurityManager forbids such operation. In the first two cases the state changes to
* SRM_INVALID_PATH, while in the other ones it changes to SRM_FAILURE; proper error strings explain
- * the situation further. Error messages get logged. (3) If AoT acls are in place, then the
+ * the situation further. Error messages get logged. (4) If AoT ACLs are in place, then the
* PinnedFilesCatalog is asked to pinExistingVolatileEntry, that is, it is asked to pin the entry if
* it is already present thereby extending its lifetime (if it is not present, it just means that
* the requested file is PERMANENT and there is no need to pin it); status changes to
@@ -189,7 +188,7 @@ public void doIt() {
StoRI fileStoRI = null;
try {
- fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, gu);
+ fileStoRI = Namespace.getInstance().resolveStoRIbySURL(surl, gu);
} catch (IllegalArgumentException e) {
log.error(
"Unable to build a stori for surl '{}' and user '{}'. " + "IllegalArgumentException: {}",
@@ -306,7 +305,7 @@ private void manageIsPermit(StoRI fileStoRI) {
StormEA.setPinned(localFile.getAbsolutePath(), expDate);
- requestData.setFileSize(TSizeInBytes.make(localFile.length(), SizeUnit.BYTES));
+ requestData.setFileSize(TSizeInBytes.make(localFile.length()));
if (isStoriOndisk(fileStoRI)) {
@@ -319,7 +318,7 @@ private void manageIsPermit(StoRI fileStoRI) {
if (gu instanceof AbstractGridUser) {
voName = ((AbstractGridUser) gu).getVO().getValue();
}
- new TapeRecallCatalog().insertTask(this, voName, localFile.getAbsolutePath());
+ TapeRecallCatalog.getInstance().insertTask(this, voName, localFile.getAbsolutePath());
backupData(localFile);
}
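
A recurring change in this and the following files is the move from NamespaceDirector.getNamespace() and new TapeRecallCatalog() to getInstance() accessors. A hedged sketch of a lazily-initialized, thread-safe singleton accessor of that shape (the body is illustrative; the actual StoRM implementations may differ):

    public class CatalogSingletonSketch {

      private static volatile CatalogSingletonSketch instance;

      private CatalogSingletonSketch() {
        // expensive setup (connections, caches, ...) would go here
      }

      public static CatalogSingletonSketch getInstance() {
        if (instance == null) {                     // first check, lock-free
          synchronized (CatalogSingletonSketch.class) {
            if (instance == null) {                 // second check, under lock
              instance = new CatalogSingletonSketch();
            }
          }
        }
        return instance;
      }
    }
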
diff --git a/src/main/java/it/grid/storm/asynch/BoLFeeder.java b/src/main/java/it/grid/storm/asynch/BoLFeeder.java
index a947e7ac8..15ec6ee0c 100644
--- a/src/main/java/it/grid/storm/asynch/BoLFeeder.java
+++ b/src/main/java/it/grid/storm/asynch/BoLFeeder.java
@@ -5,19 +5,19 @@
package it.grid.storm.asynch;
import it.grid.storm.catalogs.BoLChunkCatalog;
-import it.grid.storm.catalogs.BoLPersistentChunkData;
-import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException;
import it.grid.storm.catalogs.RequestSummaryCatalog;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException;
import it.grid.storm.namespace.InvalidDescendantsFileRequestException;
import it.grid.storm.namespace.InvalidDescendantsPathRequestException;
import it.grid.storm.namespace.InvalidSURLException;
-import it.grid.storm.namespace.NamespaceDirector;
+import it.grid.storm.namespace.Namespace;
import it.grid.storm.namespace.NamespaceException;
import it.grid.storm.namespace.StoRI;
import it.grid.storm.namespace.UnapprochableSurlException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.model.BoLPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.SchedulerException;
import it.grid.storm.srm.types.InvalidTDirOptionAttributesException;
@@ -234,7 +234,7 @@ private void manageIsDirectory(BoLPersistentChunkData chunkData) {
StoRI stori = null;
try {
- stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, gu);
+ stori = Namespace.getInstance().resolveStoRIbySURL(surl, gu);
} catch (IllegalArgumentException e) {
log.error(
"Unable to build a stori for surl {} for user {}. " + "IllegalArgumentException: {}",
diff --git a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java
index 67a74cb7e..23696a88f 100644
--- a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java
+++ b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java
@@ -5,9 +5,9 @@
package it.grid.storm.asynch;
import it.grid.storm.catalogs.BoLChunkCatalog;
-import it.grid.storm.catalogs.BoLPersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.BoLPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.PersistentRequestChunk;
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.tape.recalltable.model.TapeRecallStatus;
diff --git a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java
index bd41713b3..ae5f16bd4 100644
--- a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java
+++ b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java
@@ -4,8 +4,8 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.ChunkData;
import it.grid.storm.catalogs.RequestSummaryCatalog;
+import it.grid.storm.persistence.model.ChunkData;
import it.grid.storm.srm.types.TRequestToken;
import it.grid.storm.srm.types.TReturnStatus;
import it.grid.storm.srm.types.TStatusCode;
diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java
index 2ffe5b880..90128667d 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java
@@ -4,9 +4,9 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.BoLPersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.BoLPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* This class represents an Exception thrown when a BoLChunk is created with any null attribute:
diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java
index 150c1274c..e25e253dc 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java
@@ -4,8 +4,8 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* Class that represents an Exception thrown when a BoLFeeder could not be created because the
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java
index c8a8241dc..094923526 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java
@@ -4,9 +4,9 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.PersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.PersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* This class represents an Exceptin thrown when a PtPChunk is created with any null attribute:
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java
index 4f2c87c66..eea11cd30 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java
@@ -4,8 +4,8 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.PtGData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.PtGData;
/**
* @author Michele Dibenedetto
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java
index 97f805f00..f9fe22944 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java
@@ -4,9 +4,9 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestSummaryData;
-import it.grid.storm.catalogs.PtGData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.PtGData;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* This class represents an Exceptin thrown when a PtGChunk is created with any null attribute:
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java
index c9e1bb8eb..01450cf52 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java
@@ -4,8 +4,8 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* Class that represents an Exception thrown when a PtGFeeder could not be created because the
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java
index cc565bc26..f61f824bd 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java
@@ -4,8 +4,8 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* Class that represents an Exception thrown when a PtPFeeder could not be created because the
diff --git a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java
index 0616e6376..e7a389e86 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java
@@ -4,8 +4,8 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.RequestData;
/**
* @author Michele Dibenedetto
diff --git a/src/main/java/it/grid/storm/asynch/PtG.java b/src/main/java/it/grid/storm/asynch/PtG.java
index 8396823c1..96d0dcf8a 100644
--- a/src/main/java/it/grid/storm/asynch/PtG.java
+++ b/src/main/java/it/grid/storm/asynch/PtG.java
@@ -17,12 +17,11 @@
import it.grid.storm.authz.SpaceAuthzInterface;
import it.grid.storm.authz.path.model.SRMFileRequest;
import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.catalogs.PtGData;
+import it.grid.storm.catalogs.TapeRecallCatalog;
import it.grid.storm.catalogs.VolatileAndJiTCatalog;
import it.grid.storm.catalogs.surl.SURLStatusManager;
import it.grid.storm.catalogs.surl.SURLStatusManagerFactory;
-import it.grid.storm.common.types.SizeUnit;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
import it.grid.storm.ea.StormEA;
import it.grid.storm.filesystem.FSException;
import it.grid.storm.filesystem.FilesystemPermission;
@@ -32,7 +31,7 @@
import it.grid.storm.griduser.LocalUser;
import it.grid.storm.namespace.InvalidGetTURLProtocolException;
import it.grid.storm.namespace.InvalidSURLException;
-import it.grid.storm.namespace.NamespaceDirector;
+import it.grid.storm.namespace.Namespace;
import it.grid.storm.namespace.NamespaceException;
import it.grid.storm.namespace.StoRI;
import it.grid.storm.namespace.TURLBuildingException;
@@ -42,6 +41,7 @@
import it.grid.storm.namespace.model.Protocol;
import it.grid.storm.namespace.model.VirtualFS;
import it.grid.storm.persistence.exceptions.DataAccessException;
+import it.grid.storm.persistence.model.PtGData;
import it.grid.storm.scheduler.Chooser;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.Streets;
@@ -55,7 +55,6 @@
import it.grid.storm.synchcall.command.CommandHelper;
import it.grid.storm.synchcall.data.DataHelper;
import it.grid.storm.synchcall.data.IdentityInputData;
-import it.grid.storm.tape.recalltable.TapeRecallCatalog;
import it.grid.storm.tape.recalltable.model.TapeRecallStatus;
public class PtG implements Delegable, Chooser, Request, Suspendedable {
@@ -70,12 +69,12 @@ public class PtG implements Delegable, Chooser, Request, Suspendedable {
protected PtGData requestData;
/**
- * Time that wil be used in all jit and volatile tracking.
+ * Time that will be used in all JiT and volatile tracking.
*/
protected final Calendar start;
/**
- * boolean that indicates the state of the shunk is failure
+   * boolean that indicates that the chunk is in a failure state
*/
protected boolean failure = false;
@@ -105,7 +104,7 @@ public PtG(PtGData reqData) throws IllegalArgumentException {
requestData = reqData;
start = Calendar.getInstance();
- if (Configuration.getInstance().getPTGSkipACLSetup()) {
+ if (StormConfiguration.getInstance().getPTGSkipACLSetup()) {
setupACLs = false;
log.debug("Skipping ACL setup on PTG as requested by configuration.");
}
@@ -139,7 +138,7 @@ public void doIt() {
try {
if (!downgradedToAnonymous && requestData instanceof IdentityInputData) {
try {
- fileStoRI = NamespaceDirector.getNamespace()
+ fileStoRI = Namespace.getInstance()
.resolveStoRIbySURL(surl, ((IdentityInputData) requestData).getUser());
} catch (UnapprochableSurlException e) {
unapprochableSurl = true;
@@ -158,7 +157,7 @@ public void doIt() {
}
} else {
try {
- fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(requestData.getSURL());
+ fileStoRI = Namespace.getInstance().resolveStoRIbySURL(requestData.getSURL());
} catch (UnapprochableSurlException e) {
failure = true;
log.info("Unable to build a stori for surl {}. " + "UnapprochableSurlException: {}", surl,
@@ -195,7 +194,7 @@ public void doIt() {
} else {
if (requestData.getTransferProtocols().allows(Protocol.HTTP)) {
try {
- fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(requestData.getSURL());
+ fileStoRI = Namespace.getInstance().resolveStoRIbySURL(requestData.getSURL());
} catch (UnapprochableSurlException e) {
failure = true;
log.info("Unable to build a stori for surl {}. " + "UnapprochableSurlException: {}",
@@ -334,8 +333,7 @@ private void manageIsPermit(StoRI fileStoRI) {
try {
- TSizeInBytes fileSize =
- TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES);
+ TSizeInBytes fileSize = TSizeInBytes.make(fileStoRI.getLocalFile().length());
requestData.setFileSize(fileSize);
log.debug("File size: {}", fileSize);
@@ -369,8 +367,8 @@ private void manageIsPermit(StoRI fileStoRI) {
}
}
try {
- new TapeRecallCatalog().insertTask(this, voName,
- fileStoRI.getLocalFile().getAbsolutePath());
+ TapeRecallCatalog.getInstance()
+ .insertTask(this, voName, fileStoRI.getLocalFile().getAbsolutePath());
} catch (DataAccessException e) {
requestData.changeStatusSRM_FAILURE("Unable to request file recall from tape");
failure = true;
@@ -423,8 +421,7 @@ private void manageIsPermit(StoRI fileStoRI) {
if (canRead) {
try {
- TSizeInBytes fileSize =
- TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES);
+ TSizeInBytes fileSize = TSizeInBytes.make(fileStoRI.getLocalFile().length());
requestData.setFileSize(fileSize);
log.debug("File size: {}", fileSize);
@@ -486,8 +483,9 @@ private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserEx
if (!downgradedToAnonymous && requestData instanceof IdentityInputData) {
- if (!setupACLs)
+ if (!setupACLs) {
return verifyPath(fileStoRI);
+ }
return verifyPath(fileStoRI)
&& setParentsAcl(fileStoRI, ((IdentityInputData) requestData).getUser().getLocalUser());
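
TSizeInBytes.make() now takes only the raw byte count, dropping the SizeUnit argument. A hypothetical sketch of a value type with that single-argument factory shape (ByteSize is an illustrative name, not the real TSizeInBytes):

    public final class ByteSize {

      private final long bytes;

      private ByteSize(long bytes) {
        if (bytes < 0) {
          throw new IllegalArgumentException("negative size: " + bytes);
        }
        this.bytes = bytes;
      }

      // Single-argument factory, mirroring TSizeInBytes.make(localFile.length())
      public static ByteSize make(long bytes) {
        return new ByteSize(bytes);
      }

      public long value() {
        return bytes;
      }
    }
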
diff --git a/src/main/java/it/grid/storm/asynch/PtGBuilder.java b/src/main/java/it/grid/storm/asynch/PtGBuilder.java
index bf89cac09..545e08768 100644
--- a/src/main/java/it/grid/storm/asynch/PtGBuilder.java
+++ b/src/main/java/it/grid/storm/asynch/PtGBuilder.java
@@ -4,13 +4,13 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.AnonymousPtGData;
-import it.grid.storm.catalogs.IdentityPtGData;
-import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException;
-import it.grid.storm.catalogs.InvalidPtGDataAttributesException;
-import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException;
-import it.grid.storm.catalogs.PtGData;
import it.grid.storm.common.types.TURLPrefix;
+import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.model.AnonymousPtGData;
+import it.grid.storm.persistence.model.IdentityPtGData;
+import it.grid.storm.persistence.model.PtGData;
import it.grid.storm.srm.types.TDirOption;
import it.grid.storm.srm.types.TLifeTimeInSeconds;
import it.grid.storm.srm.types.TReturnStatus;
diff --git a/src/main/java/it/grid/storm/asynch/PtGFeeder.java b/src/main/java/it/grid/storm/asynch/PtGFeeder.java
index 5f39e9641..55398a154 100644
--- a/src/main/java/it/grid/storm/asynch/PtGFeeder.java
+++ b/src/main/java/it/grid/storm/asynch/PtGFeeder.java
@@ -4,19 +4,19 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException;
import it.grid.storm.catalogs.PtGChunkCatalog;
-import it.grid.storm.catalogs.PtGPersistentChunkData;
import it.grid.storm.catalogs.RequestSummaryCatalog;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException;
import it.grid.storm.namespace.InvalidDescendantsFileRequestException;
import it.grid.storm.namespace.InvalidDescendantsPathRequestException;
import it.grid.storm.namespace.InvalidSURLException;
-import it.grid.storm.namespace.NamespaceDirector;
+import it.grid.storm.namespace.Namespace;
import it.grid.storm.namespace.NamespaceException;
import it.grid.storm.namespace.StoRI;
import it.grid.storm.namespace.UnapprochableSurlException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.model.PtGPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.SchedulerException;
import it.grid.storm.srm.types.InvalidTDirOptionAttributesException;
@@ -250,7 +250,7 @@ private void manageIsDirectory(PtGPersistentChunkData chunkData) {
/* Build StoRI for current chunk */
StoRI stori = null;
try {
- stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, chunkData.getUser());
+ stori = Namespace.getInstance().resolveStoRIbySURL(surl, chunkData.getUser());
} catch (IllegalArgumentException e) {
log.error(
"Unable to build a stori for surl {} for user {}. " + "IllegalArgumentException: {}",
diff --git a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java
index 8d01b7cbc..f58e3cbfb 100644
--- a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java
+++ b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java
@@ -6,9 +6,9 @@
import java.util.Arrays;
import it.grid.storm.catalogs.PtGChunkCatalog;
-import it.grid.storm.catalogs.PtGData;
-import it.grid.storm.catalogs.PtGPersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
+import it.grid.storm.persistence.model.PtGData;
+import it.grid.storm.persistence.model.PtGPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.PersistentRequestChunk;
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.synchcall.command.CommandHelper;
diff --git a/src/main/java/it/grid/storm/asynch/PtP.java b/src/main/java/it/grid/storm/asynch/PtP.java
index 111505c63..a154dc279 100644
--- a/src/main/java/it/grid/storm/asynch/PtP.java
+++ b/src/main/java/it/grid/storm/asynch/PtP.java
@@ -4,6 +4,8 @@
*/
package it.grid.storm.asynch;
+import static it.grid.storm.srm.types.TFileStorageType.VOLATILE;
+
import java.io.IOException;
import java.util.Arrays;
import java.util.Calendar;
@@ -12,18 +14,19 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Preconditions;
+
import it.grid.storm.acl.AclManagerFS;
import it.grid.storm.authz.AuthzDecision;
import it.grid.storm.authz.AuthzDirector;
import it.grid.storm.authz.SpaceAuthzInterface;
import it.grid.storm.authz.path.model.SRMFileRequest;
import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.catalogs.PtPData;
import it.grid.storm.catalogs.ReservedSpaceCatalog;
import it.grid.storm.catalogs.VolatileAndJiTCatalog;
import it.grid.storm.catalogs.surl.SURLStatusManager;
import it.grid.storm.catalogs.surl.SURLStatusManagerFactory;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
import it.grid.storm.ea.StormEA;
import it.grid.storm.filesystem.FilesystemPermission;
import it.grid.storm.filesystem.LocalFile;
@@ -33,7 +36,7 @@
import it.grid.storm.namespace.ExpiredSpaceTokenException;
import it.grid.storm.namespace.InvalidGetTURLProtocolException;
import it.grid.storm.namespace.InvalidSURLException;
-import it.grid.storm.namespace.NamespaceDirector;
+import it.grid.storm.namespace.Namespace;
import it.grid.storm.namespace.NamespaceException;
import it.grid.storm.namespace.StoRI;
import it.grid.storm.namespace.TURLBuildingException;
@@ -42,13 +45,13 @@
import it.grid.storm.namespace.model.DefaultACL;
import it.grid.storm.namespace.model.VirtualFS;
import it.grid.storm.persistence.exceptions.DataAccessException;
+import it.grid.storm.persistence.model.PtPData;
import it.grid.storm.persistence.model.TransferObjectDecodingException;
import it.grid.storm.scheduler.Chooser;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.Streets;
import it.grid.storm.space.SpaceHelper;
import it.grid.storm.space.StorageSpaceData;
-import it.grid.storm.srm.types.TFileStorageType;
import it.grid.storm.srm.types.TOverwriteMode;
import it.grid.storm.srm.types.TRequestToken;
import it.grid.storm.srm.types.TSURL;
@@ -65,13 +68,13 @@
* multifile request. StoRM then sends the chunk to a chunk-scheduler. For an existing file: if
* TOverwriteMode is set to Never, then the chunk fails with SRM_DUPLICATION_ERROR; if
* TOverwriteMode is Always or WhenFilesAreDifferent, the file gets treated in the same fashion:
- * moreover the behaviour is the same as for the case of a non existing file described later on,
+ * moreover the behavior is the same as for the case of a non existing file described later on,
* except that the only policy check made is about the presence of write rights, instead of create
- * rights, as well as erasing the file before going on with the processing - all previous data gets
- * lost! If the SURL refers to a file that does not exist, the behaviour is identical whatever the
+ * rights, as well as erasing the file before going on with the processing: all previous data gets
+ * lost! If the SURL refers to a file that does not exist, the behavior is identical whatever the
* TOverwriteMode; in particular: AuthorisationCollector is queried for File Creation policies: if
* it is set to Deny, then the chunk is failed with SRM_AUTHORIZATION_FAILURE. If it is set to
- * Permit, the situation is decribed later on. For any other decisions, the chunk is failed with
+ * Permit, the situation is described later on. For any other decisions, the chunk is failed with
* SRM_FAILURE: it is caused when the policy is missing so no decision can be made, or if there is a
* problem querying the Policies, or any new state for the AuthorisationDecision is introduced but
* the PtP logic is not updated. In case Create rights are granted, the presence of a space token
@@ -83,12 +86,12 @@
* supplied, the space is allocated as requested and again a special mock reserve file gets created.
* A Write ACL is setup on the file regardless of the Security Model (AoT or JiT); if the file is
* specified as VOLATILE, it gets pinned in the PinnedFilesCatalog; if JiT is active, the ACL will
- * live only for the given time interval. A TURL gets filled in, the status transits to
+ * live only for the given time interval. A TURL gets filled in, the status moves to
* SRM_SPACE_AVAILABLE, and the PtPCatalog is updated. There are error situations which get handled
* as follows: If the placeHolder file cannot be created, or the implicit reservation fails, or the
- * supplied space token does not exist, the request fails and chenages state to SRM_FAILURE. If the
+ * supplied space token does not exist, the request fails and changes state to SRM_FAILURE. If the
* setting up of the ACL fails, the request fails too and the state changes to SRM_FAILURE.
- * Appropriate messagges get logged.
+ * Appropriate messages get logged.
*
* @author EGRID - ICTP Trieste
* @date June, 2005
@@ -106,12 +109,12 @@ public class PtP implements Delegable, Chooser, Request {
protected final PtPData requestData;
/**
- * Time that wil be used in all jit and volatile tracking.
+ * Time that will be used in all JiT and volatile tracking.
*/
protected final Calendar start;
/**
- * boolean that indicates the state of the shunk is failure
+   * boolean that indicates that the chunk is in a failure state
*/
protected boolean failure = false;
@@ -120,6 +123,11 @@ public class PtP implements Delegable, Chooser, Request {
*/
protected boolean spacefailure = false;
+ /**
+   * boolean that indicates whether setting the ACL on the 0-size file is necessary
+ */
+ protected boolean setupACLs = true;
+
/**
* Constructor requiring the VomsGridUser, the RequestSummaryData, the PtPChunkData about this
* chunk, and the GlobalStatusManager. If the supplied attributes are null, an
@@ -133,6 +141,11 @@ public PtP(PtPData chunkData) throws InvalidRequestAttributesException {
}
this.requestData = chunkData;
start = Calendar.getInstance();
+
+ if (StormConfiguration.getInstance().getPTPSkipACLSetup()) {
+ setupACLs = false;
+ log.debug("Skipping ACL setup on PTP as requested by configuration.");
+ }
}
/**
@@ -176,10 +189,10 @@ public void doIt() {
try {
if (requestData instanceof IdentityInputData) {
- fileStoRI = NamespaceDirector.getNamespace()
+ fileStoRI = Namespace.getInstance()
.resolveStoRIbySURL(surl, ((IdentityInputData) requestData).getUser());
} else {
- fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl);
+ fileStoRI = Namespace.getInstance().resolveStoRIbySURL(surl);
}
} catch (UnapprochableSurlException e) {
@@ -358,11 +371,11 @@ private void managePermit(StoRI fileStoRI) {
requestData.changeStatusSRM_NOT_SUPPORTED(
"Unable to build TURL with " + "specified transfer protocols!");
failure = true;
- log.error("ERROR in PtPChunk! No valid transfer protocol found. {}", e.getMessage(), e);
+ log.error("ERROR in PtPChunk! No valid transfer protocol found. {}", e.getMessage());
return;
} catch (TURLBuildingException e) {
requestData.changeStatusSRM_FAILURE(
- "Unable to build the TURL for the " + "provided transfer protocol");
+ "Unable to build the TURL for the provided transfer protocol");
failure = true;
log.error("ERROR in PtPChunk! There was a failure building the TURL. "
+ "TURLBuildingException: {} ", e.getMessage(), e);
@@ -377,50 +390,103 @@ private void managePermit(StoRI fileStoRI) {
"Unable to find local user for " + DataHelper.getRequestor(requestData));
failure = true;
log.error(
- "ERROR in PtGChunk! Unable to find LocalUser for {}! " + "CannotMapUserException: {}",
- DataHelper.getRequestor(requestData), e.getMessage(), e);
+          "ERROR in PtPChunk! Unable to find LocalUser for {}! CannotMapUserException: {}",
+ DataHelper.getRequestor(requestData), e.getMessage());
return;
}
- if (canTraverse) {
- // Use any reserved space which implies the existence of a
- // file!
- if (managePermitReserveSpaceStep(fileStoRI)) {
- boolean canWrite;
- try {
- canWrite = managePermitSetFileStep(fileStoRI);
- } catch (CannotMapUserException e) {
- requestData.changeStatusSRM_FAILURE(
- "Unable to find local user for " + DataHelper.getRequestor(requestData));
- failure = true;
- log.error(
- "ERROR in PtGChunk! Unable to find LocalUser for {}! " + "CannotMapUserException: {}",
- DataHelper.getRequestor(requestData), e.getMessage(), e);
- return;
- }
- if (!canWrite) {
- // URGENT!!!
- // roll back! ok3, ok2 and ok1
- } else {
- log.debug(
- "PTP CHUNK. Addition of ReadWrite ACL on file successfully " + "completed for {}",
- fileStoRI.getAbsolutePath());
- requestData.setTransferURL(auxTURL);
- requestData.changeStatusSRM_SPACE_AVAILABLE("srmPrepareToPut " + "successfully handled!");
- failure = false;
- if (requestData.fileStorageType().equals(TFileStorageType.VOLATILE)) {
- VolatileAndJiTCatalog.getInstance()
- .trackVolatile(fileStoRI.getPFN(), Calendar.getInstance(),
- requestData.fileLifetime());
- }
- }
+ if (!canTraverse) {
+ failure = true;
+ requestData.changeStatusSRM_FAILURE("Unable to set up parent path");
+      log.error("ERROR in PtPChunk! Unable to set up parent path");
+ return;
+ }
+ if (!hasEnoughSpace(fileStoRI)) {
+ failure = true;
+ requestData.changeStatusSRM_FAILURE("Not enough space on storage area");
+      log.error("ERROR in PtPChunk! Not enough space on storage area");
+ return;
+ }
+ if (!setupACLs) {
+ log.debug("ACL setup and file creation skipped by configuration");
+ requestData.setTransferURL(auxTURL);
+ requestData.changeStatusSRM_SPACE_AVAILABLE("srmPrepareToPut successfully handled!");
+ failure = false;
+ return;
+ }
+ if (!managePermitReserveSpaceStep(fileStoRI)) {
+ failure = true;
+ requestData.changeStatusSRM_FAILURE("Unable to reserve space on storage area");
+      log.error("ERROR in PtPChunk! Unable to reserve space on storage area");
+ return;
+ }
+ boolean canWrite;
+ try {
+ canWrite = managePermitSetFileStep(fileStoRI);
+ } catch (CannotMapUserException e) {
+ requestData.changeStatusSRM_FAILURE(
+ "Unable to find local user for " + DataHelper.getRequestor(requestData));
+ failure = true;
+      log.error("ERROR in PtPChunk! Unable to find LocalUser for {}! CannotMapUserException: {}",
+ DataHelper.getRequestor(requestData), e.getMessage());
+ return;
+ }
+ if (canWrite) {
+ log.debug("PTP CHUNK. Addition of ReadWrite ACL on file successfully completed for {}",
+ fileStoRI.getAbsolutePath());
+ requestData.setTransferURL(auxTURL);
+ requestData.changeStatusSRM_SPACE_AVAILABLE("srmPrepareToPut successfully handled!");
+ failure = false;
+ if (VOLATILE.equals(requestData.fileStorageType())) {
+ VolatileAndJiTCatalog.getInstance()
+ .trackVolatile(fileStoRI.getPFN(), Calendar.getInstance(), requestData.fileLifetime());
+ }
+ return;
+ }
+ }
+
+ private boolean hasEnoughSpace(StoRI fileStoRI) {
+
+ Preconditions.checkNotNull(fileStoRI.getVirtualFileSystem());
+ VirtualFS fs = fileStoRI.getVirtualFileSystem();
+
+ if (!fs.getProperties().isOnlineSpaceLimited()) {
+      log.debug("{} has no online space limit!", fs.getAliasName());
+ return true;
+ }
+ SpaceHelper sp = new SpaceHelper();
+ if (sp.isSAFull(PtP.log, fileStoRI)) {
+ log.debug("{} is full!", fs.getAliasName());
+ return false;
+ }
+ boolean isDiskUsageServiceEnabled = StormConfiguration.getInstance().getDiskUsageServiceEnabled();
+ if (!sp.isSAInitialized(PtP.log, fileStoRI) && isDiskUsageServiceEnabled) {
+ /* Trust we got space, let the request pass */
+ log.debug(
+ "PtPChunk: ReserveSpaceStep: the storage area space initialization is in progress, optimistic approach, considering we have enough space");
+ return true;
+ }
+ TSizeInBytes size = requestData.expectedFileSize();
+ if (size.isEmpty()) {
+      log.debug("Expected size is zero or not available. We trust there's enough space");
+ return true;
+ }
+ long freeSpace = sp.getSAFreeSpace(PtP.log, fileStoRI);
+ if (freeSpace != -1 && freeSpace <= size.value()) {
+ TSpaceToken SASpaceToken = sp.getTokenFromStoRI(PtP.log, fileStoRI);
+ if (SASpaceToken == null || SASpaceToken.isEmpty()) {
+ log.error(
+ "PtPChunk - ReserveSpaceStep: Unable to get a valid TSpaceToken for stori {} . Unable to verify storage area space initialization",
+ fileStoRI);
+ requestData.changeStatusSRM_FAILURE("No valid space token for the Storage Area");
+
} else {
- // URGENT!!!
- // roll back! ok2 and ok1
+ log.debug("PtPChunk - ReserveSpaceStep: no free space on Storage Area!");
+ requestData.changeStatusSRM_FAILURE("No free space on Storage Area");
}
- } else {
- // URGENT!!!
- // roll back ok1!
+ failure = true;
+ return false;
}
+ return true;
}
/**
@@ -433,12 +499,14 @@ private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserEx
if (!preparePath(fileStoRI)) {
return false;
}
- if (requestData instanceof IdentityInputData) {
- LocalUser user = ((IdentityInputData) requestData).getUser().getLocalUser();
- return setParentAcl(fileStoRI, user);
- }
+ if (setupACLs) {
+ if (requestData instanceof IdentityInputData) {
+ LocalUser user = ((IdentityInputData) requestData).getUser().getLocalUser();
+ return setParentAcl(fileStoRI, user);
+ }
- setHttpsServiceParentAcl(fileStoRI);
+ setHttpsServiceParentAcl(fileStoRI);
+ }
return true;
}
@@ -461,7 +529,7 @@ private boolean preparePath(StoRI fileStoRI) {
private boolean prepareDirectory(LocalFile dir) {
boolean automaticDirectoryCreation =
- Configuration.getInstance().getAutomaticDirectoryCreation();
+ StormConfiguration.getInstance().getAutomaticDirectoryCreation();
if (dir.exists()) {
if (!dir.isDirectory()) {
@@ -501,7 +569,7 @@ private void updateUsedSpace(LocalFile dir) {
VirtualFS vfs;
try {
- vfs = NamespaceDirector.getNamespace().resolveVFSbyLocalFile(dir);
+ vfs = Namespace.getInstance().resolveVFSbyLocalFile(dir);
} catch (NamespaceException e) {
log.error("srmPtP: Error during used space update - {}", e.getMessage());
return;
@@ -634,7 +702,7 @@ private boolean setJiTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermis
private boolean setAoTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermission permission)
throws Exception {
- log.debug("SrmMkdir: Adding AoT ACL {} to user {} for directory: '{}'", permission, localUser,
+ log.debug("SrmMkdir: Adding AoT ACL {} to user {} for file: '{}'", permission, localUser,
fileStori.getAbsolutePath());
try {
@@ -700,47 +768,6 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) {
TSpaceToken spaceToken = requestData.getSpaceToken();
LocalFile localFile = fileStoRI.getLocalFile();
- // In case of SRM Storage Area limitation enabled,
- // the Storage Area free size is retrieved from the database
- // and the PtP fails if there is not enougth space.
-
- VirtualFS fs = fileStoRI.getVirtualFileSystem();
-
- if (fs != null && fs.getProperties().isOnlineSpaceLimited()) {
- SpaceHelper sp = new SpaceHelper();
- long freeSpace = sp.getSAFreeSpace(PtP.log, fileStoRI);
- if ((sp.isSAFull(PtP.log, fileStoRI))
- || (!size.isEmpty() && ((freeSpace != -1) && (freeSpace <= size.value())))) {
- /* Verify if the storage area space has been initialized */
- /*
- * If is not initialized verify if the SpaceInfoManager is currently initializing this
- * storage area
- */
- TSpaceToken SASpaceToken = sp.getTokenFromStoRI(PtP.log, fileStoRI);
- if (SASpaceToken == null || SASpaceToken.isEmpty()) {
- log.error("PtPChunk - ReserveSpaceStep: Unable to get a valid "
- + "TSpaceToken for stori {} . Unable to verify storage area space "
- + "initialization", fileStoRI);
- requestData.changeStatusSRM_FAILURE("No valid space token for the Storage Area");
- failure = true;
- return false;
- } else {
- if (!sp.isSAInitialized(PtP.log, fileStoRI)
- && Configuration.getInstance().getDiskUsageServiceEnabled()) {
- /* Trust we got space, let the request pass */
- log.debug("PtPChunk: ReserveSpaceStep: the storage area space "
- + "initialization is in progress, optimistic approach, considering "
- + "we got enough space");
- } else {
- log.debug("PtPChunk - ReserveSpaceStep: no free space on Storage Area!");
- requestData.changeStatusSRM_FAILURE("No free space on Storage Area");
- failure = true;
- return false;
- }
- }
- }
- }
-
try {
boolean fileWasCreated = localFile.createNewFile();
@@ -798,12 +825,9 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) {
return true;
} catch (SecurityException e) {
- // file.createNewFile could not create file because the Java
- // SecurityManager did not grant
- // write premission! This indicates a possible conflict between a
- // local system administrator
- // who applied a strict local policy, and policies as specified by
- // the PolicyCollector!
+ // file.createNewFile could not create file because the Java SecurityManager did not grant
+ // write permission! This indicates a possible conflict between a local system administrator
+ // who applied a strict local policy, and policies as specified by the PolicyCollector!
requestData.changeStatusSRM_FAILURE("Space Management step in " + "srmPrepareToPut failed!");
failure = true;
log.error("ERROR in PtPChunk! During space reservation step in PtP, "
@@ -811,8 +835,7 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) {
+ "writing the file! ", localFile.toString(), e);
return false;
} catch (IOException e) {
- // file.createNewFile could not create file because of a local IO
- // Error!
+ // file.createNewFile could not create file because of a local IO Error!
requestData.changeStatusSRM_FAILURE("Space Management step in " + "srmPrepareToPut failed!");
failure = true;
log.error(
@@ -844,9 +867,9 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) {
log.info("PtPChunk execution failed. ExpiredSpaceTokenException: {}", e.getMessage());
return false;
} catch (Exception e) {
- // This could be thrown by Java from Filesystem component given that
+ // This could be thrown by Java from FileSystem component given that
// there is GPFS under the hoods, but I do not know exactly how
- // java.io.File behaves with an ACL capable filesystem!!
+ // java.io.File behaves with an ACL capable FileSystem!!
requestData.changeStatusSRM_FAILURE("Space Management step in " + "srmPrepareToPut failed!");
failure = true;
log.error(
@@ -861,7 +884,7 @@ private boolean isExistingSpaceToken(TSpaceToken spaceToken) throws Exception {
StorageSpaceData spaceData = null;
try {
- spaceData = new ReservedSpaceCatalog().getStorageSpace(spaceToken);
+ spaceData = ReservedSpaceCatalog.getInstance().getStorageSpace(spaceToken);
} catch (TransferObjectDecodingException e) {
log.error("Unable to build StorageSpaceData from StorageSpaceTO."
+ " TransferObjectDecodingException: {}", e.getMessage());
diff --git a/src/main/java/it/grid/storm/asynch/PtPBuilder.java b/src/main/java/it/grid/storm/asynch/PtPBuilder.java
index f61c57f53..0ab5990b4 100644
--- a/src/main/java/it/grid/storm/asynch/PtPBuilder.java
+++ b/src/main/java/it/grid/storm/asynch/PtPBuilder.java
@@ -6,14 +6,15 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import it.grid.storm.catalogs.AnonymousPtPData;
-import it.grid.storm.catalogs.IdentityPtPData;
-import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException;
-import it.grid.storm.catalogs.InvalidPtPDataAttributesException;
-import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException;
-import it.grid.storm.catalogs.PtPData;
+
import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
+import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.model.AnonymousPtPData;
+import it.grid.storm.persistence.model.IdentityPtPData;
+import it.grid.storm.persistence.model.PtPData;
import it.grid.storm.srm.types.TFileStorageType;
import it.grid.storm.srm.types.TLifeTimeInSeconds;
import it.grid.storm.srm.types.TOverwriteMode;
@@ -42,7 +43,7 @@ public static PtP build(PrepareToPutInputData inputData) throws BuilderException
TLifeTimeInSeconds pinLifetime = inputData.getDesiredPinLifetime();
TLifeTimeInSeconds fileLifetime = inputData.getDesiredFileLifetime();
TFileStorageType fileStorageType = TFileStorageType
- .getTFileStorageType(Configuration.getInstance().getDefaultFileStorageType());
+ .getTFileStorageType(StormConfiguration.getInstance().getDefaultFileStorageType());
TSpaceToken spaceToken = inputData.getTargetSpaceToken();
TSizeInBytes expectedFileSize = inputData.getFileSize();
TURLPrefix transferProtocols = inputData.getTransferProtocols();
diff --git a/src/main/java/it/grid/storm/asynch/PtPFeeder.java b/src/main/java/it/grid/storm/asynch/PtPFeeder.java
index 40f1d0d6c..185943df5 100644
--- a/src/main/java/it/grid/storm/asynch/PtPFeeder.java
+++ b/src/main/java/it/grid/storm/asynch/PtPFeeder.java
@@ -5,10 +5,10 @@
package it.grid.storm.asynch;
import it.grid.storm.catalogs.PtPChunkCatalog;
-import it.grid.storm.catalogs.PtPPersistentChunkData;
import it.grid.storm.catalogs.RequestSummaryCatalog;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.PtPPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.SchedulerException;
import it.grid.storm.srm.types.TSURL;
diff --git a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java
index 21cfcf5ee..79063e134 100644
--- a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java
+++ b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java
@@ -6,9 +6,9 @@
import java.util.Arrays;
import it.grid.storm.catalogs.PtPChunkCatalog;
-import it.grid.storm.catalogs.PtPData;
-import it.grid.storm.catalogs.PtPPersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
+import it.grid.storm.persistence.model.PtPData;
+import it.grid.storm.persistence.model.PtPPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.PersistentRequestChunk;
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.synchcall.command.CommandHelper;
diff --git a/src/main/java/it/grid/storm/asynch/Suspendedable.java b/src/main/java/it/grid/storm/asynch/Suspendedable.java
index 80564d107..5fdd55d31 100644
--- a/src/main/java/it/grid/storm/asynch/Suspendedable.java
+++ b/src/main/java/it/grid/storm/asynch/Suspendedable.java
@@ -4,7 +4,7 @@
*/
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestData;
+import it.grid.storm.persistence.model.RequestData;
import it.grid.storm.tape.recalltable.model.TapeRecallStatus;
public interface Suspendedable {
diff --git a/src/main/java/it/grid/storm/authz/AuthzDirector.java b/src/main/java/it/grid/storm/authz/AuthzDirector.java
index c5340adcf..3c99a9acb 100644
--- a/src/main/java/it/grid/storm/authz/AuthzDirector.java
+++ b/src/main/java/it/grid/storm/authz/AuthzDirector.java
@@ -5,159 +5,157 @@
package it.grid.storm.authz;
import java.io.File;
-import java.util.ArrayList;
-import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
import it.grid.storm.authz.path.PathAuthz;
import it.grid.storm.authz.path.conf.PathAuthzDBReader;
import it.grid.storm.authz.sa.AuthzDBReaderException;
import it.grid.storm.authz.sa.SpaceDBAuthz;
import it.grid.storm.authz.sa.test.MockSpaceAuthz;
-import it.grid.storm.namespace.NamespaceDirector;
+import it.grid.storm.namespace.Namespace;
import it.grid.storm.namespace.NamespaceException;
-import it.grid.storm.namespace.NamespaceInterface;
import it.grid.storm.namespace.model.SAAuthzType;
import it.grid.storm.namespace.model.VirtualFS;
import it.grid.storm.srm.types.TSpaceToken;
public class AuthzDirector {
- private static final Logger log = LoggerFactory
- .getLogger(AuthzDirector.class);
- private static String configurationPATH;
-
- // Map between 'SpaceToken' and the related 'SpaceAuthz'
- private static Map spaceAuthzs = null;
-
- // PathAuthz is only one, shared by all SAs
- private static PathAuthzInterface pathAuthz = null;
-
- /**
- * Scan the Namespace.xml to retrieve the list of file AuthZDB to digest
- */
- private static Map buildSpaceAuthzsMAP() {
-
- HashMap spaceAuthzMap = new HashMap();
-
- // Retrieve the list of VFS from Namespace
- NamespaceInterface ns = NamespaceDirector.getNamespace();
- ArrayList vfss;
- try {
- vfss = new ArrayList(ns.getAllDefinedVFS());
- for (VirtualFS vfs : vfss) {
- String vfsName = vfs.getAliasName();
- SAAuthzType authzTp = vfs.getStorageAreaAuthzType();
- String authzName = "";
- if (authzTp.equals(SAAuthzType.AUTHZDB)) {
- // The Space Authz is based on Authz DB
- authzName = vfs.getStorageAreaAuthzDB();
- log.debug("Loading AuthzDB '{}'", authzName);
- if (existsAuthzDBFile(authzName)) {
- // Digest the Space AuthzDB File
- TSpaceToken spaceToken = vfs.getSpaceToken();
- SpaceAuthzInterface spaceAuthz = new SpaceDBAuthz(authzName);
- spaceAuthzMap.put(spaceToken, spaceAuthz);
- } else {
- log.error("File AuthzDB '{}' related to '{}' does not exists.",
- authzName, vfsName);
- }
- } else {
- authzName = vfs.getStorageAreaAuthzFixed();
- }
- log.debug("VFS ['{}'] = {} : {}", vfsName, authzTp, authzName);
- }
- } catch (NamespaceException e) {
- log.error("Unable to initialize AUTHZ DB! Error: {}", e.getMessage(), e);
- }
-
- return spaceAuthzMap;
- }
-
- /**
- * Utility method
- *
- * @param dbFileName
- * @return
- * @throws AuthzDBReaderException
- */
- private static boolean existsAuthzDBFile(String dbFileName) {
-
- String fileName = configurationPATH + File.separator + dbFileName;
- boolean exists = (new File(fileName)).exists();
- if (!exists) {
- log.warn("The AuthzDB File '{}' does not exists", dbFileName);
- }
- return exists;
- }
-
- // ****************************************
- // PUBLIC METHODS
- // ****************************************
-
- /******************************
- * SPACE AUTHORIZATION ENGINE
- ******************************/
- public static void initializeSpaceAuthz() {
-
- // Build Space Authzs MAP
- spaceAuthzs = buildSpaceAuthzsMAP();
- }
-
- /**
- * Retrieve the Space Authorization module related to the Space Token
- *
- * @param token
- * @return
- */
- public static SpaceAuthzInterface getSpaceAuthz(TSpaceToken token) {
-
- SpaceAuthzInterface spaceAuthz = new MockSpaceAuthz();
- // Retrieve the SpaceAuthz related to the Space Token
- if ((spaceAuthzs != null) && (spaceAuthzs.containsKey(token))) {
- spaceAuthz = spaceAuthzs.get(token);
- log.debug("Space Authz related to S.Token ='{}' is '{}'", token,
- spaceAuthz.getSpaceAuthzID());
- } else {
- log.debug("Space Authz related to S.Token ='{}' does not exists. "
- + "Use the MOCK one.", token);
- }
- return spaceAuthz;
- }
-
- /******************************
- * PATH AUTHORIZATION ENGINE
- ******************************/
-
- /**
- * Initializating the Path Authorization engine
- *
- * @param pathAuthz2
- */
- public static void initializePathAuthz(String pathAuthzDBFileName)
- throws DirectorException {
-
- PathAuthzDBReader authzDBReader;
- try {
- authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName);
- } catch (Exception e) {
- log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e);
- throw new DirectorException("Unable to build a PathAuthzDBReader");
- }
- AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB());
- }
-
- /**
- * Retrieve the Path Authorization module
- *
- * @todo: To implement this.
- */
- public static PathAuthzInterface getPathAuthz() {
-
- return AuthzDirector.pathAuthz;
- }
+ private static final Logger log = LoggerFactory.getLogger(AuthzDirector.class);
+ private static String configurationPATH;
+
+ // Map between 'SpaceToken' and the related 'SpaceAuthz'
+  private static Map<TSpaceToken, SpaceAuthzInterface> spaceAuthzs = null;
+
+ // PathAuthz is only one, shared by all SAs
+ private static PathAuthzInterface pathAuthz = null;
+
+ /**
+ * Scans the Namespace.xml to retrieve the list of AuthzDB files to digest.
+ */
+ private static Map buildSpaceAuthzsMAP() {
+
+ Map spaceAuthzMap = Maps.newHashMap();
+
+ // Retrieve the list of VFS from Namespace
+ Namespace ns = Namespace.getInstance();
+ List vfss;
+ try {
+ vfss = Lists.newArrayList(ns.getAllDefinedVFS());
+ for (VirtualFS vfs : vfss) {
+ String vfsName = vfs.getAliasName();
+ SAAuthzType authzTp = vfs.getStorageAreaAuthzType();
+ String authzName = "";
+ if (authzTp.equals(SAAuthzType.AUTHZDB)) {
+ // The Space Authz is based on Authz DB
+ authzName = vfs.getStorageAreaAuthzDB();
+ log.debug("Loading AuthzDB '{}'", authzName);
+ if (existsAuthzDBFile(authzName)) {
+ // Digest the Space AuthzDB File
+ TSpaceToken spaceToken = vfs.getSpaceToken();
+ SpaceAuthzInterface spaceAuthz = new SpaceDBAuthz(authzName);
+ spaceAuthzMap.put(spaceToken, spaceAuthz);
+ } else {
+ log.error("File AuthzDB '{}' related to '{}' does not exists.", authzName, vfsName);
+ }
+ } else {
+ authzName = vfs.getStorageAreaAuthzFixed();
+ }
+ log.debug("VFS ['{}'] = {} : {}", vfsName, authzTp, authzName);
+ }
+ } catch (NamespaceException e) {
+ log.error("Unable to initialize AUTHZ DB! Error: {}", e.getMessage(), e);
+ }
+
+ return spaceAuthzMap;
+ }
+
+ /**
+ * Checks whether the named AuthzDB file exists under the configuration path.
+ *
+ * @param dbFileName the AuthzDB file name, relative to the configuration path
+ * @return true if the file exists, false otherwise
+ */
+ private static boolean existsAuthzDBFile(String dbFileName) {
+
+ String fileName = configurationPATH + File.separator + dbFileName;
+ boolean exists = (new File(fileName)).exists();
+ if (!exists) {
+ log.warn("The AuthzDB File '{}' does not exists", dbFileName);
+ }
+ return exists;
+ }
+
+ // ****************************************
+ // PUBLIC METHODS
+ // ****************************************
+
+ /******************************
+ * SPACE AUTHORIZATION ENGINE
+ ******************************/
+ public static void initializeSpaceAuthz() {
+
+ // Build Space Authzs MAP
+ spaceAuthzs = buildSpaceAuthzsMAP();
+ }
+
+ /**
+ * Retrieves the Space Authorization module related to the given Space Token.
+ *
+ * @param token the space token to look up
+ * @return the registered SpaceAuthzInterface, or a MockSpaceAuthz if none is registered
+ */
+ public static SpaceAuthzInterface getSpaceAuthz(TSpaceToken token) {
+
+ SpaceAuthzInterface spaceAuthz = new MockSpaceAuthz();
+ // Retrieve the SpaceAuthz related to the Space Token
+ if ((spaceAuthzs != null) && (spaceAuthzs.containsKey(token))) {
+ spaceAuthz = spaceAuthzs.get(token);
+ log.debug("Space Authz related to S.Token ='{}' is '{}'", token,
+ spaceAuthz.getSpaceAuthzID());
+ } else {
+ log.debug("Space Authz related to S.Token ='{}' does not exists. " + "Use the MOCK one.",
+ token);
+ }
+ return spaceAuthz;
+ }
+
+ /******************************
+ * PATH AUTHORIZATION ENGINE
+ ******************************/
+
+ /**
+ * Initializes the Path Authorization engine.
+ *
+ * @param pathAuthzDBFileName the path authorization DB file name
+ * @throws DirectorException if the PathAuthzDBReader cannot be built
+ */
+ public static void initializePathAuthz(String pathAuthzDBFileName) throws DirectorException {
+
+ PathAuthzDBReader authzDBReader;
+ try {
+ authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName);
+ } catch (Exception e) {
+ log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e);
+ throw new DirectorException("Unable to build a PathAuthzDBReader");
+ }
+ AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB());
+ }
+
+ /**
+ * Retrieves the Path Authorization module shared by all storage areas.
+ */
+ public static PathAuthzInterface getPathAuthz() {
+
+ return AuthzDirector.pathAuthz;
+ }
}
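
The refactored getSpaceAuthz() above is total: callers never receive null, and a token with no registered AuthzDB silently falls back to a MockSpaceAuthz. A minimal caller-side sketch of that contract, using only names visible in this diff (the helper method itself is hypothetical):

    // Hypothetical helper: resolve the authz module bound to a space token.
    // AuthzDirector.getSpaceAuthz() never returns null; unknown tokens get a
    // MockSpaceAuthz instead, so the calls below are always safe.
    static void logSpaceAuthz(TSpaceToken token) {
      SpaceAuthzInterface authz = AuthzDirector.getSpaceAuthz(token);
      log.info("Space authz for token '{}' is '{}'", token, authz.getSpaceAuthzID());
    }
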
diff --git a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java
index 0acd36171..f1fc6b239 100644
--- a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java
+++ b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java
@@ -10,7 +10,7 @@
import it.grid.storm.authz.AuthzException;
import it.grid.storm.authz.path.model.PathACE;
import it.grid.storm.authz.path.model.PathAuthzEvaluationAlgorithm;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
import java.io.BufferedReader;
import java.io.File;
@@ -42,7 +42,7 @@ public PathAuthzDBReader(String filename) throws Exception {
log.info("Path Authorization : Initializing...");
if (!(existsAuthzDBFile(filename))) {
- String configurationPATH = Configuration.getInstance().namespaceConfigPath();
+ String configurationPATH = StormConfiguration.getInstance().namespaceConfigPath();
if (configurationPATH.length() == 0) {
String userDir = System.getProperty("user.dir");
log.debug("Unable to found the configuration path. Assume: '{}'", userDir);
diff --git a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java
index 99ef444a5..4f2301eee 100644
--- a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java
+++ b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java
@@ -17,40 +17,23 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
import it.grid.storm.authz.AuthzDecision;
import it.grid.storm.authz.AuthzDirector;
import it.grid.storm.authz.path.model.PathOperation;
import it.grid.storm.authz.path.model.SRMFileRequest;
import it.grid.storm.authz.remote.Constants;
-import it.grid.storm.catalogs.OverwriteModeConverter;
import it.grid.storm.common.types.InvalidStFNAttributeException;
import it.grid.storm.common.types.StFN;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
import it.grid.storm.griduser.FQAN;
import it.grid.storm.griduser.GridUserInterface;
import it.grid.storm.griduser.GridUserManager;
-import it.grid.storm.namespace.NamespaceDirector;
+import it.grid.storm.namespace.Namespace;
import it.grid.storm.namespace.NamespaceException;
import it.grid.storm.namespace.model.MappingRule;
import it.grid.storm.namespace.model.Protocol;
import it.grid.storm.namespace.model.VirtualFS;
+import it.grid.storm.persistence.converter.OverwriteModeConverter;
import it.grid.storm.srm.types.TOverwriteMode;
class PermissionEvaluator {
@@ -59,8 +42,7 @@ class PermissionEvaluator {
public static Boolean isOverwriteAllowed() {
- return OverwriteModeConverter.getInstance()
- .toSTORM(Configuration.getInstance().getDefaultOverwriteMode())
+ return OverwriteModeConverter.toSTORM(StormConfiguration.getInstance().getDefaultOverwriteMode())
.equals(TOverwriteMode.ALWAYS);
}
@@ -72,7 +54,7 @@ static Boolean evaluateVomsGridUserPermission(String DNDecoded, String FQANSDeco
VirtualFS fileVFS;
try {
- fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded);
+ fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded);
} catch (NamespaceException e) {
log.error("Unable to determine a VFS that maps the requested file "
+ "path '{}'. NamespaceException: {}", filePathDecoded, e.getMessage());
@@ -114,7 +96,7 @@ static Boolean evaluateVomsGridUserPermission(String DNDecoded, String FQANSDeco
VirtualFS fileVFS;
try {
- fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded);
+ fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded);
} catch (NamespaceException e) {
log.error("Unable to determine a VFS that maps the requested file "
+ "path '{}'. NamespaceException: {}", filePathDecoded, e.getMessage());
@@ -138,7 +120,7 @@ static Boolean evaluateAnonymousPermission(String filePathDecoded, PathOperation
VirtualFS fileVFS;
try {
- fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded);
+ fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded);
} catch (NamespaceException e) {
log.error("Unable to determine a VFS that maps the requested file "
+ "path '{}'. NamespaceException: {}", filePathDecoded, e.getMessage());
@@ -160,7 +142,7 @@ static Boolean evaluateAnonymousPermission(String filePathDecoded, SRMFileReques
VirtualFS fileVFS;
try {
- fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded);
+ fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded);
} catch (NamespaceException e) {
log.error("Unable to determine a VFS that maps the requested file "
+ "path '{}'. NamespaceException: {}", filePathDecoded, e.getMessage());
diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java
index 4f605e59e..a5d2414ac 100644
--- a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java
+++ b/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java
@@ -13,7 +13,7 @@
import org.slf4j.LoggerFactory;
import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
import it.grid.storm.griduser.GridUserInterface;
/**
@@ -45,7 +45,7 @@ public static SpaceDBAuthz makeEmpty() {
public SpaceDBAuthz(String dbFileName) {
- Configuration config = Configuration.getInstance();
+ StormConfiguration config = StormConfiguration.getInstance();
configurationPATH = config.namespaceConfigPath();
if (existsAuthzDBFile(dbFileName)) {
this.dbFileName = dbFileName;
diff --git a/src/main/java/it/grid/storm/balancer/cache/ResponsivenessCache.java b/src/main/java/it/grid/storm/balancer/cache/ResponsivenessCache.java
index efce0fc0a..1b64b7071 100644
--- a/src/main/java/it/grid/storm/balancer/cache/ResponsivenessCache.java
+++ b/src/main/java/it/grid/storm/balancer/cache/ResponsivenessCache.java
@@ -13,11 +13,11 @@
import com.google.common.collect.Maps;
import it.grid.storm.balancer.Node;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
public enum ResponsivenessCache {
- INSTANCE(Configuration.getInstance().getServerPoolStatusCheckTimeout());
+ INSTANCE(StormConfiguration.getInstance().getServerPoolStatusCheckTimeout());
private static final Logger log = LoggerFactory.getLogger(ResponsivenessCache.class);
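
Because ResponsivenessCache is an enum, INSTANCE is constructed eagerly when the class initializes, so the configured timeout is read exactly once at load time; the rename above therefore changes which configuration class must be loadable at that moment. A reduced sketch of the pattern (the field and constructor are assumptions, not shown in this diff):

    public enum ResponsivenessCache {
      INSTANCE(StormConfiguration.getInstance().getServerPoolStatusCheckTimeout());

      private final long checkTimeout; // assumed field; the diff shows only INSTANCE

      ResponsivenessCache(long checkTimeout) {
        this.checkTimeout = checkTimeout;
      }
    }
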
diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java
index ea71f6f27..8dd66e29c 100644
--- a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java
+++ b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java
@@ -4,14 +4,30 @@
*/
package it.grid.storm.catalogs;
-import it.grid.storm.common.types.SizeUnit;
+import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import it.grid.storm.common.types.TURLPrefix;
import it.grid.storm.common.types.TimeUnit;
-import it.grid.storm.config.Configuration;
-import it.grid.storm.griduser.GridUserInterface;
-// import it.grid.storm.namespace.SurlStatusStore;
+import it.grid.storm.config.StormConfiguration;
+import it.grid.storm.persistence.converter.PinLifetimeConverter;
+import it.grid.storm.persistence.converter.StatusCodeConverter;
+import it.grid.storm.persistence.converter.TransferProtocolListConverter;
+import it.grid.storm.persistence.dao.BoLChunkDAO;
+import it.grid.storm.persistence.exceptions.InvalidReducedBoLChunkDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.impl.mysql.BoLChunkDAOMySql;
+import it.grid.storm.persistence.model.BoLChunkDataTO;
+import it.grid.storm.persistence.model.BoLPersistentChunkData;
+import it.grid.storm.persistence.model.ReducedBoLChunkData;
+import it.grid.storm.persistence.model.ReducedBoLChunkDataTO;
import it.grid.storm.srm.types.InvalidTDirOptionAttributesException;
-import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException;
import it.grid.storm.srm.types.InvalidTSURLAttributesException;
import it.grid.storm.srm.types.InvalidTSizeAttributesException;
import it.grid.storm.srm.types.TDirOption;
@@ -23,783 +39,306 @@
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.srm.types.TTURL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class that represents StoRMs BoLChunkCatalog: it collects BoLChunkData and
- * provides methods for looking up a BoLChunkData based on TRequestToken, as
- * well as for adding a new entry and removing an existing one.
- *
- * @author CNAF
- * @date Aug 2009
- * @version 1.0
- */
public class BoLChunkCatalog {
- private static final Logger log = LoggerFactory
- .getLogger(BoLChunkCatalog.class);
-
- /* only instance of BoLChunkCatalog present in StoRM! */
- private static final BoLChunkCatalog cat = new BoLChunkCatalog();
- private final BoLChunkDAO dao = BoLChunkDAO.getInstance();
-
- /*
- * Timer object in charge of transiting expired requests from SRM_FILE_PINNED
- * to SRM_RELEASED!
- */
- private final Timer transiter = new Timer();
- /* Delay time before starting cleaning thread! */
- private final long delay = Configuration.getInstance()
- .getTransitInitialDelay() * 1000;
- /* Period of execution of cleaning! */
- private final long period = Configuration.getInstance()
- .getTransitTimeInterval() * 1000;
-
- /**
- * Private constructor that starts the internal timer needed to periodically
- * check and transit requests whose pinLifetime has expired and are in
- * SRM_FILE_PINNED, to SRM_RELEASED.
- */
- private BoLChunkCatalog() {
-
- TimerTask transitTask = new TimerTask() {
-
- @Override
- public void run() {
-
- transitExpiredSRM_SUCCESS();
- }
- };
- transiter.scheduleAtFixedRate(transitTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of BoLChunkCatalog available.
- */
- public static BoLChunkCatalog getInstance() {
-
- return cat;
- }
-
- /**
- * Method that returns a Collection of BoLChunkData Objects matching the
- * supplied TRequestToken.
- *
- * If any of the data associated to the TRequestToken is not well formed and
- * so does not allow a BoLChunkData Object to be created, then that part of
- * the request is dropped and gets logged, and the processing continues with
- * the next part. All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks to process then an empty Collection is returned, and
- * a message gets logged.
- */
- synchronized public Collection lookup(TRequestToken rt) {
-
- Collection chunkCollection = dao.find(rt);
- log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection);
- List list = new ArrayList();
-
- if (chunkCollection.isEmpty()) {
- log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified "
- + "request: {}", rt);
- return list;
- }
-
- BoLPersistentChunkData chunk;
- for (BoLChunkDataTO chunkTO : chunkCollection) {
- chunk = makeOne(chunkTO, rt);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(completeTO(chunkTO, chunk));
- } catch (InvalidReducedBoLChunkDataAttributesException e) {
- log.warn("BoL CHUNK CATALOG! unable to add missing informations on DB "
- + "to the request: {}", e.getMessage());
- }
- }
- log.debug("BoL CHUNK CATALOG: returning " + list);
- return list;
- }
-
- /**
- * Generates a BoLChunkData from the received BoLChunkDataTO
- *
- * @param auxTO
- * @param rt
- * @return
- */
- private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) {
-
- StringBuilder errorSb = new StringBuilder();
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (auxTO.normalizedStFN() != null) {
- fromSURL.setNormalizedStFN(auxTO.normalizedStFN());
- }
- if (auxTO.sulrUniqueID() != null) {
- fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue());
- }
- // lifeTime
- TLifeTimeInSeconds lifeTime = null;
- try {
- long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(
- auxTO.getLifeTime());
- // Check for max value allowed
- long max = Configuration.getInstance().getPinLifetimeMaximum();
- if (pinLifeTime > max) {
- log.warn("PinLifeTime is greater than the max value allowed. "
- + "Drop the value to the max = {} seconds", max);
- pinLifeTime = max;
- }
- lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // dirOption
- TDirOption dirOption = null;
- try {
- dirOption = new TDirOption(auxTO.getDirOption(),
- auxTO.getAllLevelRecursive(), auxTO.getNumLevel());
- } catch (InvalidTDirOptionAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // transferProtocols
- TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO
- .getProtocolList());
- if (transferProtocols.size() == 0) {
- errorSb.append("\nEmpty list of TransferProtocols or"
- + " could not translate TransferProtocols!");
- /* fail construction of BoLChunkData! */
- transferProtocols = null;
- }
- // fileSize
- TSizeInBytes fileSize = null;
- try {
- fileSize = TSizeInBytes.make(auxTO.getFileSize(), SizeUnit.BYTES);
- } catch (InvalidTSizeAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- auxTO.getStatus());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + auxTO.getStatus());
- } else {
- status = new TReturnStatus(code, auxTO.getErrString());
- }
- // transferURL
- /*
- * whatever is read is just meaningless because BoL will fill it in!!! So
- * create an Empty TTURL by default! Vital to avoid problems with unknown
- * DPM NULL/EMPTY logic policy!
- */
- TTURL transferURL = TTURL.makeEmpty();
- // make BoLChunkData
- BoLPersistentChunkData aux = null;
- try {
- aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption,
- transferProtocols, fileSize, status, transferURL,
- auxTO.getDeferredStartTime());
- aux.setPrimaryKey(auxTO.getPrimaryKey());
- } catch (InvalidSurlRequestDataAttributesException e) {
- dao.signalMalformedBoLChunk(auxTO);
- log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL "
- + "chunk data from persistence. Dropping chunk from request {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- *
- * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique
- * ID taken from the BoLChunkData
- *
- * @param chunkTO
- * @param chunk
- */
- private void completeTO(ReducedBoLChunkDataTO chunkTO,
- final ReducedBoLChunkData chunk) {
-
- chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN());
- chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId()));
- }
-
- /**
- *
- * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and
- * completes it with the normalized StFN and the SURL unique ID taken from the
- * PtGChunkData
- *
- * @param chunkTO
- * @param chunk
- * @return
- * @throws InvalidReducedBoLChunkDataAttributesException
- */
- private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO,
- final BoLPersistentChunkData chunk)
- throws InvalidReducedBoLChunkDataAttributesException {
-
- ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO);
- this.completeTO(reducedChunkTO, this.reduce(chunk));
- return reducedChunkTO;
- }
-
- /**
- * Creates a ReducedBoLChunkData from the data contained in the received
- * BoLChunkData
- *
- * @param chunk
- * @return
- * @throws InvalidReducedBoLChunkDataAttributesException
- */
- private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk)
- throws InvalidReducedBoLChunkDataAttributesException {
-
- ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(),
- chunk.getStatus());
- reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
- return reducedChunk;
- }
-
- /**
- * Creates a ReducedBoLChunkDataTO from the data contained in the received
- * BoLChunkDataTO
- *
- * @param chunkTO
- * @return
- */
- private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) {
-
- ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO();
- reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey());
- reducedChunkTO.setFromSURL(chunkTO.getFromSURL());
- reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
- reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID());
- reducedChunkTO.setStatus(chunkTO.getStatus());
- reducedChunkTO.setErrString(chunkTO.getErrString());
- return reducedChunkTO;
- }
-
- /**
- * Checks if the received BoLChunkDataTO contains the fields not set by the
- * front end but required
- *
- * @param chunkTO
- * @return
- */
- private boolean isComplete(BoLChunkDataTO chunkTO) {
-
- return (chunkTO.normalizedStFN() != null)
- && (chunkTO.sulrUniqueID() != null);
- }
-
- /**
- * Checks if the received ReducedBoLChunkDataTO contains the fields not set by
- * the front end but required
- *
- * @param reducedChunkTO
- * @return
- */
- // TODO MICHELE USER_SURL new method
- private boolean isComplete(ReducedBoLChunkDataTO reducedChunkTO) {
-
- return (reducedChunkTO.normalizedStFN() != null)
- && (reducedChunkTO.surlUniqueID() != null);
- }
-
- /**
- * Method used to update into Persistence a retrieved BoLChunkData. In case
- * any error occurs, the operation does not proceed but no Exception is
- * thrown. Error messages get logged.
- *
- * Only fileSize, StatusCode, errString and transferURL are updated. Likewise
- * for the request pinLifetime.
- */
- synchronized public void update(BoLPersistentChunkData cd) {
-
- BoLChunkDataTO to = new BoLChunkDataTO();
- /* Primary key needed by DAO Object */
- to.setPrimaryKey(cd.getPrimaryKey());
- to.setFileSize(cd.getFileSize().value());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- cd.getStatus().getStatusCode()));
- to.setErrString(cd.getStatus().getExplanation());
- to.setLifeTime(PinLifetimeConverter.getInstance().toDB(
- cd.getLifeTime().value()));
- // TODO MICHELE USER_SURL fill new fields
- to.setNormalizedStFN(cd.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(cd.getSURL().uniqueId()));
-
- dao.update(to);
- // TODO MICHELE SURL STORE
- // SurlStatusStore.getInstance().storeSurlStatus(cd.getSURL(),
- // cd.getStatus().getStatusCode());
- }
-
- /**
- * Refresh method. TODO THIS IS A WORK IN PROGRESS!!!! This method have to
- * synch the ChunkData information with the database status.
- *
- * @param auxTO
- * @param BoLPersistentChunkData
- * inputChunk
- * @return BoLChunkData outputChunk
- */
- synchronized public BoLPersistentChunkData refreshStatus(
- BoLPersistentChunkData inputChunk) {
-
- /* Currently not used */
- // Call the dao refresh method to synch with the db status
- BoLChunkDataTO auxTO = dao.refresh(inputChunk.getPrimaryKey());
-
- log.debug("BoL CHUNK CATALOG: retrieved data {}", auxTO);
- if (auxTO == null) {
- log.warn("BoL CHUNK CATALOG! Empty TO found in persistence for specified "
- + "request: {}", inputChunk.getPrimaryKey());
- return inputChunk;
- }
-
- /*
- * In this first version the only field updated is the Status. Once
- * updated, the new status is rewritten into the input ChunkData
- */
-
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus());
- if (code != TStatusCode.EMPTY) {
- status = new TReturnStatus(code, auxTO.getErrString());
- }
- inputChunk.setStatus(status);
- return inputChunk;
- }
-
- /**
- * Method that returns a Collection of ReducedBoLChunkData Objects associated
- * to the supplied TRequestToken.
- *
- * If any of the data retrieved for a given chunk is not well formed and so
- * does not allow a ReducedBoLChunkData Object to be created, then that chunk
- * is dropped and gets logged, while processing continues with the next one.
- * All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks associated to the given TRequestToken, then an empty
- * Collection is returned and a messagge gets logged.
- */
- synchronized public Collection lookupReducedBoLChunkData(
- TRequestToken rt) {
-
- Collection reducedChunkDataTOs = dao.findReduced(rt
- .getValue());
- log.debug("BoL CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs);
- ArrayList list = new ArrayList();
- if (reducedChunkDataTOs.isEmpty()) {
- log.debug("BoL CHUNK CATALOG! No chunks found in persistence for {}", rt);
- } else {
- ReducedBoLChunkData reducedChunkData = null;
- for (ReducedBoLChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!this.isComplete(reducedChunkDataTO)) {
- completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("BoL CHUNK CATALOG: returning {}", list);
- }
- return list;
- }
-
- public Collection lookupReducedBoLChunkData(
- TRequestToken requestToken, Collection surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.findReduced(
- requestToken, surlsUniqueIDs, surlsArray);
- return buildReducedChunkDataList(chunkDataTOCollection);
- }
-
- public Collection lookupBoLChunkData(TSURL surl,
- GridUserInterface user) {
-
- return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl }), user);
- }
-
- public Collection lookupBoLChunkData(TSURL surl) {
-
- return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl }));
- }
-
- private Collection lookupBoLChunkData(
- List surls, GridUserInterface user) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.find(surlsUniqueIDs,
- surlsArray, user.getDn());
- log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildChunkDataList(chunkDataTOCollection);
- }
-
- public Collection lookupBoLChunkData(List surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.find(surlsUniqueIDs,
- surlsArray);
- log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildChunkDataList(chunkDataTOCollection);
- }
-
- private Collection buildChunkDataList(
- Collection chunkDataTOCollection) {
-
- List list = new ArrayList();
- BoLPersistentChunkData chunk;
- for (BoLChunkDataTO chunkTO : chunkDataTOCollection) {
- chunk = makeOne(chunkTO);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(this.completeTO(chunkTO, chunk));
- } catch (InvalidReducedBoLChunkDataAttributesException e) {
- log.warn("BoL CHUNK CATALOG! unable to add missing informations "
- + "on DB to the request: {}", e.getMessage());
- }
- }
- log.debug("BoL CHUNK CATALOG: returning {}", list);
- return list;
- }
-
- private BoLPersistentChunkData makeOne(BoLChunkDataTO chunkTO) {
-
- try {
- return makeOne(chunkTO, new TRequestToken(chunkTO.getRequestToken(),
- chunkTO.getTimeStamp()));
- } catch (InvalidTRequestTokenAttributesException e) {
- throw new IllegalStateException(
- "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: "
- + e);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedBoLChunkData Objects matching
- * the supplied GridUser and Collection of TSURLs.
- *
- * If any of the data retrieved for a given chunk is not well formed and so
- * does not allow a ReducedBoLChunkData Object to be created, then that chunk
- * is dropped and gets logged, while processing continues with the next one.
- * All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks associated to the given GridUser and Collection of
- * TSURLs, then an empty Collection is returned and a message gets logged.
- */
- synchronized public Collection lookupReducedBoLChunkData(
- GridUserInterface gu, Collection tsurlCollection) {
-
- int[] surlsUniqueIDs = new int[tsurlCollection.size()];
- String[] surls = new String[tsurlCollection.size()];
- int index = 0;
- for (TSURL tsurl : tsurlCollection) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.findReduced(
- gu.getDn(), surlsUniqueIDs, surls);
- log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildReducedChunkDataList(chunkDataTOCollection);
- }
-
- private Collection buildReducedChunkDataList(
- Collection chunkDataTOCollection) {
-
- ArrayList list = new ArrayList();
- ReducedBoLChunkData reducedChunkData;
- for (ReducedBoLChunkDataTO reducedChunkDataTO : chunkDataTOCollection) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!this.isComplete(reducedChunkDataTO)) {
- this.completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("BoL CHUNK CATALOG: returning {}", list);
- return list;
- }
-
- /**
- * @param auxTO
- * @return
- */
- private ReducedBoLChunkData makeOneReduced(
- ReducedBoLChunkDataTO reducedChunkDataTO) {
-
- StringBuilder errorSb = new StringBuilder();
- // fromSURL
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (reducedChunkDataTO.normalizedStFN() != null) {
- fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN());
- }
- if (reducedChunkDataTO.surlUniqueID() != null) {
- fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue());
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- reducedChunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + reducedChunkDataTO.status());
- } else {
- status = new TReturnStatus(code, reducedChunkDataTO.errString());
- }
- // make ReducedBoLChunkData
- ReducedBoLChunkData aux = null;
- try {
- aux = new ReducedBoLChunkData(fromSURL, status);
- aux.setPrimaryKey(reducedChunkDataTO.primaryKey());
- } catch (InvalidReducedBoLChunkDataAttributesException e) {
- log.warn("BoL CHUNK CATALOG! Retrieved malformed "
- + "Reduced BoL chunk data from persistence: dropping reduced chunk...");
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- * Method used to add into Persistence a new entry. The supplied BoLChunkData
- * gets the primary key changed to the value assigned in Persistence.
- *
- * This method is intended to be used by a recursive BoL request: the parent
- * request supplies a directory which must be expanded, so all new children
- * requests resulting from the files in the directory are added into
- * persistence.
- *
- * So this method does _not_ add a new SRM prepare_to_get request into the DB!
- *
- * The only children data written into the DB are: sourceSURL, TDirOption,
- * statusCode and explanation.
- *
- * In case of any error the operation does not proceed, but no Exception is
- * thrown! Proper messages get logged by underlaying DAO.
- */
- synchronized public void addChild(BoLPersistentChunkData chunkData) {
-
- BoLChunkDataTO to = new BoLChunkDataTO();
- // needed for now to find ID of request! Must be changed soon!
- to.setRequestToken(chunkData.getRequestToken().toString());
- to.setFromSURL(chunkData.getSURL().toString());
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
-
- to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
- to.setDirOption(chunkData.getDirOption().isDirectory());
- to.setNumLevel(chunkData.getDirOption().getNumLevel());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setDeferredStartTime(chunkData.getDeferredStartTime());
-
- /* add the entry and update the Primary Key field */
- dao.addChild(to);
- chunkData.setPrimaryKey(to.getPrimaryKey());
- }
-
- /**
- * Method used to add into Persistence a new entry. The supplied BoLChunkData
- * gets the primary key changed to the value assigned in the Persistence. The
- * method requires the GridUser to whom associate the added request.
- *
- * This method is intended to be used by an srmCopy request in push mode which
- * implies a local srmBoL. The only fields from BoLChunkData that are
- * considered are: the requestToken, the sourceSURL, the pinLifetime, the
- * dirOption, the protocolList, the status and error string.
- *
- * So this method _adds_ a new SRM prepare_to_get request into the DB!
- *
- * In case of any error the operation does not proceed, but no Exception is
- * thrown! The underlaying DAO logs proper error messages.
- */
- synchronized public void add(BoLPersistentChunkData chunkData,
- GridUserInterface gu) {
-
- /* Currently NOT used */
- BoLChunkDataTO to = new BoLChunkDataTO();
- to.setRequestToken(chunkData.getRequestToken().toString());
- to.setFromSURL(chunkData.getSURL().toString());
- // TODO MICHELE USER_SURL fill new fields
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
-
- to.setLifeTime(new Long(chunkData.getLifeTime().value()).intValue());
- to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
- to.setDirOption(chunkData.getDirOption().isDirectory());
- to.setNumLevel(chunkData.getDirOption().getNumLevel());
- to.setProtocolList(TransferProtocolListConverter.toDB(chunkData
- .getTransferProtocols()));
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setDeferredStartTime(chunkData.getDeferredStartTime());
-
- /* add the entry and update the Primary Key field! */
- dao.addNew(to, gu.getDn());
- chunkData.setPrimaryKey(to.getPrimaryKey());
- }
-
- /**
- * Method used to establish if in Persistence there is a BoLChunkData working
- * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case
- * true is returned. In case none are found or there is any problem, false is
- * returned. This method is intended to be used by srmMv.
- */
- synchronized public boolean isSRM_FILE_PINNED(TSURL surl) {
-
- return (dao.numberInSRM_SUCCESS(surl.uniqueId()) > 0);
- }
-
- /**
- * Method used to transit the specified Collection of ReducedBoLChunkData from
- * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not
- * transited. In case of any error nothing is done, but proper error messages
- * get logged by the underlaying DAO.
- */
- synchronized public void transitSRM_SUCCESStoSRM_RELEASED(
- Collection chunks, TRequestToken token) {
-
- if (chunks == null || chunks.isEmpty()) {
- return;
- }
-
- long[] primaryKeys = new long[chunks.size()];
- int index = 0;
- for (ReducedBoLChunkData chunkData : chunks) {
- if (chunkData != null) {
- primaryKeys[index] = chunkData.primaryKey();
- index++;
- }
- }
- dao.transitSRM_SUCCESStoSRM_RELEASED(primaryKeys, token);
- }
-
- /**
- * This method is intended to be used by srmRm to transit all BoL chunks on
- * the given SURL which are in the SRM_FILE_PINNED state, to SRM_ABORTED. The
- * supplied String will be used as explanation in those chunks return status.
- * The global status of the request is _not_ changed.
- *
- * The TURL of those requests will automatically be set to empty. Notice that
- * both removeAllJit(SURL) and removeVolatile(SURL) are automatically invoked
- * on PinnedFilesCatalog, to remove any entry and corresponding physical ACLs.
- *
- * Beware, that the chunks may be part of requests that have finished, or that
- * still have not finished because other chunks are being processed.
- */
- synchronized public void transitSRM_SUCCESStoSRM_ABORTED(TSURL surl,
- String explanation) {
-
- /* Currently NOT used */
- if (explanation == null) {
- explanation = "";
- }
- dao.transitSRM_SUCCESStoSRM_ABORTED(surl.uniqueId(), surl.toString(),
- explanation);
- }
-
- /**
- * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of
- * all BoL Requests whose pinLifetime has expired and the state still has not
- * been changed (a user forgot to run srmReleaseFiles)!
- */
- synchronized public void transitExpiredSRM_SUCCESS() {
-
- dao.transitExpiredSRM_SUCCESS();
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode,
- newStatusCode, explanation);
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- List surlList, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode) {
-
- int[] surlsUniqueIDs = new int[surlList.size()];
- String[] surls = new String[surlList.size()];
- int index = 0;
- for (TSURL tsurl : surlList) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode);
- }
+ private static final Logger log = LoggerFactory.getLogger(BoLChunkCatalog.class);
+
+ private final BoLChunkDAO dao;
+
+ private static BoLChunkCatalog instance;
+
+ public static synchronized BoLChunkCatalog getInstance() {
+ if (instance == null) {
+ instance = new BoLChunkCatalog();
+ }
+ return instance;
+ }
+
+ /**
+ * Private constructor: the catalog now simply binds to the MySQL BoL chunk DAO. The internal
+ * timer that transited expired SRM_FILE_PINNED requests to SRM_RELEASED in previous versions
+ * is no longer started here.
+ */
+ private BoLChunkCatalog() {
+
+ dao = BoLChunkDAOMySql.getInstance();
+ }
+
+ /**
+ * Method that returns a Collection of BoLChunkData Objects matching the supplied TRequestToken.
+ *
+ * If any of the data associated to the TRequestToken is not well formed and so does not allow a
+ * BoLChunkData Object to be created, then that part of the request is dropped and gets logged,
+ * and the processing continues with the next part. All valid chunks get returned: the others get
+ * dropped.
+ *
+ * If there are no chunks to process then an empty Collection is returned, and a message gets
+ * logged.
+ */
+ synchronized public Collection lookup(TRequestToken rt) {
+
+ Collection chunkCollection = dao.find(rt);
+ log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection);
+ List list = new ArrayList();
+
+ if (chunkCollection.isEmpty()) {
+ log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified request: {}", rt);
+ return list;
+ }
+
+ BoLPersistentChunkData chunk;
+ for (BoLChunkDataTO chunkTO : chunkCollection) {
+ chunk = makeOne(chunkTO, rt);
+ if (chunk == null) {
+ continue;
+ }
+ list.add(chunk);
+ if (isComplete(chunkTO)) {
+ continue;
+ }
+ try {
+ dao.updateIncomplete(completeTO(chunkTO, chunk));
+ } catch (InvalidReducedBoLChunkDataAttributesException e) {
+ log.warn("BoL CHUNK CATALOG! Unable to add missing information on DB to the request: {}",
+ e.getMessage());
+ }
+ }
+ log.debug("BoL CHUNK CATALOG: returning " + list);
+ return list;
+ }
+
+ /**
+ * Generates a BoLPersistentChunkData from the received BoLChunkDataTO.
+ *
+ * @param auxTO the transfer object read from persistence
+ * @param rt the request token the chunk belongs to
+ * @return the chunk data, or null if the transfer object is malformed
+ */
+ private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) {
+
+ StringBuilder errorSb = new StringBuilder();
+ TSURL fromSURL = null;
+ try {
+ fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL());
+ } catch (InvalidTSURLAttributesException e) {
+ errorSb.append(e);
+ }
+ if (auxTO.normalizedStFN() != null) {
+ fromSURL.setNormalizedStFN(auxTO.normalizedStFN());
+ }
+ if (auxTO.sulrUniqueID() != null) {
+ fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue());
+ }
+ // lifeTime
+ TLifeTimeInSeconds lifeTime = null;
+ try {
+ long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.getLifeTime());
+ // Check for max value allowed
+ long max = StormConfiguration.getInstance().getPinLifetimeMaximum();
+ if (pinLifeTime > max) {
+ log.warn("PinLifeTime is greater than the max value allowed. "
+ + "Drop the value to the max = {} seconds", max);
+ pinLifeTime = max;
+ }
+ lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS);
+ } catch (IllegalArgumentException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // dirOption
+ TDirOption dirOption = null;
+ try {
+ dirOption =
+ new TDirOption(auxTO.getDirOption(), auxTO.getAllLevelRecursive(), auxTO.getNumLevel());
+ } catch (InvalidTDirOptionAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // transferProtocols
+ TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.getProtocolList());
+ if (transferProtocols.size() == 0) {
+ errorSb
+ .append("\nEmpty list of TransferProtocols or" + " could not translate TransferProtocols!");
+ /* fail construction of BoLChunkData! */
+ transferProtocols = null;
+ }
+ // fileSize
+ TSizeInBytes fileSize = null;
+ try {
+ fileSize = TSizeInBytes.make(auxTO.getFileSize());
+ } catch (InvalidTSizeAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // status
+ TReturnStatus status = null;
+ TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus());
+ if (code == TStatusCode.EMPTY) {
+ errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.getStatus());
+ } else {
+ status = new TReturnStatus(code, auxTO.getErrString());
+ }
+ // transferURL
+ /*
+ * whatever is read is just meaningless because BoL will fill it in!!! So create an Empty TTURL
+ * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy!
+ */
+ TTURL transferURL = TTURL.makeEmpty();
+ // make BoLChunkData
+ BoLPersistentChunkData aux = null;
+ try {
+ aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption, transferProtocols,
+ fileSize, status, transferURL, auxTO.getDeferredStartTime());
+ aux.setPrimaryKey(auxTO.getPrimaryKey());
+ } catch (InvalidSurlRequestDataAttributesException e) {
+ dao.updateStatus(auxTO, SRM_FAILURE, "Request is malformed!");
+ log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL "
+ + "chunk data from persistence. Dropping chunk from request {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ }
+ // end...
+ return aux;
+ }
+
+ /**
+ *
+ * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique ID taken from the
+ * BoLChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ */
+ private void completeTO(ReducedBoLChunkDataTO chunkTO, final ReducedBoLChunkData chunk) {
+
+ chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN());
+ chunkTO.setSurlUniqueID(Integer.valueOf(chunk.fromSURL().uniqueId()));
+ }
+
+ /**
+ *
+ * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and completes it with the
+ * normalized StFN and the SURL unique ID taken from the BoLPersistentChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ * @return
+ * @throws InvalidReducedBoLChunkDataAttributesException
+ */
+ private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO,
+ final BoLPersistentChunkData chunk) throws InvalidReducedBoLChunkDataAttributesException {
+
+ ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO);
+ this.completeTO(reducedChunkTO, this.reduce(chunk));
+ return reducedChunkTO;
+ }
+
+ /**
+ * Creates a ReducedBoLChunkData from the data contained in the received BoLChunkData
+ *
+ * @param chunk
+ * @return
+ * @throws InvalidReducedBoLChunkDataAttributesException
+ */
+ private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk)
+ throws InvalidReducedBoLChunkDataAttributesException {
+
+ ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(), chunk.getStatus());
+ reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
+ return reducedChunk;
+ }
+
+ /**
+ * Creates a ReducedBoLChunkDataTO from the data contained in the received BoLChunkDataTO
+ *
+ * @param chunkTO
+ * @return
+ */
+ private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) {
+
+ ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO();
+ reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey());
+ reducedChunkTO.setFromSURL(chunkTO.getFromSURL());
+ reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
+ reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID());
+ reducedChunkTO.setStatus(chunkTO.getStatus());
+ reducedChunkTO.setErrString(chunkTO.getErrString());
+ return reducedChunkTO;
+ }
+
+ /**
+ * Checks whether the received BoLChunkDataTO contains the fields that are required but not set by the frontend
+ *
+ * @param chunkTO
+ * @return
+ */
+ private boolean isComplete(BoLChunkDataTO chunkTO) {
+
+ return (chunkTO.normalizedStFN() != null) && (chunkTO.sulrUniqueID() != null);
+ }
+
+ /**
+ * Method used to update a retrieved BoLChunkData in Persistence. In case any error occurs, the
+ * operation does not proceed but no Exception is thrown. Error messages get logged.
+ *
+ * Only fileSize, StatusCode, errString and transferURL are updated. Likewise for the request
+ * pinLifetime.
+ */
+ synchronized public void update(BoLPersistentChunkData cd) {
+
+ BoLChunkDataTO to = new BoLChunkDataTO();
+ /* Primary key needed by DAO Object */
+ to.setPrimaryKey(cd.getPrimaryKey());
+ to.setFileSize(cd.getFileSize().value());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(cd.getStatus().getStatusCode()));
+ to.setErrString(cd.getStatus().getExplanation());
+ to.setLifeTime(PinLifetimeConverter.getInstance().toDB(cd.getLifeTime().value()));
+ to.setNormalizedStFN(cd.getSURL().normalizedStFN());
+ to.setSurlUniqueID(Integer.valueOf(cd.getSURL().uniqueId()));
+ dao.update(to);
+ }
+
+ /**
+ * Method used to add into Persistence a new entry. The supplied BoLChunkData gets the primary key
+ * changed to the value assigned in Persistence.
+ *
+ * This method is intended to be used by a recursive BoL request: the parent request supplies a
+ * directory which must be expanded, so all new children requests resulting from the files in the
+ * directory are added into persistence.
+ *
+ * So this method does _not_ add a new SRM prepare_to_get request into the DB!
+ *
+ * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and
+ * explanation.
+ *
+ * In case of any error the operation does not proceed, but no Exception is thrown! Proper
+ * messages get logged by the underlying DAO.
+ */
+ synchronized public void addChild(BoLPersistentChunkData chunkData) {
+
+ BoLChunkDataTO to = new BoLChunkDataTO();
+ // needed for now to find ID of request! Must be changed soon!
+ to.setRequestToken(chunkData.getRequestToken().toString());
+ to.setFromSURL(chunkData.getSURL().toString());
+ to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
+ to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId()));
+
+ to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
+ to.setDirOption(chunkData.getDirOption().isDirectory());
+ to.setNumLevel(chunkData.getDirOption().getNumLevel());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode()));
+ to.setErrString(chunkData.getStatus().getExplanation());
+ to.setDeferredStartTime(chunkData.getDeferredStartTime());
+
+ /* add the entry and update the Primary Key field */
+ dao.addChild(to);
+ chunkData.setPrimaryKey(to.getPrimaryKey());
+ }
+
+ public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode,
+ TStatusCode newStatusCode, String explanation) {
+
+ dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation);
+ }
}
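
Taken together, the rewritten catalog above is a lazily initialized, synchronized singleton that delegates all persistence to BoLChunkDAOMySql; the old eagerly built instance, its expiry Timer, and the srmCopy-era add()/lookup variants are gone. A usage sketch, assuming a TRequestToken is already at hand (raw collection types kept as in the class):

    BoLChunkCatalog catalog = BoLChunkCatalog.getInstance();
    Collection chunks = catalog.lookup(requestToken);
    if (chunks.isEmpty()) {
      // nothing in persistence for this token; lookup() has already logged it
    }
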
diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java
deleted file mode 100644
index 2c4e45f48..000000000
--- a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java
+++ /dev/null
@@ -1,1688 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.config.Configuration;
-import it.grid.storm.ea.StormEA;
-import it.grid.storm.namespace.NamespaceDirector;
-import it.grid.storm.namespace.NamespaceException;
-import it.grid.storm.namespace.StoRI;
-import it.grid.storm.namespace.naming.SURL;
-import it.grid.storm.srm.types.InvalidTSURLAttributesException;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TRequestType;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TStatusCode;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.SQLWarning;
-import java.sql.Statement;
-import java.sql.Timestamp;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * DAO class for BoLChunkCatalog. This DAO is specifically designed to connect
- * to a MySQL DB. The raw data found in those tables is pre-treated in order to
- * turn it into the Object Model of StoRM. See Method comments for further info.
- * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the
- * object model.
- *
- * @author CNAF
- * @version 1.0
- * @date Aug 2009
- */
-public class BoLChunkDAO {
-
- private static final Logger log = LoggerFactory.getLogger(BoLChunkDAO.class);
-
- /** String with the name of the class for the DB driver */
- private final String driver = Configuration.getInstance().getDBDriver();
- /** String referring to the URL of the DB */
- private final String url = Configuration.getInstance().getStormDbURL();
- /** String with the password for the DB */
- private final String password = Configuration.getInstance().getDBPassword();
- /** String with the name for the DB */
- private final String name = Configuration.getInstance().getDBUserName();
- /** Connection to DB - WARNING!!! It is kept open all the time! */
- private Connection con = null;
- private final static BoLChunkDAO dao = new BoLChunkDAO();
-
- /**
- * timer thread that will run a taask to alert when reconnecting is necessary!
- */
- private Timer clock = null;
- /**
- * timer task that will update the boolean signaling that a reconnection is
- * needed!
- */
- private TimerTask clockTask = null;
- /** milliseconds that must pass before reconnecting to DB */
- private final long period = Configuration.getInstance()
- .getDBReconnectPeriod() * 1000;
- /** initial delay in milliseconds before starting timer */
- private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000;
- /** boolean that tells whether reconnection is needed because of MySQL bug! */
- private boolean reconnect = false;
-
- private BoLChunkDAO() {
-
- setUpConnection();
-
- clock = new Timer();
- clockTask = new TimerTask() {
-
- @Override
- public void run() {
-
- reconnect = true;
- }
- }; // clock task
- clock.scheduleAtFixedRate(clockTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of the BoLChunkDAO.
- */
- public static BoLChunkDAO getInstance() {
-
- return dao;
- }
-
- /**
- * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets
- * its primaryKey changed to the one assigned by the DB. The supplied
- * BoLChunkData is used to fill in only the DB table where file specific info
- * gets recorded: it does _not_ add a new request! So if spurious data is
- * supplied, it will just stay there because of a lack of a parent request!
- */
- public synchronized void addChild(BoLChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: addChild - unable to get a valid connection!");
- return;
- }
- String str = null;
- PreparedStatement id = null; // statement to find out the ID associated to
- // the request token
- ResultSet rsid = null; // result set containing the ID of the request.
- // insertion
- try {
-
- /* WARNING!!!! We are forced to run a query to get the ID of the request,
- * which should NOT be so because the corresponding request object should
- * have been changed with the extra field! However, it is not possible
- * at the moment to perform such change because of strict deadline and
- * the change could wreak havoc the code. So we are forced to make this
- * query!!!
- */
-
- // begin transaction
- con.setAutoCommit(false);
- logWarnings(con.getWarnings());
-
- // find ID of request corresponding to given RequestToken
- str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?";
-
- id = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- id.setString(1, to.getRequestToken());
- logWarnings(id.getWarnings());
-
- log.debug("BoL CHUNK DAO: addChild; {}", id.toString());
- rsid = id.executeQuery();
- logWarnings(id.getWarnings());
-
- /* ID of request in request_process! */
- int request_id = extractID(rsid);
- int id_s = fillBoLTables(to, request_id);
-
- // end transaction!
- con.commit();
- logWarnings(con.getWarnings());
- con.setAutoCommit(true);
- logWarnings(con.getWarnings());
-
- // update primary key reading the generated key
- to.setPrimaryKey(id_s);
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; "
- + "exception received: {}", to, e.getMessage(), e);
- rollback(con);
- } catch (Exception e) {
- log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; "
- + "exception received: {}", to, e.getMessage(), e);
- rollback(con);
- } finally {
- close(rsid);
- close(id);
- }
- }
-
- /**
- * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets
- * its primaryKey changed to the one assigned by the DB. The client_dn must
- * also be supplied as a String. The supplied BoLChunkData is used to fill in
- * all the DB tables where file specific info gets recorded: it _adds_ a new
- * request!
- */
- public synchronized void addNew(BoLChunkDataTO to, String client_dn) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: addNew - unable to get a valid connection!");
- return;
- }
- String str = null;
- /* Result set containing the ID of the inserted new request */
- ResultSet rs_new = null;
- /* Insert new request into process_request */
- PreparedStatement addNew = null;
- /* Insert protocols for request. */
- PreparedStatement addProtocols = null; // insert protocols for request.
- try {
- // begin transaction
- con.setAutoCommit(false);
- logWarnings(con.getWarnings());
-
- // add to request_queue...
- str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp,deferredStartTime) VALUES (?,?,?,?,?,?,?,?,?)";
- addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- logWarnings(con.getWarnings());
- /* request type set to bring online */
- addNew.setString(1,
- RequestTypeConverter.getInstance().toDB(TRequestType.BRING_ON_LINE));
- logWarnings(addNew.getWarnings());
-
- addNew.setString(2, client_dn);
- logWarnings(addNew.getWarnings());
-
- addNew.setInt(3, to.getLifeTime());
- logWarnings(addNew.getWarnings());
-
- addNew.setInt(
- 4,
- StatusCodeConverter.getInstance().toDB(
- TStatusCode.SRM_REQUEST_INPROGRESS));
- logWarnings(addNew.getWarnings());
-
- addNew.setString(5, "New BoL Request resulting from srmCopy invocation.");
- logWarnings(addNew.getWarnings());
-
- addNew.setString(6, to.getRequestToken());
- logWarnings(addNew.getWarnings());
-
- addNew.setInt(7, 1); // number of requested files set to 1!
- logWarnings(addNew.getWarnings());
-
- addNew.setTimestamp(8, new Timestamp(new Date().getTime()));
- logWarnings(addNew.getWarnings());
-
- addNew.setInt(9, to.getDeferredStartTime());
- logWarnings(addNew.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; {}", addNew.toString());
- addNew.execute();
- logWarnings(addNew.getWarnings());
-
- rs_new = addNew.getGeneratedKeys();
- int id_new = extractID(rs_new);
-
- // add protocols...
- str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)";
- addProtocols = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-      for (Iterator<String> i = to.getProtocolList().iterator(); i.hasNext();) {
- addProtocols.setInt(1, id_new);
- logWarnings(addProtocols.getWarnings());
-
- addProtocols.setString(2, i.next());
- logWarnings(addProtocols.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; {}", addProtocols.toString());
- addProtocols.execute();
- logWarnings(addProtocols.getWarnings());
- }
-
- // addChild...
- int id_s = fillBoLTables(to, id_new);
-
- // end transaction!
- con.commit();
- logWarnings(con.getWarnings());
- con.setAutoCommit(true);
- logWarnings(con.getWarnings());
-
- // update primary key reading the generated key
- to.setPrimaryKey(id_s);
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: Rolling back! Unable to complete addNew! "
- + "BoLChunkDataTO: {}; exception received: {}", to, e.getMessage(), e);
- rollback(con);
- } catch (Exception e) {
- log.error("BoL CHUNK DAO: unable to complete addNew! BoLChunkDataTO: {}; "
- + "exception received: {}", to, e.getMessage(), e);
- rollback(con);
- } finally {
- close(rs_new);
- close(addNew);
- close(addProtocols);
- }
- }
-
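A hypothetical caller of `addNew` would populate a transfer object and let the DAO assign the primary key from the generated keys. This is a sketch only: `BoLChunkDAO.getInstance()` is assumed (the accessor is not visible in this hunk) and all values are illustrative; the setters are those of `BoLChunkDataTO` below.

```java
BoLChunkDataTO to = new BoLChunkDataTO();      // defaults: GSIFTP, SRM_REQUEST_QUEUED
to.setRequestToken("11e8f0a2-example-token");  // hypothetical token
to.setFromSURL("srm://storage.example.org/dteam/file.dat"); // hypothetical SURL
to.setLifeTime(3600);                          // requested pin lifetime, in seconds
to.setDeferredStartTime(0);

BoLChunkDAO.getInstance().addNew(to, "/DC=org/DC=example/CN=Some User");
long key = to.getPrimaryKey();                 // assigned by the DAO on success
```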
-  /**
-   * Fills in the request_DirOption, request_BoL and status_BoL tables for the
-   * given chunk. To be used inside a transaction.
-   *
-   * @param to the chunk whose data must be recorded
-   * @param requestQueueID the ID of the parent row in request_queue
-   * @return the generated ID of the new request_BoL row
-   * @throws SQLException
-   * @throws Exception
-   */
- private synchronized int fillBoLTables(BoLChunkDataTO to, int requestQueueID)
- throws SQLException, Exception {
-
- String str = null;
-    /* Result set containing the ID of the inserted request_DirOption row */
-    ResultSet rs_do = null;
-    /* Result set containing the ID of the inserted request_BoL row */
-    ResultSet rs_b = null;
-    /* Result set containing the ID of the inserted status_BoL row */
-    ResultSet rs_s = null;
- /* insert TDirOption for request */
- PreparedStatement addDirOption = null;
- /* insert request_Bol for request */
- PreparedStatement addBoL = null;
- PreparedStatement addChild = null;
-
- try {
- // first fill in TDirOption
- str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)";
- addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- logWarnings(con.getWarnings());
- addDirOption.setBoolean(1, to.getDirOption());
- logWarnings(addDirOption.getWarnings());
-
- addDirOption.setBoolean(2, to.getAllLevelRecursive());
- logWarnings(addDirOption.getWarnings());
-
- addDirOption.setInt(3, to.getNumLevel());
- logWarnings(addDirOption.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; {}", addDirOption.toString());
- addDirOption.execute();
- logWarnings(addDirOption.getWarnings());
-
- rs_do = addDirOption.getGeneratedKeys();
- int id_do = extractID(rs_do);
-
- // second fill in request_BoL... sourceSURL and TDirOption!
- str = "INSERT INTO request_BoL (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)";
- addBoL = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- logWarnings(con.getWarnings());
- addBoL.setInt(1, id_do);
- logWarnings(addBoL.getWarnings());
-
- addBoL.setInt(2, requestQueueID);
- logWarnings(addBoL.getWarnings());
-
- addBoL.setString(3, to.getFromSURL());
- logWarnings(addBoL.getWarnings());
-
- addBoL.setString(4, to.normalizedStFN());
- logWarnings(addBoL.getWarnings());
-
- addBoL.setInt(5, to.sulrUniqueID());
- logWarnings(addBoL.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; {}", addBoL.toString());
- addBoL.execute();
- logWarnings(addBoL.getWarnings());
-
- rs_b = addBoL.getGeneratedKeys();
- int id_g = extractID(rs_b);
-
- // third fill in status_BoL...
- str = "INSERT INTO status_BoL (request_BoLID,statusCode,explanation) VALUES (?,?,?)";
- addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- logWarnings(con.getWarnings());
- addChild.setInt(1, id_g);
- logWarnings(addChild.getWarnings());
-
- addChild.setInt(2, to.getStatus());
- logWarnings(addChild.getWarnings());
-
- addChild.setString(3, to.getErrString());
- logWarnings(addChild.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; " + addChild.toString());
- addChild.execute();
- logWarnings(addChild.getWarnings());
-
- return id_g;
- } finally {
- close(rs_do);
- close(rs_b);
- close(rs_s);
- close(addDirOption);
- close(addBoL);
- close(addChild);
- }
- }
-
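`fillBoLTables` chains three generated keys: the `request_DirOption` ID feeds `request_BoL`, whose ID in turn feeds `status_BoL` and is returned to the caller. The same chaining pattern, reduced to its essentials over hypothetical tables (not the StoRM schema):

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

// Generic sketch of generated-key chaining across two inserts.
static int insertChained(Connection con) throws SQLException {
  int parentId;
  try (PreparedStatement parent = con.prepareStatement(
      "INSERT INTO t_parent (x) VALUES (1)", Statement.RETURN_GENERATED_KEYS)) {
    parent.executeUpdate();
    try (ResultSet keys = parent.getGeneratedKeys()) {
      keys.next();
      parentId = keys.getInt(1);
    }
  }
  try (PreparedStatement child = con.prepareStatement(
      "INSERT INTO t_child (parent_id) VALUES (?)", Statement.RETURN_GENERATED_KEYS)) {
    child.setInt(1, parentId);
    child.executeUpdate();
    try (ResultSet keys = child.getGeneratedKeys()) {
      keys.next();
      return keys.getInt(1); // analogous to the request_BoL ID returned above
    }
  }
}
```

Because the three inserts must either all land or none, callers such as `addChild` and `addNew` wrap the call in an explicit transaction (`setAutoCommit(false)` … `commit()`), rolling back on failure.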
-  /**
-   * Method used to save the changes made to a retrieved BoLChunkDataTO back
-   * into the MySQL DB. The fileSize, statusCode and explanation of the
-   * status_BoL table are written to the DB, together with the request
-   * pinLifetime and the normalized StFN and unique ID of the source SURL. In
-   * case of any error, an error message gets logged but no exception is
-   * thrown.
-   */
- public synchronized void update(BoLChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: update - unable to get a valid connection!");
- return;
- }
- PreparedStatement updateFileReq = null;
- try {
- // ready updateFileReq...
- updateFileReq = con
- .prepareStatement("UPDATE request_queue rq JOIN (status_BoL sb, request_BoL rb) ON (rq.ID=rb.request_queueID AND sb.request_BoLID=rb.ID)"
- + " SET sb.fileSize=?, sb.statusCode=?, sb.explanation=?, rq.pinLifetime=?, rb.normalized_sourceSURL_StFN=?, rb.sourceSURL_uniqueID=?"
- + " WHERE rb.ID=?");
- logWarnings(con.getWarnings());
- updateFileReq.setLong(1, to.getFileSize());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(2, to.getStatus());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(3, to.getErrString());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(4, to.getLifeTime());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(5, to.normalizedStFN());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(6, to.sulrUniqueID());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setLong(7, to.getPrimaryKey());
- logWarnings(updateFileReq.getWarnings());
- // execute update
- log.trace("BoL CHUNK DAO: update method; {}", updateFileReq.toString());
- updateFileReq.executeUpdate();
- logWarnings(updateFileReq.getWarnings());
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: Unable to complete update! {}", e.getMessage(), e);
- } finally {
- close(updateFileReq);
- }
- }
-
- /**
- * Updates the request_Bol represented by the received ReducedBoLChunkDataTO
- * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID
- *
- * @param chunkTO
- */
- public synchronized void updateIncomplete(ReducedBoLChunkDataTO chunkTO) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: updateIncomplete - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_BoL SET normalized_sourceSURL_StFN=?, "
- + "sourceSURL_uniqueID=? WHERE ID=?";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- stmt.setString(1, chunkTO.normalizedStFN());
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(2, chunkTO.surlUniqueID());
- logWarnings(stmt.getWarnings());
-
- stmt.setLong(3, chunkTO.primaryKey());
- logWarnings(stmt.getWarnings());
-
- log.trace("BoL CHUNK DAO - update incomplete: {}", stmt.toString());
- stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: Unable to complete update incomplete! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * TODO WARNING! THIS IS A WORK IN PROGRESS!!! Method used to refresh the
- * BoLChunkDataTO information from the MySQL DB. In this first version, only
-   * the statusCode is reloaded from the DB. TODO The next version must contain
- * all the information related to the Chunk! In case of any error, an error
- * message gets logged but no exception is thrown.
- */
- public synchronized BoLChunkDataTO refresh(long primary_key) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: refresh - unable to get a valid connection!");
- return null;
- }
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
-
- try {
- // get chunks of the request
- str = "SELECT statusCode " + "FROM status_BoL "
- + "WHERE request_BoLID=?";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- find.setLong(1, primary_key);
-
- logWarnings(find.getWarnings());
- log.trace("BoL CHUNK DAO: refresh status method; " + find.toString());
-
- rs = find.executeQuery();
-
- logWarnings(find.getWarnings());
- BoLChunkDataTO aux = null;
- while (rs.next()) {
- aux = new BoLChunkDataTO();
- aux.setStatus(rs.getInt("statusCode"));
- }
- return aux;
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: {}", e.getMessage(), e);
- return null;
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that queries the MySQL DB to find all entries matching the supplied
- * TRequestToken. The Collection contains the corresponding BoLChunkDataTO
- * objects. An initial simple query establishes the list of protocols
- * associated with the request. A second complex query establishes all chunks
- * associated with the request, by properly joining request_queue,
- * request_BoL, status_BoL and request_DirOption. The considered fields are:
-   * (1) From status_BoL: the ID field which becomes the TO's primary key, and
- * statusCode. (2) From request_BoL: sourceSURL (3) From request_queue:
- * pinLifetime (4) From request_DirOption: isSourceADirectory,
-   * allLevelRecursive, numOfLevels. In case of any error, a log gets written and
- * an empty collection is returned. No exception is thrown. NOTE! Chunks in
- * SRM_ABORTED status are NOT returned!
- */
-  public synchronized Collection<BoLChunkDataTO> find(TRequestToken requestToken) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: find - unable to get a valid connection!");
-      return new ArrayList<BoLChunkDataTO>();
- }
- String strToken = requestToken.toString();
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- str = "SELECT tp.config_ProtocolsID "
- + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID "
- + "WHERE rq.r_token=?";
-
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
-      ArrayList<String> protocols = new ArrayList<String>();
- find.setString(1, strToken);
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO: find method; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
- while (rs.next()) {
- protocols.add(rs.getString("tp.config_ProtocolsID"));
- }
- close(rs);
- close(find);
-
- // get chunks of the request
- str = "SELECT sb.statusCode, rq.timeStamp, rq.pinLifetime, rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID "
- + "WHERE rq.r_token=? AND sb.statusCode<>?";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-      ArrayList<BoLChunkDataTO> list = new ArrayList<BoLChunkDataTO>();
- find.setString(1, strToken);
- logWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED));
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO: find method; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- BoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new BoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime"));
- chunkDataTO.setRequestToken(strToken);
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
-
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory"));
- chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive"));
- chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels"));
- chunkDataTO.setProtocolList(protocols);
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BOL CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
-      return new ArrayList<BoLChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
-
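A hedged usage sketch of `find`, assuming the usual `getInstance()` accessor (not visible in this hunk); the `processChunk` handler is hypothetical:

```java
Collection<BoLChunkDataTO> chunks = BoLChunkDAO.getInstance().find(requestToken);
for (BoLChunkDataTO chunk : chunks) {
  // SRM_ABORTED chunks never appear here: the query filters them out.
  processChunk(chunk); // hypothetical handler
}
```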
- /**
- * Method that returns a Collection of ReducedBoLChunkDataTO associated to the
- * given TRequestToken expressed as String.
- */
-  public synchronized Collection<ReducedBoLChunkDataTO> findReduced(
-    String reqtoken) {
-
-    if (!checkConnection()) {
-      log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!");
-      return new ArrayList<ReducedBoLChunkDataTO>();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- // get reduced chunks
- String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "WHERE rq.r_token=?";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
-      ArrayList<ReducedBoLChunkDataTO> list = new ArrayList<ReducedBoLChunkDataTO>();
- find.setString(1, reqtoken);
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO! findReduced with request token; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- ReducedBoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedBoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BOL CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
-      return new ArrayList<ReducedBoLChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedBoLChunkDataTO associated to the
-   * given TRequestToken, and whose SURLs are contained in the supplied array of
- * Strings.
- */
-  public synchronized Collection<ReducedBoLChunkDataTO> findReduced(
-    TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) {
-
-    if (!checkConnection()) {
-      log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!");
-      return new ArrayList<ReducedBoLChunkDataTO>();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
-      /*
-       * NOTE: we also search on the fromSurl because otherwise we would lose
-       * all request_BoL rows that do not have the uniqueID set, since they
-       * have not yet been used by anybody
-       */
- // get reduced chunks
- String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "WHERE rq.r_token=? AND ( rb.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs)
- + " AND rb.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
-      ArrayList<ReducedBoLChunkDataTO> list = new ArrayList<ReducedBoLChunkDataTO>();
- find.setString(1, requestToken.getValue());
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- ReducedBoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedBoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: {}", e.getMessage(), e);
-      return new ArrayList<ReducedBoLChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedBoLChunkDataTO associated to the
- * given griduser, and whose SURLs are contained in the supplied array of
- * Strings.
- */
-  public synchronized Collection<ReducedBoLChunkDataTO> findReduced(
-    String griduser, int[] surlUniqueIDs, String[] surls) {
-
-    if (!checkConnection()) {
-      log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!");
-      return new ArrayList<ReducedBoLChunkDataTO>();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
-      /*
-       * NOTE: we also search on the fromSurl because otherwise we would lose
-       * all request_BoL rows that do not have the uniqueID set, since they
-       * have not yet been used by anybody
-       */
- // get reduced chunks
- String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "WHERE rq.client_dn=? AND ( rb.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs)
- + " AND rb.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
-      ArrayList<ReducedBoLChunkDataTO> list = new ArrayList<ReducedBoLChunkDataTO>();
- find.setString(1, griduser);
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- ReducedBoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedBoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: {}", e.getMessage(), e);
-      return new ArrayList<ReducedBoLChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns the number of BoL requests on the given SURL, that are
- * in SRM_SUCCESS state. This method is intended to be used by BoLChunkCatalog
- * in the isSRM_SUCCESS method invocation. In case of any error, 0 is
- * returned.
- */
- public synchronized int numberInSRM_SUCCESS(int surlUniqueID) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: numberInSRM_SUCCESS - unable to get a valid connection!");
- return 0;
- }
- String str = "SELECT COUNT(rb.ID) "
- + "FROM status_BoL sb JOIN request_BoL rb "
- + "ON (sb.request_BoLID=rb.ID) "
- + "WHERE rb.sourceSURL_uniqueID=? AND sb.statusCode=?";
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- /* Prepared statement spares DB-specific String notation! */
- find.setInt(1, surlUniqueID);
- logWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO - numberInSRM_SUCCESS method: {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- int numberFileSuccessful = 0;
- if (rs.next()) {
- numberFileSuccessful = rs.getInt(1);
- }
- return numberFileSuccessful;
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Unable to determine numberInSRM_SUCCESS! "
- + "Returning 0! ", e.getMessage(), e);
- return 0;
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method used in extraordinary situations to signal that data retrieved from
- * the DB was malformed and could not be translated into the StoRM object
- * model. This method attempts to change the status of the request to
- * SRM_FAILURE and record it in the DB. This operation could potentially fail
- * because the source of the malformed problems could be a problematic DB;
-   * indeed, initially only log messages were recorded. Yet it soon became
- * clear that the source of malformed data were the clients and/or FE
- * recording info in the DB. In these circumstances the client would see its
-   * request as being in the SRM_IN_PROGRESS state forever. Hence the pressing
- * need to inform it of the encountered problems.
- */
- public synchronized void signalMalformedBoLChunk(BoLChunkDataTO auxTO) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: signalMalformedBoLChunk - unable to get a valid connection!");
- return;
- }
- String signalSQL = "UPDATE status_BoL SET statusCode="
- + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE)
- + ", explanation=? WHERE request_BoLID=" + auxTO.getPrimaryKey();
- PreparedStatement signal = null;
- try {
- signal = con.prepareStatement(signalSQL);
- logWarnings(con.getWarnings());
- /* Prepared statement spares DB-specific String notation! */
- signal.setString(1, "Request is malformed!");
- logWarnings(signal.getWarnings());
-
- log.trace("BoL CHUNK DAO: signalMalformed; {}", signal.toString());
- signal.executeUpdate();
- logWarnings(signal.getWarnings());
- } catch (SQLException e) {
- log.error("BoLChunkDAO! Unable to signal in DB that the request was "
- + "malformed! Request: {}; Exception: {}", auxTO.toString(),
- e.toString(), e);
- } finally {
- close(signal);
- }
- }
-
- /**
-   * Method that updates all expired requests in SRM_SUCCESS state into
-   * SRM_RELEASED. This is needed when the client forgets to invoke
-   * srmReleaseFiles().
-   *
-   * @return the list of expired SURLs whose pin could be released
- */
-  public synchronized List<TSURL> transitExpiredSRM_SUCCESS() {
-
-    if (!checkConnection()) {
-      log.error("BoL CHUNK DAO: transitExpiredSRM_SUCCESS - unable to get a valid connection!");
-      return new ArrayList<TSURL>();
-    }
-
-    HashMap<String, Integer> expiredSurlMap = new HashMap<String, Integer>();
- String str = null;
- PreparedStatement prepStatement = null;
-
- /* Find all expired surls */
- try {
- // start transaction
- con.setAutoCommit(false);
-
- str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM "
- + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "WHERE sb.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime ";
-
- prepStatement = con.prepareStatement(str);
- prepStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
-
- ResultSet res = prepStatement.executeQuery();
- logWarnings(prepStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rb.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID"));
-        /* If the uniqueID is not set, compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("BoLChunkDAO! unable to build the TSURL from {}: "
- + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage());
- }
- }
- expiredSurlMap.put(sourceSURL, uniqueID);
- }
-
- if (expiredSurlMap.isEmpty()) {
- commit(con);
- log.trace("BoLChunkDAO! No chunk of BoL request was transited from "
- + "SRM_SUCCESS to SRM_RELEASED.");
-        return new ArrayList<TSURL>();
- }
- } catch (SQLException e) {
- log.error("BoLChunkDAO! SQLException.", e.getMessage(), e);
- rollback(con);
-      return new ArrayList<TSURL>();
- } finally {
- close(prepStatement);
- }
-
- /* Update status of all successful surls to SRM_RELEASED */
-
- prepStatement = null;
- try {
-
- str = "UPDATE "
- + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "SET sb.statusCode=? "
- + "WHERE sb.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime ";
-
- prepStatement = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- prepStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- logWarnings(prepStatement.getWarnings());
-
- prepStatement.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(prepStatement.getWarnings());
-
- log.trace("BoL CHUNK DAO - transitExpiredSRM_SUCCESS method: {}",
- prepStatement.toString());
-
- int count = prepStatement.executeUpdate();
- logWarnings(prepStatement.getWarnings());
-
- if (count == 0) {
- log.trace("BoLChunkDAO! No chunk of BoL request was"
- + " transited from SRM_SUCCESS to SRM_RELEASED.");
- } else {
- log.info("BoLChunkDAO! {} chunks of BoL requests were transited from "
- + "SRM_SUCCESS to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("BoLChunkDAO! Unable to transit expired SRM_SUCCESS chunks of "
- + "BoL requests, to SRM_RELEASED! ", e.getMessage(), e);
- rollback(con);
-      return new ArrayList<TSURL>();
- } finally {
- close(prepStatement);
- }
-
-    /*
-     * In order to enhance performance, we could check here whether any file
-     * system with tape support (T1D0, T1D1) exists; if there is none, the
-     * following can be skipped
-     */
-
-    /* Find all surls not yet expired, pinned by BoL or PtG requests */
-
-    HashSet<Integer> pinnedSurlSet = new HashSet<Integer>();
- try {
- // SURLs pinned by BoLs
- str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM "
- + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "WHERE sb.statusCode="
- + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime ";
-
- ResultSet res = null;
-
- prepStatement = con.prepareStatement(str);
- res = prepStatement.executeQuery();
- logWarnings(prepStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rb.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID"));
-        /* If the uniqueID is not set, compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("BoLChunkDAO! unable to build the TSURL from {}: "
- + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage());
- }
- }
- pinnedSurlSet.add(uniqueID);
- }
-
- close(prepStatement);
-
- str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM "
- + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "WHERE sg.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime ";
-
- prepStatement = con.prepareStatement(str);
-
- prepStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
-
- res = prepStatement.executeQuery();
- logWarnings(prepStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rg.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID"));
-        /* If the uniqueID is not set, compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("BoLChunkDAO! unable to build the TSURL from {}: "
- + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage());
- }
- }
- pinnedSurlSet.add(uniqueID);
- }
-
- commit(con);
-
- } catch (SQLException e) {
- log.error("BoLChunkDAO! SQLException. {}", e.getMessage(), e);
- rollback(con);
- } finally {
- close(prepStatement);
- }
-
-    /* Remove the pinned Extended Attribute if no valid request still pins the surl */
-    ArrayList<TSURL> expiredSurlList = new ArrayList<TSURL>();
-    TSURL surl;
-    for (Entry<String, Integer> surlEntry : expiredSurlMap.entrySet()) {
- if (!pinnedSurlSet.contains(surlEntry.getValue())) {
- try {
- surl = TSURL.makeFromStringValidate(surlEntry.getKey());
- } catch (InvalidTSURLAttributesException e) {
- log.error("Invalid SURL, cannot release the pin "
- + "(Extended Attribute): {}", surlEntry.getKey());
- continue;
- }
- expiredSurlList.add(surl);
- StoRI stori;
- try {
- stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl);
- } catch (Throwable e) {
- log.error("Invalid SURL {} cannot release the pin. {}: {}",
- surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage());
- continue;
- }
-
- if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) {
- StormEA.removePinned(stori.getAbsolutePath());
- }
- }
- }
- return expiredSurlList;
- }
-
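The method interleaves three SQL phases with extended-attribute cleanup; the final decision for each expired SURL reduces to a set lookup against the still-pinned SURLs. A minimal sketch of that last step, using the names built up in the method above:

```java
// Sketch of the final filtering step: an expired SURL may lose its
// "pinned" extended attribute only if no live BoL or PtG request still
// pins it (its uniqueID is absent from pinnedSurlSet).
List<TSURL> releasable = new ArrayList<TSURL>();
for (Map.Entry<String, Integer> entry : expiredSurlMap.entrySet()) {
  if (!pinnedSurlSet.contains(entry.getValue())) {
    try {
      releasable.add(TSURL.makeFromStringValidate(entry.getKey()));
    } catch (InvalidTSURLAttributesException e) {
      // malformed SURL: skip it, as the method above does
    }
  }
}
```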
- /**
- * Method that transits chunks in SRM_SUCCESS to SRM_ABORTED, for the given
-   * SURL: the overall status of the requests containing that chunk is not
-   * changed! The TURL is set to null. Beware that the chunks may be part
- * of requests that have finished, or that still have not finished because
- * other chunks are still being processed.
- */
- public synchronized void transitSRM_SUCCESStoSRM_ABORTED(int surlUniqueID,
- String surl, String explanation) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_ABORTED - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE "
- + "status_BoL sb JOIN request_BoL rb ON sb.request_BoLID=rb.ID "
- + "SET sb.statusCode=?, sb.explanation=?, sb.transferURL=NULL "
- + "WHERE sb.statusCode=? AND (rb.sourceSURL_uniqueID=? OR rb.targetSURL=?)";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED));
- logWarnings(stmt.getWarnings());
-
- stmt.setString(2, explanation);
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(3,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(4, surlUniqueID);
- logWarnings(stmt.getWarnings());
-
- stmt.setString(5, surl);
- logWarnings(stmt.getWarnings());
-
- log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_ABORTED: {}", stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count > 0) {
- log.info("BoL CHUNK DAO! {} chunks were transited from SRM_SUCCESS "
- + "to SRM_ABORTED.", count);
- } else {
- log.trace("BoL CHUNK DAO! No chunks were transited from SRM_SUCCESS "
- + "to SRM_ABORTED.");
- }
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Unable to transitSRM_SUCCESStoSRM_ABORTED! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * Method that updates all chunks in SRM_SUCCESS state, into SRM_RELEASED. An
- * array of long representing the primary key of each chunk is required: only
- * they get the status changed provided their current status is SRM_SUCCESS.
-   * This method is used during srmReleaseFiles. In case of any error nothing
- * happens and no exception is thrown, but proper messages get logged.
- */
- public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE status_BoL SET statusCode=? "
- + "WHERE statusCode=? AND request_BoLID IN " + makeWhereString(ids);
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(stmt.getWarnings());
-
- log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}",
- stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("BoL CHUNK DAO! No chunk of BoL request "
- + "was transited from SRM_SUCCESS to SRM_RELEASED.");
- } else {
- log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited "
- + "from SRM_SUCCESS to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Unable to transit chunks from SRM_SUCCESS "
- + "to SRM_RELEASED! {}", e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids,
- TRequestToken token) {
-
- if (token == null) {
- transitSRM_SUCCESStoSRM_RELEASED(ids);
- } else {
- /*
- * If a request token has been specified, only the related BoL requests
- * have to be released. This is done adding the r.r_token="..." clause in
- * the where subquery.
- */
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE "
- + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "SET sb.statusCode=? " + "WHERE sb.statusCode=? AND rq.r_token='"
- + token.toString() + "' AND rb.ID IN " + makeWhereString(ids);
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(stmt.getWarnings());
-
- log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}",
- stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("BoL CHUNK DAO! No chunk of BoL request was "
- + "transited from SRM_SUCCESS to SRM_RELEASED.");
- } else {
- log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited "
- + "from SRM_SUCCESS to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Unable to transit chunks "
- + "from SRM_SUCCESS to SRM_RELEASED! {}", e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
- }
-
- /**
- * Auxiliary method used to close a ResultSet
- */
- private void close(ResultSet rset) {
-
- if (rset != null) {
- try {
- rset.close();
- } catch (Exception e) {
- log.error("BoL CHUNK DAO! Unable to close ResultSet! Exception: " + e);
- }
- }
- }
-
- /**
- * Auxiliary method used to close a Statement
- */
- private void close(Statement stmt) {
-
- if (stmt != null) {
- try {
- stmt.close();
- } catch (Exception e) {
- log.error("BoL CHUNK DAO! Unable to close Statement {} - Exception: {}",
- stmt.toString(), e.getMessage(), e);
- }
- }
- }
-
- private void commit(Connection con) {
-
- if (con != null) {
- try {
- con.commit();
- con.setAutoCommit(true);
- } catch (SQLException e) {
- log.error("BoL, SQL EXception {}", e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary method used to roll back a failed transaction
- */
- private void rollback(Connection con) {
-
- if (con != null) {
- try {
- con.rollback();
- con.setAutoCommit(true);
- log.error("BoL CHUNK DAO: roll back successful!");
- } catch (SQLException e2) {
- log.error("BoL CHUNK DAO: roll back failed! {}", e2.getMessage(), e2);
- }
- }
- }
-
- /**
- * Private method that returns the generated ID: it throws an exception in
- * case of any problem!
- */
- private int extractID(ResultSet rs) throws Exception {
-
- if (rs == null) {
- throw new Exception("BoL CHUNK DAO! Null ResultSet!");
- }
- if (rs.next()) {
- return rs.getInt(1);
- }
- log.error("BoL CHUNK DAO! It was not possible to establish "
- + "the assigned autoincrement primary key!");
- throw new Exception(
- "BoL CHUNK DAO! It was not possible to establish the assigned autoincrement primary key!");
- }
-
- /**
- * Auxiliary private method that logs all SQL warnings.
- */
- private void logWarnings(SQLWarning w) {
-
- if (w != null) {
- log.debug("BoL CHUNK DAO: {}", w.toString());
- while ((w = w.getNextWarning()) != null) {
- log.debug("BoL CHUNK DAO: {}", w.toString());
- }
- }
- }
-
- /**
- * Method that returns a String containing all IDs.
- */
- private String makeWhereString(long[] rowids) {
-
- StringBuilder sb = new StringBuilder("(");
- int n = rowids.length;
- for (int i = 0; i < n; i++) {
- sb.append(rowids[i]);
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
-   * Method that returns a String containing all SURL unique IDs.
- */
- private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) {
-
- StringBuilder sb = new StringBuilder("(");
- for (int i = 0; i < surlUniqueIDs.length; i++) {
- if (i > 0) {
- sb.append(",");
- }
- sb.append(surlUniqueIDs[i]);
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
-   * Method that returns a String containing all SURLs.
- */
- private String makeSurlString(String[] surls) {
-
- StringBuilder sb = new StringBuilder("(");
- int n = surls.length;
-
- for (int i = 0; i < n; i++) {
-
- SURL requestedSURL;
-
- try {
- requestedSURL = SURL.makeSURLfromString(surls[i]);
- } catch (NamespaceException e) {
- log.error(e.getMessage());
- log.debug("Skip '{}' during query creation", surls[i]);
- continue;
- }
-
- sb.append("'");
- sb.append(requestedSURL.getNormalFormAsString());
- sb.append("','");
- sb.append(requestedSURL.getQueryFormAsString());
- sb.append("'");
-
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
-
- sb.append(")");
- return sb.toString();
- }
-
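These three helpers inline values into the SQL text instead of binding them as parameters; the IDs are numeric and the SURLs are normalized before quoting, but parameter binding would be the more defensive choice. Illustrative fragments (values hypothetical, and the exact SURL forms depend on `SURL.getNormalFormAsString()` and `SURL.getQueryFormAsString()`):

```java
makeWhereString(new long[] { 10, 11, 12 });    // -> "(10,11,12)"
makeSURLUniqueIDWhere(new int[] { 512, 513 }); // -> "(512,513)"
// For each SURL that parses, makeSurlString emits both forms, quoted, e.g.:
// makeSurlString(new String[] { "srm://host.example/path/f" })
// -> "('srm://host.example/path/f','srm://host.example/srm/managerv2?SFN=/path/f')"
```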
- /**
-   * Auxiliary method that sets up the connection to the DB.
- */
- private boolean setUpConnection() {
-
- boolean response = false;
- try {
- Class.forName(driver);
- con = DriverManager.getConnection(url, name, password);
- logWarnings(con.getWarnings());
- response = con.isValid(0);
- } catch (ClassNotFoundException | SQLException e) {
- log.error("BoL CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e);
- }
- return response;
- }
-
- /**
-   * Auxiliary method that checks whether it is time to reset the connection,
-   * and if so takes it down and brings it back up again.
- */
- private boolean checkConnection() {
-
- boolean response = true;
- if (reconnect) {
- log.debug("BoL CHUNK DAO! Reconnecting to DB! ");
- takeDownConnection();
- response = setUpConnection();
- if (response) {
- reconnect = false;
- }
- }
- return response;
- }
-
- /**
-   * Auxiliary method that takes down a connection to the DB.
- */
- private void takeDownConnection() {
-
- if (con != null) {
- try {
- con.close();
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Exception in takeDownConnection method: {}",
- e.getMessage(), e);
- }
- }
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode, String explanation) {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || explanation == null) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken + " explanation="
- + explanation);
- }
- doUpdateStatusOnMatchingStatus(requestToken, null, null,
- expectedStatusCode, newStatusCode, explanation, true, false, true);
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode)
- throws IllegalArgumentException {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0
- || surls.length == 0 || surlsUniqueIDs.length != surls.length) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken
- + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls);
- }
- doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode, null, true, true, false);
- }
-
- public synchronized void doUpdateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation, boolean withRequestToken, boolean withSurls,
- boolean withExplanation) throws IllegalArgumentException {
-
- if ((withRequestToken && requestToken == null)
- || (withExplanation && explanation == null)
- || (withSurls && (surlUniqueIDs == null || surls == null))) {
- throw new IllegalArgumentException(
- "Unable to perform the doUpdateStatusOnMatchingStatus, "
- + "invalid arguments: withRequestToken=" + withRequestToken
- + " requestToken=" + requestToken + " withSurls=" + withSurls
- + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls
- + " withExplaination=" + withExplanation + " explanation="
- + explanation);
- }
- if (!checkConnection()) {
-      log.error("BOL CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE status_BoL sb JOIN (request_BoL rb, request_queue rq) "
- + "ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "SET sb.statusCode=? ";
- if (withExplanation) {
- str += " , " + buildExpainationSet(explanation);
- }
- str += " WHERE sb.statusCode=? ";
- if (withRequestToken) {
- str += " AND " + buildTokenWhereClause(requestToken);
- }
- if (withSurls) {
- str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls);
- }
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode));
- logWarnings(stmt.getWarnings());
-
-      stmt.setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode));
- logWarnings(stmt.getWarnings());
-
- log.trace("BOL CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("BOL CHUNK DAO! No chunk of BOL request was updated from {} "
- + "to {}.", expectedStatusCode, newStatusCode);
- } else {
- log.debug("BOL CHUNK DAO! {} chunks of BOL requests were updated "
- + "from {} to {}.", count, expectedStatusCode, newStatusCode);
- }
- } catch (SQLException e) {
- log.error("BOL CHUNK DAO! Unable to updated from {} to {}!",
- expectedStatusCode, newStatusCode, e);
- } finally {
- close(stmt);
- }
- }
-
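For context, the two public `updateStatusOnMatchingStatus` overloads above delegate to this worker with different flag combinations; hypothetical invocations (the `getInstance()` accessor is assumed, as it is not visible in this hunk):

```java
BoLChunkDAO dao = BoLChunkDAO.getInstance();

// Token-wide transition with an explanation recorded:
dao.updateStatusOnMatchingStatus(token,
    TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED,
    "Aborted by the user");

// Transition restricted to specific SURLs, no explanation recorded:
dao.updateStatusOnMatchingStatus(token, uniqueIds, surls,
    TStatusCode.SRM_SUCCESS, TStatusCode.SRM_RELEASED);
```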
-  public Collection<BoLChunkDataTO> find(int[] surlsUniqueIDs,
-    String[] surlsArray, String dn) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0 || dn == null) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " dn=" + dn);
- }
- return find(surlsUniqueIDs, surlsArray, dn, true);
- }
-
-  public Collection<BoLChunkDataTO> find(int[] surlsUniqueIDs,
-    String[] surlsArray) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray);
- }
- return find(surlsUniqueIDs, surlsArray, null, false);
- }
-
-  private synchronized Collection<BoLChunkDataTO> find(int[] surlsUniqueIDs,
-    String[] surlsArray, String dn, boolean withDn)
- throws IllegalArgumentException {
-
- if ((withDn && dn == null) || surlsUniqueIDs == null
- || surlsUniqueIDs.length == 0 || surlsArray == null
- || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn);
- }
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: find - unable to get a valid connection!");
-      return new ArrayList<BoLChunkDataTO>();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- // get chunks of the request
- String str = "SELECT rq.ID, rq.r_token, sb.statusCode, rq.timeStamp, rq.pinLifetime, "
- + "rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, "
- + "rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID "
- + "WHERE ( rb.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs)
- + " AND rb.sourceSURL IN "
- + makeSurlString(surlsArray) + " )";
- if (withDn) {
- str += " AND rq.client_dn=\'" + dn + "\'";
- }
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
-      List<BoLChunkDataTO> list = new ArrayList<BoLChunkDataTO>();
-
- log.trace("BOL CHUNK DAO - find method: {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
- BoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
-
- chunkDataTO = new BoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime"));
- chunkDataTO.setRequestToken(rs.getString("rq.r_token"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
-
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory"));
- chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive"));
- chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels"));
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BOL CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
-      return new ArrayList<BoLChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- private String buildExpainationSet(String explanation) {
-
- return " sb.explanation='" + explanation + "' ";
- }
-
- private String buildTokenWhereClause(TRequestToken requestToken) {
-
- return " rq.r_token='" + requestToken.toString() + "' ";
- }
-
- private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) {
-
- return " ( rb.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rb.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java
deleted file mode 100644
index 4600758d5..000000000
--- a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.namespace.model.Protocol;
-import it.grid.storm.srm.types.TStatusCode;
-
-import java.sql.Timestamp;
-import java.util.List;
-
-/**
- * Class that represents a row in the Persistence Layer: this is all raw data
- * referring to the BoLChunkData proper, that is, String and primitive types.
- *
- * Each field is initialized with default values as per SRM 2.2 specification:
- * protocolList = GSIFTP, dirOption = false, status = SRM_REQUEST_QUEUED.
- *
- * All other fields are 0 if int, or a white space if String.
- *
- * @author CNAF
- * @version 1.0
- * @date Aug 2009
- */
-public class BoLChunkDataTO {
-
-  /* Database table request_BoL fields BEGIN */
- private long primaryKey = -1; // ID primary key of record in DB
- private String fromSURL = " ";
- private boolean dirOption; // initialised in constructor
- private String normalizedStFN = null;
- private Integer surlUniqueID = null;
-  /* Database table request_BoL fields END */
-
- private String requestToken = " ";
- private int lifetime = 0;
- private boolean allLevelRecursive; // initialised in constructor
- private int numLevel; // initialised in constructor
-  private List<String> protocolList = null; // initialised in constructor
- private long filesize = 0;
- private int status; // initialised in constructor
- private String errString = " ";
- private int deferredStartTime = -1;
- private Timestamp timeStamp = null;
-
- public BoLChunkDataTO() {
-
- TURLPrefix protocolPreferences = new TURLPrefix();
- protocolPreferences.addProtocol(Protocol.GSIFTP);
- this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences);
- this.status = StatusCodeConverter.getInstance().toDB(
- TStatusCode.SRM_REQUEST_QUEUED);
- this.dirOption = false;
- this.allLevelRecursive = false;
- this.numLevel = 0;
- }
-
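The SRM 2.2 defaults described in the class comment are all set in this constructor; made explicit below (a sketch, relying only on the getters defined further down):

```java
BoLChunkDataTO to = new BoLChunkDataTO();
assert !to.getDirOption();
assert !to.getAllLevelRecursive();
assert to.getNumLevel() == 0;
assert to.getStatus()
    == StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED);
assert to.getProtocolList() != null; // seeded with GSIFTP
```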
- public boolean getAllLevelRecursive() {
-
- return allLevelRecursive;
- }
-
- public int getDeferredStartTime() {
-
- return deferredStartTime;
- }
-
- public boolean getDirOption() {
-
- return dirOption;
- }
-
- public String getErrString() {
-
- return errString;
- }
-
- public long getFileSize() {
-
- return filesize;
- }
-
- public String getFromSURL() {
-
- return fromSURL;
- }
-
- public int getLifeTime() {
-
- return lifetime;
- }
-
- public int getNumLevel() {
-
- return numLevel;
- }
-
- public long getPrimaryKey() {
-
- return primaryKey;
- }
-
-  public List<String> getProtocolList() {
-
- return protocolList;
- }
-
- public String getRequestToken() {
-
- return requestToken;
- }
-
- public Timestamp getTimeStamp() {
-
- return timeStamp;
- }
-
- public int getStatus() {
-
- return status;
- }
-
- public void setAllLevelRecursive(boolean b) {
-
- allLevelRecursive = b;
- }
-
- public void setDeferredStartTime(int deferredStartTime) {
-
- this.deferredStartTime = deferredStartTime;
- }
-
- public void setDirOption(boolean b) {
-
- dirOption = b;
- }
-
- public void setErrString(String s) {
-
- errString = s;
- }
-
- public void setFileSize(long n) {
-
- filesize = n;
- }
-
- public void setFromSURL(String s) {
-
- fromSURL = s;
- }
-
- public void setLifeTime(int n) {
-
- lifetime = n;
- }
-
- public void setNumLevel(int n) {
-
- numLevel = n;
- }
-
- public void setPrimaryKey(long n) {
-
- primaryKey = n;
- }
-
-  public void setProtocolList(List<String> l) {
-
- if ((l != null) && (!l.isEmpty())) {
- protocolList = l;
- }
- }
-
- public void setRequestToken(String s) {
-
- requestToken = s;
- }
-
- public void setTimeStamp(Timestamp timeStamp) {
-
- this.timeStamp = timeStamp;
- }
-
- public void setStatus(int n) {
-
- status = n;
- }
-
- /**
- * @param normalizedStFN
- * the normalizedStFN to set
- */
- public void setNormalizedStFN(String normalizedStFN) {
-
- this.normalizedStFN = normalizedStFN;
- }
-
- /**
- * @return the normalizedStFN
- */
- public String normalizedStFN() {
-
- return normalizedStFN;
- }
-
- /**
- * @param surlUniqueID
- * the sURLUniqueID to set
- */
- public void setSurlUniqueID(Integer surlUniqueID) {
-
- this.surlUniqueID = surlUniqueID;
- }
-
- /**
- * @return the sURLUniqueID
- */
- public Integer sulrUniqueID() {
-
- return surlUniqueID;
- }
-
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append(primaryKey);
- sb.append(" ");
- sb.append(requestToken);
- sb.append(" ");
- sb.append(fromSURL);
- sb.append(" ");
- sb.append(normalizedStFN);
- sb.append(" ");
- sb.append(surlUniqueID);
- sb.append(" ");
- sb.append(lifetime);
- sb.append(" ");
- sb.append(dirOption);
- sb.append(" ");
- sb.append(allLevelRecursive);
- sb.append(" ");
- sb.append(numLevel);
- sb.append(" ");
- sb.append(protocolList);
- sb.append(" ");
- sb.append(filesize);
- sb.append(" ");
- sb.append(status);
- sb.append(" ");
- sb.append(errString);
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java
deleted file mode 100644
index 0dedd963f..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java
+++ /dev/null
@@ -1,476 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TimeUnit;
-import it.grid.storm.griduser.GridUserInterface;
-// import it.grid.storm.namespace.SurlStatusStore;
-import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException;
-import it.grid.storm.srm.types.InvalidTSURLAttributesException;
-import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TStatusCode;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class that represents StoRM's CopyChunkCatalog: it collects CopyChunkData and
- * provides methods for looking up a CopyChunkData based on TRequestToken, as
- * well as for updating an existing one.
- *
- * @author EGRID - ICTP Trieste
- * @date september, 2005
- * @version 2.0
- */
-public class CopyChunkCatalog {
-
- private static final Logger log = LoggerFactory
- .getLogger(CopyChunkCatalog.class);
-
- /* only instance of CopyChunkCatalog present in StoRM! */
- private static final CopyChunkCatalog cat = new CopyChunkCatalog();
- /* WARNING!!! TO BE MODIFIED WITH FACTORY!!! */
- private CopyChunkDAO dao = CopyChunkDAO.getInstance();
-
- private CopyChunkCatalog() {
-
- }
-
- /**
-   * Method that returns the only instance of CopyChunkCatalog available.
- */
- public static CopyChunkCatalog getInstance() {
-
- return cat;
- }
-
- /**
- * Method used to update into Persistence a retrieved CopyChunkData. In case
- * any error occurs, the operation does not proceed and no Exception is
- * thrown.
- *
- * Beware that the only fields updated into persistence are the StatusCode and
- * the errorString.
- */
- synchronized public void update(CopyPersistentChunkData cd) {
-
- CopyChunkDataTO to = new CopyChunkDataTO();
- /* primary key needed by DAO Object */
- to.setPrimaryKey(cd.getPrimaryKey());
- to.setLifeTime(FileLifetimeConverter.getInstance().toDB(
- cd.getLifetime().value()));
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- cd.getStatus().getStatusCode()));
- to.setErrString(cd.getStatus().getExplanation());
- to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB(
- cd.getFileStorageType()));
- to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB(
- cd.getOverwriteOption()));
- to.setNormalizedSourceStFN(cd.getSURL().normalizedStFN());
- to.setSourceSurlUniqueID(new Integer(cd.getSURL().uniqueId()));
- to.setNormalizedTargetStFN(cd.getDestinationSURL().normalizedStFN());
- to.setTargetSurlUniqueID(new Integer(cd.getDestinationSURL().uniqueId()));
-
- dao.update(to);
- }
-
- /**
- * Method that returns a Collection of CopyChunkData Objects matching the
- * supplied TRequestToken.
- *
- * If any of the data associated to the TRequestToken is not well formed and
- * so does not allow a CopyChunkData Object to be created, then that part of
- * the request is dropped and gets logged, and the processing continues with
- * the next part. All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks to process then an empty Collection is returned, and
-   * a message gets logged.
- */
-  synchronized public Collection<CopyPersistentChunkData> lookup(
-    TRequestToken rt) {
-
-    Collection<CopyChunkDataTO> chunkDataTOs = dao.find(rt);
- log.debug("COPY CHUNK CATALOG: retrieved data {}", chunkDataTOs);
- return buildChunkDataList(chunkDataTOs, rt);
- }
-
-  private Collection<CopyPersistentChunkData> buildChunkDataList(
-    Collection<CopyChunkDataTO> chunkDataTOs, TRequestToken rt) {
-
-    ArrayList<CopyPersistentChunkData> list = new ArrayList<>();
-    CopyPersistentChunkData chunk;
- for (CopyChunkDataTO chunkTO : chunkDataTOs) {
- chunk = makeOne(chunkTO, rt);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(completeTO(chunkTO, chunk));
- } catch (InvalidReducedCopyChunkDataAttributesException e) {
- log.warn("COPY CHUNK CATALOG! unable to add missing informations on "
- + "DB to the request: {}", e.getMessage());
- }
- }
- log.debug("COPY CHUNK CATALOG: returning {}\n\n", list);
- return list;
- }
-
-  private Collection<CopyPersistentChunkData> buildChunkDataList(
-    Collection<CopyChunkDataTO> chunkDataTOs) {
-
-    ArrayList<CopyPersistentChunkData> list = new ArrayList<>();
-    CopyPersistentChunkData chunk;
- for (CopyChunkDataTO chunkTO : chunkDataTOs) {
- chunk = makeOne(chunkTO);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(completeTO(chunkTO, chunk));
- } catch (InvalidReducedCopyChunkDataAttributesException e) {
- log.warn("COPY CHUNK CATALOG! unable to add missing informations on DB "
- + "to the request: {}", e.getMessage());
- }
- }
- log.debug("COPY CHUNK CATALOG: returning {}\n\n", list);
- return list;
- }
-
-  public Collection<CopyPersistentChunkData> lookupCopyChunkData(
-    TRequestToken requestToken, Collection<TSURL> surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
-    Collection<CopyChunkDataTO> chunkDataTOs = dao.find(requestToken,
-      surlsUniqueIDs, surlsArray);
- return buildChunkDataList(chunkDataTOs, requestToken);
- }
-
-  public Collection<CopyPersistentChunkData> lookupCopyChunkData(TSURL surl,
- GridUserInterface user) {
-
- return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl }), user);
- }
-
-  public Collection<CopyPersistentChunkData> lookupCopyChunkData(TSURL surl) {
-
- return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl }));
- }
-
-  private Collection<CopyPersistentChunkData> lookupCopyChunkData(
-    List<TSURL> surls, GridUserInterface user) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
-    Collection<CopyChunkDataTO> chunkDataTOs = dao.find(surlsUniqueIDs,
-      surlsArray, user.getDn());
- return buildChunkDataList(chunkDataTOs);
- }
-
-  public Collection<CopyPersistentChunkData> lookupCopyChunkData(
-    List<TSURL> surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
-    Collection<CopyChunkDataTO> chunkDataTOs = dao.find(surlsUniqueIDs,
-      surlsArray);
- return buildChunkDataList(chunkDataTOs);
- }
-
- private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkTO) {
-
- try {
- return makeOne(chunkTO,
- new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp()));
- } catch (InvalidTRequestTokenAttributesException e) {
- throw new IllegalStateException(
- "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: "
- + e);
- }
- }
-
- /**
-  * Generates a CopyPersistentChunkData from the received CopyChunkDataTO
- *
- * @param chunkDataTO
- * @param rt
- * @return
- */
- private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkDataTO,
- TRequestToken rt) {
-
- StringBuilder errorSb = new StringBuilder();
- // fromSURL
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
-    if (fromSURL != null && chunkDataTO.normalizedSourceStFN() != null) {
-      fromSURL.setNormalizedStFN(chunkDataTO.normalizedSourceStFN());
-    }
-    if (fromSURL != null && chunkDataTO.sourceSurlUniqueID() != null) {
-      fromSURL.setUniqueID(chunkDataTO.sourceSurlUniqueID().intValue());
-    }
- // toSURL
- TSURL toSURL = null;
- try {
- toSURL = TSURL.makeFromStringValidate(chunkDataTO.toSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
-    if (toSURL != null && chunkDataTO.normalizedTargetStFN() != null) {
-      toSURL.setNormalizedStFN(chunkDataTO.normalizedTargetStFN());
-    }
-    if (toSURL != null && chunkDataTO.targetSurlUniqueID() != null) {
-      toSURL.setUniqueID(chunkDataTO.targetSurlUniqueID().intValue());
-    }
- // lifeTime
- TLifeTimeInSeconds lifeTime = null;
- try {
- lifeTime = TLifeTimeInSeconds.make(FileLifetimeConverter.getInstance()
- .toStoRM(chunkDataTO.lifeTime()), TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // fileStorageType
- TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance()
- .toSTORM(chunkDataTO.fileStorageType());
- if (fileStorageType == TFileStorageType.EMPTY) {
- log.error("\nTFileStorageType could not be translated from its String "
- + "representation! String: {}", chunkDataTO.fileStorageType());
-      // fail creation of the Copy chunk!
- fileStorageType = null;
- }
-    // spaceToken!
-    //
-    // WARNING! Although this field is in common between StoRM and DPM, a
-    // converter is still used, because the DPM logic for NULL/EMPTY is not
-    // known. The StoRM model does not allow for null, so it must be taken
-    // care of!
- TSpaceToken spaceToken = null;
- TSpaceToken emptyToken = TSpaceToken.makeEmpty();
- // convert empty string representation of DPM into StoRM representation;
- String spaceTokenTranslation = SpaceTokenStringConverter.getInstance()
- .toStoRM(chunkDataTO.spaceToken());
- if (emptyToken.toString().equals(spaceTokenTranslation)) {
- spaceToken = emptyToken;
- } else {
- try {
- spaceToken = TSpaceToken.make(spaceTokenTranslation);
- } catch (InvalidTSpaceTokenAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- }
- // overwriteOption!
- TOverwriteMode globalOverwriteOption = OverwriteModeConverter.getInstance()
- .toSTORM(chunkDataTO.overwriteOption());
- if (globalOverwriteOption == TOverwriteMode.EMPTY) {
- errorSb.append("\nTOverwriteMode could not be "
- + "translated from its String representation! String: "
- + chunkDataTO.overwriteOption());
- globalOverwriteOption = null;
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- chunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + chunkDataTO.status());
- } else {
- status = new TReturnStatus(code, chunkDataTO.errString());
- }
- // make CopyChunkData
- CopyPersistentChunkData aux = null;
- try {
- aux = new CopyPersistentChunkData(rt, fromSURL, toSURL, lifeTime,
- fileStorageType, spaceToken, globalOverwriteOption, status);
- aux.setPrimaryKey(chunkDataTO.primaryKey());
- } catch (InvalidSurlRequestDataAttributesException e) {
- dao.signalMalformedCopyChunk(chunkDataTO);
- log.warn("COPY CHUNK CATALOG! Retrieved malformed Copy"
- + " chunk data from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage());
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- *
-  * Adds to the received ReducedCopyChunkDataTO the normalized StFNs and the
-  * SURL unique IDs taken from the ReducedCopyChunkData
- *
- * @param chunkTO
- * @param chunk
- */
- private void completeTO(ReducedCopyChunkDataTO chunkTO,
- final ReducedCopyChunkData chunk) {
-
- chunkTO.setNormalizedSourceStFN(chunk.fromSURL().normalizedStFN());
- chunkTO.setSourceSurlUniqueID(new Integer(chunk.fromSURL().uniqueId()));
- chunkTO.setNormalizedTargetStFN(chunk.toSURL().normalizedStFN());
- chunkTO.setTargetSurlUniqueID(new Integer(chunk.toSURL().uniqueId()));
- }
-
- /**
- *
-  * Creates a ReducedCopyChunkDataTO from the received CopyChunkDataTO and
-  * completes it with the normalized StFNs and the SURL unique IDs taken from
-  * the CopyPersistentChunkData
- *
- * @param chunkTO
- * @param chunk
- * @return
- * @throws InvalidReducedCopyChunkDataAttributesException
- */
- private ReducedCopyChunkDataTO completeTO(CopyChunkDataTO chunkTO,
- final CopyPersistentChunkData chunk)
- throws InvalidReducedCopyChunkDataAttributesException {
-
- ReducedCopyChunkDataTO reducedChunkTO = this.reduce(chunkTO);
- this.completeTO(reducedChunkTO, this.reduce(chunk));
- return reducedChunkTO;
- }
-
- /**
- * Creates a ReducedCopyChunkData from the data contained in the received
- * CopyChunkData
- *
- * @param chunk
- * @return
-  * @throws InvalidReducedCopyChunkDataAttributesException
- */
- private ReducedCopyChunkData reduce(CopyPersistentChunkData chunk)
- throws InvalidReducedCopyChunkDataAttributesException {
-
- ReducedCopyChunkData reducedChunk = new ReducedCopyChunkData(
- chunk.getSURL(), chunk.getDestinationSURL(), chunk.getStatus());
- reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
- return reducedChunk;
- }
-
- /**
- * Creates a ReducedCopyChunkDataTO from the data contained in the received
- * CopyChunkDataTO
- *
- * @param chunkTO
- * @return
- */
- private ReducedCopyChunkDataTO reduce(CopyChunkDataTO chunkTO) {
-
- ReducedCopyChunkDataTO reducedChunkTO = new ReducedCopyChunkDataTO();
- reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
- reducedChunkTO.setFromSURL(chunkTO.fromSURL());
- reducedChunkTO.setNormalizedSourceStFN(chunkTO.normalizedSourceStFN());
- reducedChunkTO.setSourceSurlUniqueID(chunkTO.sourceSurlUniqueID());
- reducedChunkTO.setToSURL(chunkTO.toSURL());
- reducedChunkTO.setNormalizedTargetStFN(chunkTO.normalizedTargetStFN());
- reducedChunkTO.setTargetSurlUniqueID(chunkTO.targetSurlUniqueID());
- reducedChunkTO.setStatus(chunkTO.status());
- reducedChunkTO.setErrString(chunkTO.errString());
- return reducedChunkTO;
- }
-
- /**
-  * Checks if the received CopyChunkDataTO contains the fields that are
-  * required but not set by the front end
- *
- * @param chunkTO
- * @return
- */
- private boolean isComplete(CopyChunkDataTO chunkTO) {
-
- return (chunkTO.normalizedSourceStFN() != null)
- && (chunkTO.sourceSurlUniqueID() != null && chunkTO
- .normalizedTargetStFN() != null)
- && (chunkTO.targetSurlUniqueID() != null);
- }
-
- /**
-  * Checks if the received ReducedCopyChunkDataTO contains the fields that
-  * are required but not set by the front end
- *
- * @param reducedChunkTO
- * @return
- */
- @SuppressWarnings("unused")
- private boolean isComplete(ReducedCopyChunkDataTO reducedChunkTO) {
-
- return (reducedChunkTO.normalizedSourceStFN() != null)
- && (reducedChunkTO.sourceSurlUniqueID() != null && reducedChunkTO
- .normalizedTargetStFN() != null)
- && (reducedChunkTO.targetSurlUniqueID() != null);
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode,
- newStatusCode, explanation);
- }
-
-  public void updateFromPreviousStatus(TRequestToken requestToken,
-    List<TSURL> surlList, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode) {
-
- int[] surlsUniqueIDs = new int[surlList.size()];
- String[] surls = new String[surlList.size()];
- int index = 0;
- for (TSURL tsurl : surlList) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode);
- }
-
-}
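
A minimal usage sketch of the catalog deleted above, assuming its classes are still on the classpath; the token value is hypothetical, and SRM_REQUEST_INPROGRESS is assumed to be among TStatusCode's SRM 2.2 status codes:

    import it.grid.storm.catalogs.CopyChunkCatalog;
    import it.grid.storm.catalogs.CopyPersistentChunkData;
    import it.grid.storm.srm.types.TRequestToken;
    import it.grid.storm.srm.types.TStatusCode;

    import java.sql.Timestamp;
    import java.util.Collection;

    public class CopyCatalogUsageSketch {

      public static void main(String[] args) throws Exception {
        // Hypothetical token value; the two-argument constructor is the one
        // CopyChunkCatalog.makeOne uses above.
        TRequestToken token = new TRequestToken(
          "d0239a2e-8f0e-4e9d-a2b6-2f7b30c0c3f1",
          new Timestamp(System.currentTimeMillis()));

        CopyChunkCatalog catalog = CopyChunkCatalog.getInstance();

        // Malformed chunks are dropped and logged; only valid ones come back.
        Collection<CopyPersistentChunkData> chunks = catalog.lookup(token);
        for (CopyPersistentChunkData chunk : chunks) {
          // ... act on the chunk, then persist status and SURL data back.
          catalog.update(chunk);
        }

        // Bulk transition of all still-queued chunks of this request.
        catalog.updateFromPreviousStatus(token, TStatusCode.SRM_REQUEST_QUEUED,
          TStatusCode.SRM_REQUEST_INPROGRESS, "request taken over");
      }
    }
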
diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java
deleted file mode 100644
index 912acb9df..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java
+++ /dev/null
@@ -1,773 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.config.Configuration;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TStatusCode;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.SQLWarning;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * DAO class for CopyChunkCatalog. This DAO is specifically designed to connect
- * to a MySQL DB. The raw data found in those tables is pre-treated in order to
- * turn it into the Object Model of StoRM. See Method comments for further info.
- *
- * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the
- * object model.
- *
- * @author EGRID - ICTP Trieste
- * @version 2.0
- * @date September 2005
- */
-public class CopyChunkDAO {
-
- private static final Logger log = LoggerFactory.getLogger(CopyChunkDAO.class);
-
- /* String with the name of the class for the DB driver */
- private final String driver = Configuration.getInstance().getDBDriver();
- /* String referring to the URL of the DB */
- private final String url = Configuration.getInstance().getStormDbURL();
- /* String with the password for the DB */
- private final String password = Configuration.getInstance().getDBPassword();
- /* String with the name for the DB */
- private final String name = Configuration.getInstance().getDBUserName();
-
- /* Connection to DB - WARNING!!! It is kept open all the time! */
- private Connection con = null;
- /* boolean that tells whether reconnection is needed because of MySQL bug! */
- private boolean reconnect = false;
-
- /* Singleton instance */
- private final static CopyChunkDAO dao = new CopyChunkDAO();
-
- /* timer thread that will run a task to alert when reconnecting is necessary! */
- private Timer clock = null;
- /*
- * timer task that will update the boolean signaling that a reconnection is
- * needed!
- */
- private TimerTask clockTask = null;
- /* milliseconds that must pass before reconnecting to DB */
- private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000;
- /* initial delay in milliseconds before starting timer */
- private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000;
-
- private CopyChunkDAO() {
-
- setUpConnection();
- clock = new Timer();
- clockTask = new TimerTask() {
-
- @Override
- public void run() {
-
- reconnect = true;
- }
- }; // clock task
- clock.scheduleAtFixedRate(clockTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of the CopyChunkDAO.
- */
- public static CopyChunkDAO getInstance() {
-
- return dao;
- }
-
- /**
- * Method used to save the changes made to a retrieved CopyChunkDataTO, back
- * into the MySQL DB.
- *
-  * The statusCode and explanation of the status_Copy table, the fileLifetime,
-  * file storage type and overwrite option of the request_queue table, and the
-  * normalized StFNs and unique IDs of the request_Copy table get written to
-  * the DB.
-  *
-  * In case of any error, an error message gets logged but no exception is
-  * thrown.
- */
- public synchronized void update(CopyChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("COPY CHUNK DAO: update - unable to get a valid connection!");
- return;
- }
- PreparedStatement updateFileReq = null;
- try {
- // ready updateFileReq...
- updateFileReq = con
- .prepareStatement("UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) "
- + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) "
- + "SET sc.statusCode=?, sc.explanation=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, "
- + "rc.normalized_sourceSURL_StFN=?, rc.sourceSURL_uniqueID=?, rc.normalized_targetSURL_StFN=?, rc.targetSURL_uniqueID=? "
- + "WHERE rc.ID=?");
- logWarnings(con.getWarnings());
-
- updateFileReq.setInt(1, to.status());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(2, to.errString());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(3, to.lifeTime());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(4, to.fileStorageType());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(5, to.overwriteOption());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(6, to.normalizedSourceStFN());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(7, to.sourceSurlUniqueID());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(8, to.normalizedTargetStFN());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(9, to.targetSurlUniqueID());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setLong(10, to.primaryKey());
- logWarnings(updateFileReq.getWarnings());
-
- // run updateFileReq
- updateFileReq.executeUpdate();
- logWarnings(updateFileReq.getWarnings());
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: Unable to complete update! {}",
- e.getMessage(), e);
- } finally {
- close(updateFileReq);
- }
- }
-
- /**
-  * Updates the request_Copy row represented by the received
-  * ReducedCopyChunkDataTO by setting its normalized source/target SURL StFNs
-  * and unique IDs
- *
- * @param chunkTO
- */
- public synchronized void updateIncomplete(ReducedCopyChunkDataTO chunkTO) {
-
- if (!checkConnection()) {
- log
- .error("COPY CHUNK DAO: updateIncomplete - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_Copy SET normalized_sourceSURL_StFN=?, sourceSURL_uniqueID=?, normalized_targetSURL_StFN=?, targetSURL_uniqueID=? "
- + "WHERE ID=?";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- stmt.setString(1, chunkTO.normalizedSourceStFN());
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(2, chunkTO.sourceSurlUniqueID());
- logWarnings(stmt.getWarnings());
-
- stmt.setString(3, chunkTO.normalizedTargetStFN());
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(4, chunkTO.targetSurlUniqueID());
- logWarnings(stmt.getWarnings());
-
- stmt.setLong(5, chunkTO.primaryKey());
- logWarnings(stmt.getWarnings());
-
- log.trace("COPY CHUNK DAO - update incomplete: {}", stmt.toString());
- stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: Unable to complete update incomplete! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * Method that queries the MySQL DB to find all entries matching the supplied
- * TRequestToken. The Collection contains the corresponding CopyChunkDataTO
- * objects.
- *
- * A complex query establishes all chunks associated with the request token,
- * by properly joining request_queue, request_Copy and status_Copy. The
- * considered fields are:
- *
- * (1) From status_Copy: the ID field which becomes the TOs primary key, and
- * statusCode.
- *
- * (2) From request_Copy: targetSURL and sourceSURL.
- *
- * (3) From request_queue: fileLifetime, config_FileStorageTypeID, s_token,
- * config_OverwriteID.
- *
- * In case of any error, a log gets written and an empty collection is
- * returned. No exception is returned.
- *
- * NOTE! Chunks in SRM_ABORTED status are NOT returned!
- */
-  public synchronized Collection<CopyChunkDataTO> find(
-    TRequestToken requestToken) {
-
- if (!checkConnection()) {
- log.error("COPY CHUNK DAO: find - unable to get a valid connection!");
-      return new ArrayList<CopyChunkDataTO>();
- }
- String strToken = requestToken.toString();
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- /* get chunks of the request */
- str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) "
- + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) "
- + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID "
- + "WHERE rq.r_token=? AND sc.statusCode<>?";
-
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
-      ArrayList<CopyChunkDataTO> list = new ArrayList<>();
- find.setString(1, strToken);
- logWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED));
- logWarnings(find.getWarnings());
-
- log.debug("COPY CHUNK DAO: find method; " + find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- CopyChunkDataTO chunkDataTO;
- while (rs.next()) {
- chunkDataTO = new CopyChunkDataTO();
- chunkDataTO.setRequestToken(strToken);
- chunkDataTO.setSpaceToken(rs.getString("rq.s_token"));
- chunkDataTO.setFileStorageType(rs
- .getString("rq.config_FileStorageTypeID"));
- chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime"));
- chunkDataTO.setPrimaryKey(rs.getLong("rc.ID"));
- chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL"));
- chunkDataTO.setNormalizedSourceStFN(rs
- .getString("rc.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rc.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setToSURL(rs.getString("rc.targetSURL"));
-        chunkDataTO.setNormalizedTargetStFN(rs
-          .getString("rc.normalized_targetSURL_StFN"));
-        uniqueID = rs.getInt("rc.targetSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID));
- }
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
-      return new ArrayList<CopyChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
-
- }
-
-  public synchronized Collection<CopyChunkDataTO> find(
-    TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) {
-
- if (!checkConnection()) {
- log.error("COPY CHUNK DAO: find - unable to get a valid connection!");
-      return new ArrayList<CopyChunkDataTO>();
- }
- String strToken = requestToken.toString();
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- /* get chunks of the request */
- str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) "
- + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) "
- + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID "
- + "WHERE rq.r_token=? AND ( rc.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs)
- + " AND rc.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
-
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
-      ArrayList<CopyChunkDataTO> list = new ArrayList<>();
- find.setString(1, strToken);
- logWarnings(find.getWarnings());
-
- log.debug("COPY CHUNK DAO: find method; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- CopyChunkDataTO chunkDataTO;
- while (rs.next()) {
- chunkDataTO = new CopyChunkDataTO();
- chunkDataTO.setRequestToken(strToken);
- chunkDataTO.setSpaceToken(rs.getString("rq.s_token"));
- chunkDataTO.setFileStorageType(rs
- .getString("rq.config_FileStorageTypeID"));
- chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime"));
- chunkDataTO.setPrimaryKey(rs.getLong("rc.ID"));
- chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL"));
- chunkDataTO.setNormalizedSourceStFN(rs
- .getString("rc.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rc.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setToSURL(rs.getString("rc.targetSURL"));
-        chunkDataTO.setNormalizedTargetStFN(rs
-          .getString("rc.normalized_targetSURL_StFN"));
-        uniqueID = rs.getInt("rc.targetSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID));
- }
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
-      return new ArrayList<CopyChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
-
- }
-
- /**
- * Method used in extraordinary situations to signal that data retrieved from
- * the DB was malformed and could not be translated into the StoRM object
- * model.
- *
- * This method attempts to change the status of the request to SRM_FAILURE and
- * record it in the DB.
- *
-  * This operation could potentially fail, because the malformed data might
-  * itself stem from a problematic DB; indeed, initially only log messages
-  * were recorded.
-  *
-  * Yet it soon became clear that the malformed data originated from the
-  * clients and/or the FE recording info in the DB. In these circumstances
-  * the client would see its request stuck in the SRM_IN_PROGRESS state
-  * forever. Hence the pressing need to inform it of the encountered problems.
- */
- public synchronized void signalMalformedCopyChunk(CopyChunkDataTO auxTO) {
-
- if (!checkConnection()) {
- log
- .error("COPY CHUNK DAO: signalMalformedCopyChunk - unable to get a valid connection!");
- return;
- }
- String signalSQL = "UPDATE status_Copy SET statusCode="
- + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE)
- + ", explanation=? WHERE request_CopyID=" + auxTO.primaryKey();
-
- PreparedStatement signal = null;
- try {
- /* update storm_put_filereq */
- signal = con.prepareStatement(signalSQL);
- logWarnings(con.getWarnings());
-
- /* Prepared statement spares DB-specific String notation! */
- signal.setString(1, "Request is malformed!");
- logWarnings(signal.getWarnings());
-
- signal.executeUpdate();
- logWarnings(signal.getWarnings());
- } catch (SQLException e) {
- log.error("CopyChunkDAO! Unable to signal in DB that the request was "
- + "malformed! Request: {}; Error: {}", auxTO.toString(),
- e.getMessage(), e);
- } finally {
- close(signal);
- }
- }
-
- /**
- * Auxiliary method used to close a Statement
- */
- private void close(Statement stmt) {
-
- if (stmt != null) {
- try {
- stmt.close();
- } catch (Exception e) {
- log.error("COPY CHUNK DAO! Unable to close Statement {} - Error: {}",
- stmt.toString(), e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary method used to close a ResultSet
- */
- private void close(ResultSet rset) {
-
- if (rset != null) {
- try {
- rset.close();
- } catch (Exception e) {
- log.error("COPY CHUNK DAO! Unable to close ResultSet! Error: {}",
- e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary private method that logs all SQL warnings.
- */
- private void logWarnings(SQLWarning w) {
-
- if (w != null) {
- log.debug("COPY CHUNK DAO: {}", w.toString());
- while ((w = w.getNextWarning()) != null) {
- log.debug("COPY CHUNK DAO: {}", w.toString());
- }
- }
- }
-
- /**
-  * Auxiliary method that sets up the connection to the DB.
- */
- private boolean setUpConnection() {
-
- boolean response = false;
- try {
- Class.forName(driver);
- con = DriverManager.getConnection(url, name, password);
- logWarnings(con.getWarnings());
- response = con.isValid(0);
- } catch (SQLException | ClassNotFoundException e) {
- log.error("COPY CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e);
- }
- return response;
- }
-
- /**
-  * Auxiliary method that checks whether it is time to reset the connection
-  * and, if so, takes it down and brings it back up.
- */
- private synchronized boolean checkConnection() {
-
- boolean response = true;
- if (reconnect) {
- log.debug("COPY CHUNK DAO! Reconnecting to DB! ");
- takeDownConnection();
- response = setUpConnection();
- if (response) {
- reconnect = false;
- }
- }
- return response;
- }
-
- /**
-  * Auxiliary method that takes down the connection to the DB.
- */
- private void takeDownConnection() {
-
- if (con != null) {
- try {
- con.close();
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO! Exception in takeDownConnection method: {}",
- e.getMessage(), e);
- }
- }
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode, String explanation) {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || explanation == null) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken + " explanation="
- + explanation);
- }
- doUpdateStatusOnMatchingStatus(requestToken, null, null,
- expectedStatusCode, newStatusCode, explanation, true, false, true);
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode)
- throws IllegalArgumentException {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0
- || surls.length == 0 || surlsUniqueIDs.length != surls.length) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken
- + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls);
- }
- doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode, null, true, true, false);
- }
-
- public synchronized void doUpdateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation, boolean withRequestToken, boolean withSurls,
- boolean withExplanation) throws IllegalArgumentException {
-
- if ((withRequestToken && requestToken == null)
- || (withExplanation && explanation == null)
- || (withSurls && (surlUniqueIDs == null || surls == null))) {
- throw new IllegalArgumentException(
- "Unable to perform the doUpdateStatusOnMatchingStatus, "
- + "invalid arguments: withRequestToken=" + withRequestToken
- + " requestToken=" + requestToken + " withSurls=" + withSurls
- + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls
- + " withExplaination=" + withExplanation + " explanation="
- + explanation);
- }
- if (!checkConnection()) {
- log
- .error("COPY CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) "
- + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) "
- + "SET sc.statusCode=? ";
- if (withExplanation) {
- str += " , " + buildExpainationSet(explanation);
- }
- str += " WHERE sc.statusCode=? ";
- if (withRequestToken) {
- str += " AND " + buildTokenWhereClause(requestToken);
- }
- if (withSurls) {
- str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls);
- }
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode));
- logWarnings(stmt.getWarnings());
-
- stmt
- .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode));
- logWarnings(stmt.getWarnings());
-
- log.trace("COPY CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("COPY CHUNK DAO! No chunk of COPY request was updated "
- + "from {} to {}.", expectedStatusCode, newStatusCode);
- } else {
- log.debug("COPY CHUNK DAO! {} chunks of COPY requests were updated "
- + "from {} to {}.", count, expectedStatusCode, newStatusCode);
- }
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO! Unable to updated from {} to {}! {}",
- expectedStatusCode, newStatusCode, e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
-  * Method that returns a parenthesised, comma-separated list of the SURL
-  * unique IDs, for use in an SQL IN clause.
- */
- private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) {
-
- StringBuilder sb = new StringBuilder("(");
- for (int i = 0; i < surlUniqueIDs.length; i++) {
- if (i > 0) {
- sb.append(",");
- }
- sb.append(surlUniqueIDs[i]);
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
-  * Method that returns a parenthesised, comma-separated list of the quoted
-  * SURLs, for use in an SQL IN clause.
- */
- private String makeSurlString(String[] surls) {
-
- StringBuilder sb = new StringBuilder("(");
- int n = surls.length;
- for (int i = 0; i < n; i++) {
- sb.append("'");
- sb.append(surls[i]);
- sb.append("'");
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
- sb.append(")");
- return sb.toString();
- }
-
-  public synchronized Collection<CopyChunkDataTO> find(int[] surlsUniqueIDs,
-    String[] surlsArray, String dn) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0 || dn == null) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " dn=" + dn);
- }
- return find(surlsUniqueIDs, surlsArray, dn, true);
- }
-
-  public synchronized Collection<CopyChunkDataTO> find(int[] surlsUniqueIDs,
-    String[] surlsArray) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray);
- }
- return find(surlsUniqueIDs, surlsArray, null, false);
- }
-
-  private synchronized Collection<CopyChunkDataTO> find(int[] surlsUniqueIDs,
-    String[] surlsArray, String dn, boolean withDn)
- throws IllegalArgumentException {
-
- if ((withDn && dn == null) || surlsUniqueIDs == null
- || surlsUniqueIDs.length == 0 || surlsArray == null
- || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn);
- }
- if (!checkConnection()) {
- log.error("COPY CHUNK DAO: find - unable to get a valid connection!");
-      return new ArrayList<CopyChunkDataTO>();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- String str = "SELECT rq.r_token, rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, "
- + "rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, "
- + "rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, "
- + "d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) "
- + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) "
- + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID "
- + "WHERE ( rc.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs)
- + " AND rc.sourceSURL IN "
- + makeSurlString(surlsArray) + " )";
- if (withDn) {
- str += " AND rq.client_dn=\'" + dn + "\'";
- }
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
-      List<CopyChunkDataTO> list = new ArrayList<>();
-
- log.trace("COPY CHUNK DAO - find method: {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
- CopyChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new CopyChunkDataTO();
- chunkDataTO.setRequestToken(rs.getString("rq.r_token"));
- chunkDataTO.setSpaceToken(rs.getString("rq.s_token"));
- chunkDataTO.setFileStorageType(rs
- .getString("rq.config_FileStorageTypeID"));
- chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime"));
- chunkDataTO.setPrimaryKey(rs.getLong("rc.ID"));
- chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL"));
- chunkDataTO.setNormalizedSourceStFN(rs
- .getString("rc.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rc.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setToSURL(rs.getString("rc.targetSURL"));
-        chunkDataTO.setNormalizedTargetStFN(rs
-          .getString("rc.normalized_targetSURL_StFN"));
-        uniqueID = rs.getInt("rc.targetSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID));
- }
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
-      return new ArrayList<CopyChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
-
-  private String buildExplanationSet(String explanation) {
-
- return " sc.explanation='" + explanation + "' ";
- }
-
- private String buildTokenWhereClause(TRequestToken requestToken) {
-
- return " rq.r_token='" + requestToken.toString() + "' ";
- }
-
- private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) {
-
- return " ( rc.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rc.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- }
-
-}
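
A side note on the DAO deleted above: its IN clauses and WHERE fragments are assembled by plain string concatenation, so SURL strings travel unescaped into the SQL text. A sketch of a placeholder-based alternative for the IN clause, using only plain JDBC and the table/column names from the queries above:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.util.List;

    public class InClauseSketch {

      /** Builds "(?,?,...,?)" with one placeholder per value. */
      static String placeholders(int n) {
        StringBuilder sb = new StringBuilder("(");
        for (int i = 0; i < n; i++) {
          sb.append(i == 0 ? "?" : ",?");
        }
        return sb.append(")").toString();
      }

      static ResultSet findBySourceSurls(Connection con, List<String> surls)
        throws Exception {

        String sql = "SELECT rc.ID FROM request_Copy rc WHERE rc.sourceSURL IN "
          + placeholders(surls.size());
        PreparedStatement ps = con.prepareStatement(sql);
        // Values are bound, not concatenated: quoting and escaping become
        // the driver's job instead of the caller's.
        for (int i = 0; i < surls.size(); i++) {
          ps.setString(i + 1, surls.get(i));
        }
        return ps.executeQuery();
      }
    }
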
diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java
deleted file mode 100644
index 1b455ac7a..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import java.sql.Timestamp;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TStatusCode;
-
-/**
- * Class that represents a row in the Persistence Layer: this is all raw data
- * referring to the CopyChunkData proper, that is, String and primitive types.
- *
- * Each field is initialized with default values as per SRM 2.2 specification:
- * fileStorageType VOLATILE, overwriteMode NEVER, status SRM_REQUEST_QUEUED.
- *
- * All other fields are 0 if int, or a white space if String.
- *
- * @author EGRID ICTP
- * @version 2.0
- * @date September 2005
- */
-public class CopyChunkDataTO {
-
-  /* Database table request_Copy fields BEGIN */
- private long primaryKey = -1; // ID primary key of record in DB
- private String fromSURL = " ";
- private String toSURL = " ";
- private String normalizedSourceStFN = null;
- private Integer sourceSurlUniqueID = null;
- private String normalizedTargetStFN = null;
- private Integer targetSurlUniqueID = null;
-  /* Database table request_Copy fields END */
-
- private String requestToken = " ";
- private int lifetime = 0;
- private String fileStorageType = null; // initialised in constructor
- private String spaceToken = " ";
- private String overwriteOption = null; // initialised in constructor
- private int status; // initialised in constructor
- private String errString = " ";
- private Timestamp timeStamp = null;
-
- public CopyChunkDataTO() {
-
- fileStorageType = FileStorageTypeConverter.getInstance().toDB(
- TFileStorageType.VOLATILE);
- overwriteOption = OverwriteModeConverter.getInstance().toDB(
- TOverwriteMode.NEVER);
- status = StatusCodeConverter.getInstance().toDB(
- TStatusCode.SRM_REQUEST_QUEUED);
- }
-
- public long primaryKey() {
-
- return primaryKey;
- }
-
- public void setPrimaryKey(long n) {
-
- primaryKey = n;
- }
-
- public String requestToken() {
-
- return requestToken;
- }
-
- public void setRequestToken(String s) {
-
- requestToken = s;
- }
-
- public Timestamp timeStamp() {
-
- return timeStamp;
- }
-
- public void setTimeStamp(Timestamp timeStamp) {
-
- this.timeStamp = timeStamp;
- }
-
- public String fromSURL() {
-
- return fromSURL;
- }
-
- public void setFromSURL(String s) {
-
- fromSURL = s;
- }
-
- /**
- * @return the normalizedStFN
- */
- public String normalizedSourceStFN() {
-
- return normalizedSourceStFN;
- }
-
- /**
- * @param normalizedStFN
- * the normalizedStFN to set
- */
- public void setNormalizedSourceStFN(String normalizedStFN) {
-
- this.normalizedSourceStFN = normalizedStFN;
- }
-
- /**
- * @return the surlUniqueID
- */
- public Integer sourceSurlUniqueID() {
-
- return sourceSurlUniqueID;
- }
-
- /**
- * @param surlUniqueID
- * the surlUniqueID to set
- */
- public void setSourceSurlUniqueID(Integer surlUniqueID) {
-
- this.sourceSurlUniqueID = surlUniqueID;
- }
-
- /**
- * @return the normalizedStFN
- */
- public String normalizedTargetStFN() {
-
- return normalizedTargetStFN;
- }
-
- /**
- * @param normalizedStFN
- * the normalizedStFN to set
- */
- public void setNormalizedTargetStFN(String normalizedStFN) {
-
- this.normalizedTargetStFN = normalizedStFN;
- }
-
- /**
- * @return the surlUniqueID
- */
- public Integer targetSurlUniqueID() {
-
- return targetSurlUniqueID;
- }
-
- /**
- * @param surlUniqueID
- * the surlUniqueID to set
- */
- public void setTargetSurlUniqueID(Integer surlUniqueID) {
-
- this.targetSurlUniqueID = surlUniqueID;
- }
-
- public String toSURL() {
-
- return toSURL;
- }
-
- public void setToSURL(String s) {
-
- toSURL = s;
- }
-
- public int lifeTime() {
-
- return lifetime;
- }
-
- public void setLifeTime(int n) {
-
- lifetime = n;
- }
-
- public String fileStorageType() {
-
- return fileStorageType;
- }
-
- /**
- * Method used to set the FileStorageType: if s is null nothing gets set; the
- * internal default String is the one relative to Volatile FileStorageType.
- */
- public void setFileStorageType(String s) {
-
- if (s != null)
- fileStorageType = s;
- }
-
- public String spaceToken() {
-
- return spaceToken;
- }
-
- public void setSpaceToken(String s) {
-
- spaceToken = s;
- }
-
- public String overwriteOption() {
-
- return overwriteOption;
- }
-
- /**
- * Method used to set the OverwriteMode: if s is null nothing gets set; the
- * internal default String is the one relative to Never OverwriteMode.
- */
- public void setOverwriteOption(String s) {
-
- if (s != null)
- overwriteOption = s;
- }
-
- public int status() {
-
- return status;
- }
-
- public void setStatus(int n) {
-
- status = n;
- }
-
- public String errString() {
-
- return errString;
- }
-
- public void setErrString(String s) {
-
- errString = s;
- }
-
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append(primaryKey);
- sb.append(" ");
- sb.append(requestToken);
- sb.append(" ");
- sb.append(fromSURL);
- sb.append(" ");
- sb.append(normalizedSourceStFN);
- sb.append(" ");
- sb.append(sourceSurlUniqueID);
- sb.append(" ");
- sb.append(toSURL);
- sb.append(" ");
- sb.append(normalizedTargetStFN);
- sb.append(" ");
- sb.append(targetSurlUniqueID);
- sb.append(" ");
- sb.append(lifetime);
- sb.append(" ");
- sb.append(fileStorageType);
- sb.append(" ");
- sb.append(spaceToken);
- sb.append(" ");
- sb.append(overwriteOption);
- sb.append(" ");
- sb.append(status);
- sb.append(" ");
- sb.append(errString);
- sb.append(" ");
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopyData.java b/src/main/java/it/grid/storm/catalogs/CopyData.java
deleted file mode 100644
index 103cdaf9e..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyData.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TStatusCode;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class represents the data of a Copy chunk that is part of a multifile
- * Copy srm request. It contains data about: the fromSURL, the toSURL, the
- * target fileLifeTime, the target fileStorageType and any available target
- * spaceToken, the target overwriteOption to be applied in case the file
- * already exists, and the return status of the file together with its error
- * string.
- *
- * @author EGRID - ICTP Trieste
- * @date September, 2005
- * @version 2.0
- */
-public class CopyData extends SurlMultyOperationRequestData {
-
- private static final Logger log = LoggerFactory.getLogger(CopyData.class);
-
- /**
- * SURL to which the srmCopy will put the file
- */
- protected TSURL destinationSURL;
-
- /**
- * requested lifetime - BEWARE!!! It is the fileLifetime at destination in
- * case of Volatile files!
- */
- protected TLifeTimeInSeconds lifetime;
-
- /**
- * TFileStorageType at destination
- */
- protected TFileStorageType fileStorageType;
-
- /**
- * SpaceToken to use for toSURL
- */
- protected TSpaceToken spaceToken;
-
- /**
- * specifies the behaviour in case of existing files for Put part of the copy
- * (could be local or remote!)
- */
- protected TOverwriteMode overwriteOption;
-
- public CopyData(TSURL fromSURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status)
- throws InvalidCopyDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(fromSURL, status);
- if (destinationSURL == null || lifetime == null || fileStorageType == null
- || spaceToken == null || overwriteOption == null) {
- throw new InvalidCopyDataAttributesException(fromSURL, destinationSURL,
- lifetime, fileStorageType, spaceToken, overwriteOption, status);
- }
- this.destinationSURL = destinationSURL;
- this.lifetime = lifetime;
- this.fileStorageType = fileStorageType;
- this.spaceToken = spaceToken;
- this.overwriteOption = overwriteOption;
- }
-
- /**
- * Method that returns the toSURL of the srm request to which this chunk
- * belongs.
- */
- public TSURL getDestinationSURL() {
-
- return destinationSURL;
- }
-
- /**
- * Method that returns the requested pin life time for this chunk of the srm
- * request.
- */
- public TLifeTimeInSeconds getLifetime() {
-
- return lifetime;
- }
-
- /**
- * Method that returns the fileStorageType for this chunk of the srm request.
- */
- public TFileStorageType getFileStorageType() {
-
- return fileStorageType;
- }
-
- /**
- * Method that returns the space token supplied for this chunk of the srm
- * request.
- */
- public TSpaceToken getSpaceToken() {
-
- return spaceToken;
- }
-
- /**
- * Method that returns the overwriteOption specified in the srm request.
- */
- public TOverwriteMode getOverwriteOption() {
-
- return overwriteOption;
- }
-
- /**
- * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it
- * needs the explanation String which describes the situation in greater
- * detail; if a null is passed, then an empty String is used as explanation.
- */
- public void changeStatusSRM_DUPLICATION_ERROR(String explanation) {
-
- setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation);
- }
-
- /**
- * Method that sets the status of this request to SRM_FATAL_INTERNAL_ERROR; it
- * needs the explanation String which describes the situation in greater
- * detail; if a null is passed, then an empty String is used as explanation.
- */
- public void changeStatusSRM_FATAL_INTERNAL_ERROR(String explanation) {
-
- setStatus(TStatusCode.SRM_FATAL_INTERNAL_ERROR, explanation);
- }
-
-}
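
The CopyData constructor above guards every attribute against null by throwing a checked domain exception. A compact sketch of the same fail-fast guard in modern Java; the Copy record is a hypothetical stand-in for the StoRM SRM types (records require Java 16+):

    import java.util.Objects;

    public class NullGuardSketch {

      // Hypothetical stand-in for CopyData's validated attributes.
      record Copy(String fromSurl, String toSurl, long lifetimeSeconds) {
        Copy {
          // Fail fast, as CopyData's constructor does with its checked exception.
          Objects.requireNonNull(fromSurl, "fromSURL must not be null");
          Objects.requireNonNull(toSurl, "destinationSURL must not be null");
        }
      }

      public static void main(String[] args) {
        System.out.println(new Copy("srm://host/source", "srm://host/target", 3600));
      }
    }
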
diff --git a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java
deleted file mode 100644
index a64729a19..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import it.grid.storm.srm.types.TOverwriteMode;
-
-/**
- * Package private auxiliary class used to convert between DPM and StoRM
- * representation of Copy TOverwriteMode+RemoveSourceFiles global information
- * for the whole request, and Flags in storm_req.
- *
- * @author: EGRID - ICTP Trieste
- * @version: 1.0
- * @date: September 2005
- */
-class CopyGlobalFlagConverter {
-
- private Map DPMtoSTORM = new HashMap();
- private Map STORMtoDPM = new HashMap();
-
- private static CopyGlobalFlagConverter c = new CopyGlobalFlagConverter();
-
- /**
-  * Private constructor that fills in the conversion table; in particular,
-  * DPM uses int values to represent the following pairs of values:
-  *
-  * 0 NEVER + DO NOT RemoveSourceFiles
-  * 1 ALWAYS + DO NOT RemoveSourceFiles
-  * 2 WHENFILESAREDIFFERENT + DO NOT RemoveSourceFiles
-  * 4 NEVER + RemoveSourceFiles
-  * 5 ALWAYS + RemoveSourceFiles
-  * 6 WHENFILESAREDIFFERENT + RemoveSourceFiles
-  */
- private CopyGlobalFlagConverter() {
-
- DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER,
- new Boolean(false) });
- DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS,
- new Boolean(false) });
- DPMtoSTORM.put(new Integer(2), new Object[] {
- TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) });
- DPMtoSTORM.put(new Integer(4), new Object[] { TOverwriteMode.NEVER,
- new Boolean(true) });
- DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS,
- new Boolean(true) });
- DPMtoSTORM.put(new Integer(6), new Object[] {
- TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) });
- Object aux;
- for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) {
- aux = i.next();
- STORMtoDPM.put(DPMtoSTORM.get(aux), aux);
- }
- }
-
- /**
-  * Method that returns the only instance of CopyGlobalFlagConverter.
- */
- public static CopyGlobalFlagConverter getInstance() {
-
- return c;
- }
-
- /**
- * Method that returns the int used by DPM to represent the given
- * TOverwriteMode and removeSourceFiles boolean. -1 is returned if no match is
- * found.
- */
- public int toDPM(TOverwriteMode om, boolean removeSourceFiles) {
-
- Integer aux = (Integer) STORMtoDPM.get(new Object[] { om,
- new Boolean(removeSourceFiles) });
- if (aux == null)
- return -1;
- return aux.intValue();
- }
-
- /**
- * Method that returns an Object[] containing the TOverwriteMode and the
- * boolean used by StoRM to represent the supplied int representation of DPM.
- * An empty Object[] is returned if no StoRM type is found.
- */
- public Object[] toSTORM(int n) {
-
- Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n));
- if (aux == null)
- return new Object[] {};
- return aux;
- }
-
- public String toString() {
-
- return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM
- + "\nSTORMtoDPM map:" + STORMtoDPM;
- }
-}
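
One caveat on the converter deleted above: Java arrays inherit identity-based equals/hashCode, so STORMtoDPM.get(new Object[] {...}) can never match the Object[] instances stored when the map was built, and toDPM always falls through to -1. The same applies to CopySpecificFlagConverter below. A sketch of an equality-friendly key, following the encoding listed in the constructor comment (List.of requires Java 9+):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class FlagKeySketch {

      enum Overwrite { NEVER, ALWAYS, WHENFILESAREDIFFERENT }

      // Lists use value equality, so they are safe HashMap keys.
      static final Map<List<Object>, Integer> STORM_TO_DPM = new HashMap<>();

      static {
        STORM_TO_DPM.put(List.of(Overwrite.NEVER, false), 0);
        STORM_TO_DPM.put(List.of(Overwrite.ALWAYS, false), 1);
        STORM_TO_DPM.put(List.of(Overwrite.WHENFILESAREDIFFERENT, false), 2);
        STORM_TO_DPM.put(List.of(Overwrite.NEVER, true), 4);
        STORM_TO_DPM.put(List.of(Overwrite.ALWAYS, true), 5);
        STORM_TO_DPM.put(List.of(Overwrite.WHENFILESAREDIFFERENT, true), 6);
      }

      static int toDPM(Overwrite om, boolean removeSourceFiles) {
        return STORM_TO_DPM.getOrDefault(List.of(om, removeSourceFiles), -1);
      }

      public static void main(String[] args) {
        System.out.println(toDPM(Overwrite.ALWAYS, true)); // -> 5
      }
    }
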
diff --git a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java
deleted file mode 100644
index 419ff1515..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class represents a CopyChunkData that is part of a multifile Copy srm
- * request. It contains data about: the requestToken, the fromSURL, the
- * toSURL, the target fileLifeTime, the target fileStorageType and any
- * available target spaceToken, the target overwriteOption to be applied in
- * case the file already exists, and the return status of the file together
- * with its error string.
- *
- * @author EGRID - ICTP Trieste
- * @date September, 2005
- * @version 2.0
- */
-public class CopyPersistentChunkData extends CopyData implements
- PersistentChunkData {
-
- private static final Logger log = LoggerFactory
- .getLogger(CopyPersistentChunkData.class);
-
- /**
- * long representing the primary key for the persistence layer!
- */
- private long primaryKey = -1;
-
- /**
- * This is the requestToken of the multifile srm request to which this chunk
- * belongs
- */
- private TRequestToken requestToken;
-
- public CopyPersistentChunkData(TRequestToken requestToken, TSURL fromSURL,
- TSURL destinationSURL, TLifeTimeInSeconds lifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TOverwriteMode overwriteOption, TReturnStatus status)
- throws InvalidCopyPersistentChunkDataAttributesException,
- InvalidCopyDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status);
- if (requestToken == null) {
- log.debug("CopyPersistentChunkData: requestToken is null!");
- throw new InvalidCopyPersistentChunkDataAttributesException(requestToken,
- fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status);
- }
- this.requestToken = requestToken;
- }
-
- /**
- * Method used to get the primary key used in the persistence layer!
- */
- public long getPrimaryKey() {
-
- return primaryKey;
- }
-
- /**
- * Method used to set the primary key to be used in the persistence layer!
- */
- public void setPrimaryKey(long l) {
-
- primaryKey = l;
- }
-
- /**
- * Method that returns the requestToken of the srm request to which this chunk
- * belongs.
- */
- public TRequestToken getRequestToken() {
-
- return requestToken;
- }
-
- @Override
- public long getIdentifier() {
-
- return getPrimaryKey();
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java
deleted file mode 100644
index bd269f407..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import it.grid.storm.srm.types.TOverwriteMode;
-
-/**
- * Package private auxiliary class used to convert between DPM and StoRM
- * representation of Copy TOverwriteMode+TDirOption request specific
- * information, and Flags in storm_copy_filereq.
- *
- * @author: EGRID - ICTP Trieste
- * @version: 1.0
- * @date: September 2005
- */
-class CopySpecificFlagConverter {
-
- private Map DPMtoSTORM = new HashMap();
- private Map STORMtoDPM = new HashMap();
-
- private static CopySpecificFlagConverter c = new CopySpecificFlagConverter();
-
- /**
-  * Private constructor that fills in the conversion table; in particular,
-  * DPM uses int values to represent the following pairs of values:
-  *
-  * 0 NEVER + source NOT directory
-  * 1 ALWAYS + source NOT directory
-  * 2 WHENFILESAREDIFFERENT + source NOT directory
-  * 4 NEVER + source is directory
-  * 5 ALWAYS + source is directory
-  * 6 WHENFILESAREDIFFERENT + source is directory
-  */
- private CopySpecificFlagConverter() {
-
- DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER,
- new Boolean(false) });
- DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS,
- new Boolean(false) });
- DPMtoSTORM.put(new Integer(2), new Object[] {
- TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) });
- DPMtoSTORM.put(new Integer(4), new Object[] { TOverwriteMode.NEVER,
- new Boolean(true) });
- DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS,
- new Boolean(true) });
- DPMtoSTORM.put(new Integer(6), new Object[] {
- TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) });
- Object aux;
- for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) {
- aux = i.next();
- STORMtoDPM.put(DPMtoSTORM.get(aux), aux);
- }
- }
-
- /**
- * Method that returns the only instance of CopySpecificFlagConverter.
- */
- public static CopySpecificFlagConverter getInstance() {
-
- return c;
- }
-
- /**
- * Method that returns the int used by DPM to represent the given
- * TOverwriteMode and isSourceADirectory boolean. -1 is returned if no match
- * is found.
- */
- public int toDPM(TOverwriteMode om, boolean isSourceADirectory) {
-
- // Object[] keys compare by identity, so a direct map lookup with a
- // freshly built array would never match: compare the pair contents.
- for (Iterator i = STORMtoDPM.keySet().iterator(); i.hasNext();) {
- Object[] key = (Object[]) i.next();
- if (key[0].equals(om)
- && key[1].equals(Boolean.valueOf(isSourceADirectory))) {
- return ((Integer) STORMtoDPM.get(key)).intValue();
- }
- }
- return -1;
- }
-
- /**
- * Method that returns an Object[] containing the TOverwriteMode and the
- * Boolean used by StoRM to represent the supplied int representation of DPM.
- * An empty Object[] is returned if no StoRM type is found.
- */
- public Object[] toSTORM(int n) {
-
- Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n));
- if (aux == null)
- return new Object[] {};
- return aux;
- }
-
- public String toString() {
-
- return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM
- + "\nSTORMtoDPM map:" + STORMtoDPM;
- }
-
-}
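For contrast, a minimal sketch of the same DPM flag table built on java.util.List keys, whose equals/hashCode compare element-wise, so the reverse lookup needs no manual key scan. The class and enum names are illustrative stand-ins, not StoRM code:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CopyFlagCodec {

  enum OverwriteMode { NEVER, ALWAYS, WHENFILESAREDIFFERENT }

  // DPM flag <-> (overwrite mode, source-is-directory) pair.
  private static final Map<Integer, List<Object>> DPM_TO_STORM = new HashMap<>();
  private static final Map<List<Object>, Integer> STORM_TO_DPM = new HashMap<>();

  static {
    DPM_TO_STORM.put(0, Arrays.asList(OverwriteMode.NEVER, false));
    DPM_TO_STORM.put(1, Arrays.asList(OverwriteMode.ALWAYS, false));
    DPM_TO_STORM.put(2, Arrays.asList(OverwriteMode.WHENFILESAREDIFFERENT, false));
    DPM_TO_STORM.put(4, Arrays.asList(OverwriteMode.NEVER, true));
    DPM_TO_STORM.put(5, Arrays.asList(OverwriteMode.ALWAYS, true));
    DPM_TO_STORM.put(6, Arrays.asList(OverwriteMode.WHENFILESAREDIFFERENT, true));
    // Invert the table once; List keys make the reverse lookup reliable.
    DPM_TO_STORM.forEach((flag, pair) -> STORM_TO_DPM.put(pair, flag));
  }

  static int toDPM(OverwriteMode om, boolean isSourceADirectory) {
    // A freshly built List matches the stored key by content, not identity.
    return STORM_TO_DPM.getOrDefault(Arrays.asList(om, isSourceADirectory), -1);
  }

  public static void main(String[] args) {
    System.out.println(toDPM(OverwriteMode.ALWAYS, true));  // 5
    System.out.println(toDPM(OverwriteMode.NEVER, false));  // 0
  }
}
```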
diff --git a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java
deleted file mode 100644
index 3627d68c6..000000000
--- a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.config.Configuration;
-
-/**
- * Class that handles the DB representation of a file lifetime as expressed by
- * a TLifeTimeInSeconds object; in particular it takes care of the protocol
- * specification:
- *
- * 0/null/negative values are translated into default StoRM configurable
- * values. StoRM's Empty TLifeTimeInSeconds is translated as 0.
- *
- * @author EGRID ICTP
- * @version 1.0
- * @date March 2007
- */
-public class FileLifetimeConverter {
-
- private static FileLifetimeConverter stc = new FileLifetimeConverter(); // only
- // instance
-
- private FileLifetimeConverter() {
-
- }
-
- /**
- * Method that returns the only instance of FileLifetimeConverter
- */
- public static FileLifetimeConverter getInstance() {
-
- return stc;
- }
-
- /**
- * Method that translates the Empty TLifeTimeInSeconds into the empty
- * representation of DB which is 0. Any other value is left as is.
- */
- public int toDB(long l) {
-
- if (l == TLifeTimeInSeconds.makeEmpty().value())
- return 0;
- return new Long(l).intValue();
- }
-
- /**
- * Method that returns the long corresponding to the int value in the DB,
- * except if it is 0, NULL or negative; a configurable default value is
- * returned instead, corresponding to the getFileLifetimeDefault()
- * Configuration class method.
- */
- public long toStoRM(int s) {
-
- if (s <= 0)
- return Configuration.getInstance().getFileLifetimeDefault();
- return new Integer(s).longValue();
- }
-}
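The translation rules documented above reduce to two small functions. A self-contained sketch of the round trip; DEFAULT_FILE_LIFETIME is an assumed stand-in for the value returned by Configuration.getInstance().getFileLifetimeDefault():

```java
public class LifetimeCodecDemo {

  // Hypothetical default, standing in for the configured file lifetime.
  static final long DEFAULT_FILE_LIFETIME = 3600L;

  // StoRM -> DB: the "empty" lifetime marker is stored as 0; any other
  // value is stored as-is (truncated to int, as in the original code).
  static int toDB(long lifetime, long emptyMarker) {
    return lifetime == emptyMarker ? 0 : (int) lifetime;
  }

  // DB -> StoRM: 0, NULL (read back as 0 by JDBC) and negative values
  // all fall back to the configured default.
  static long toStoRM(int dbValue) {
    return dbValue <= 0 ? DEFAULT_FILE_LIFETIME : dbValue;
  }

  public static void main(String[] args) {
    System.out.println(toStoRM(-5));    // 3600 (default)
    System.out.println(toStoRM(0));     // 3600 (default)
    System.out.println(toStoRM(7200));  // 7200
  }
}
```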
diff --git a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java b/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java
deleted file mode 100644
index 0f8f81710..000000000
--- a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.config.Configuration;
-
-/**
- * Package private auxiliary class used to convert between DB raw data and StoRM
- * object model representation of TFileStorageType.
- *
- * @author: EGRID ICTP
- * @version: 2.0
- * @date: June 2005
- */
-class FileStorageTypeConverter {
-
- private Map<String, TFileStorageType> DBtoSTORM = new HashMap<String, TFileStorageType>();
- private Map<TFileStorageType, String> STORMtoDB = new HashMap<TFileStorageType, String>();
-
- private static FileStorageTypeConverter c = new FileStorageTypeConverter();
-
- /**
- * Private constructor that fills in the conversion tables;
- *
- * V - VOLATILE
- * P - PERMANENT
- * D - DURABLE
- */
- private FileStorageTypeConverter() {
-
- DBtoSTORM.put("V", TFileStorageType.VOLATILE);
- DBtoSTORM.put("P", TFileStorageType.PERMANENT);
- DBtoSTORM.put("D", TFileStorageType.DURABLE);
- String aux;
- for (Iterator<String> i = DBtoSTORM.keySet().iterator(); i.hasNext();) {
- aux = i.next();
- STORMtoDB.put(DBtoSTORM.get(aux), aux);
- }
- }
-
- /**
- * Method that returns the only instance of FileStorageTypeConverter.
- */
- public static FileStorageTypeConverter getInstance() {
-
- return c;
- }
-
- /**
- * Method that returns the String used in the DB to represent the given
- * TFileStorageType. The empty String "" is returned if no match is found.
- */
- public String toDB(TFileStorageType fst) {
-
- String aux = (String) STORMtoDB.get(fst);
- if (aux == null)
- return "";
- return aux;
- }
-
- /**
- * Method that returns the TFileStorageType used by StoRM to represent the
- * supplied String representation in the DB. A configured default
- * TFileStorageType is returned in case no corresponding StoRM type is found.
- * TFileStorageType.EMPTY is returned if there are configuration errors.
- */
- public TFileStorageType toSTORM(String s) {
-
- TFileStorageType aux = DBtoSTORM.get(s);
- if (aux == null)
- // This case is that the String s is different from V,P or D.
- aux = DBtoSTORM.get(Configuration.getInstance()
- .getDefaultFileStorageType());
- if (aux == null)
- // This case should never happen: it means the configured default itself
- // is invalid, so EMPTY is returned to signal the configuration error.
- return TFileStorageType.EMPTY;
- else
- return aux;
- }
-
- public String toString() {
-
- return "FileStorageTypeConverter.\nDBtoSTORM map:" + DBtoSTORM
- + "\nSTORMtoDB map:" + STORMtoDB;
- }
-
-}
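With generics the same two-way table needs no casts. A minimal sketch, using an illustrative enum in place of TFileStorageType and a plain constant in place of the configured default storage type:

```java
import java.util.HashMap;
import java.util.Map;

public class StorageTypeCodec {

  enum FileStorageType { VOLATILE, PERMANENT, DURABLE }

  private static final Map<String, FileStorageType> DB_TO_STORM = new HashMap<>();
  private static final Map<FileStorageType, String> STORM_TO_DB = new HashMap<>();
  // Assumed default, standing in for Configuration.getDefaultFileStorageType().
  private static final FileStorageType DEFAULT = FileStorageType.PERMANENT;

  static {
    DB_TO_STORM.put("V", FileStorageType.VOLATILE);
    DB_TO_STORM.put("P", FileStorageType.PERMANENT);
    DB_TO_STORM.put("D", FileStorageType.DURABLE);
    // Build the reverse table once from the forward one.
    DB_TO_STORM.forEach((k, v) -> STORM_TO_DB.put(v, k));
  }

  static String toDB(FileStorageType t) {
    return STORM_TO_DB.getOrDefault(t, "");
  }

  static FileStorageType toSTORM(String s) {
    return DB_TO_STORM.getOrDefault(s, DEFAULT);
  }

  public static void main(String[] args) {
    System.out.println(toDB(FileStorageType.DURABLE)); // D
    System.out.println(toSTORM("X"));                  // PERMANENT (fallback)
  }
}
```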
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java
deleted file mode 100644
index 6046d423d..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * This class represents an exception thrown when the attributes supplied to
- * the constructor of BoLChunkData are invalid, that is if any of the following
- * is _null_: requestToken, fromSURL, lifeTime, dirOption, transferProtocols,
- * fileSize, status, transferURL.
- *
- * @author CNAF
- * @date Aug 2009
- * @version 1.0
- */
-public class InvalidBoLChunkDataAttributesException extends Exception {
-
- private static final long serialVersionUID = 5657310881067434280L;
-
- // booleans that indicate whether the corresponding variable is null
- private boolean nullRequestToken;
- private boolean nullFromSURL;
- private boolean nullLifeTime;
- private boolean nullDirOption;
- private boolean nullTransferProtocols;
- private boolean nullFileSize;
- private boolean nullStatus;
- private boolean nullTransferURL;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidBoLChunkDataAttributesException(TRequestToken requestToken,
- TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status,
- TTURL transferURL) {
-
- nullRequestToken = requestToken == null;
- nullFromSURL = fromSURL == null;
- nullLifeTime = lifeTime == null;
- nullDirOption = dirOption == null;
- nullTransferProtocols = transferProtocols == null;
- nullFileSize = fileSize == null;
- nullStatus = status == null;
- nullTransferURL = transferURL == null;
- }
-
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append("Invalid BoLChunkData attributes: null-requestToken=");
- sb.append(nullRequestToken);
- sb.append("; nul-fromSURL=");
- sb.append(nullFromSURL);
- sb.append("; null-lifeTime=");
- sb.append(nullLifeTime);
- sb.append("; null-dirOption=");
- sb.append(nullDirOption);
- sb.append("; null-transferProtocols=");
- sb.append(nullTransferProtocols);
- sb.append("; null-fileSize=");
- sb.append(nullFileSize);
- sb.append("; null-status=");
- sb.append(nullStatus);
- sb.append("; null-transferURL=");
- sb.append(nullTransferURL);
- sb.append(".");
- return sb.toString();
- }
-}
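The Invalid*AttributesException classes in this package all follow the same pattern: record one null-flag per constructor argument and render the flags in toString(). A generic sketch of that diagnostic idiom; the describeNulls helper is hypothetical, not StoRM API:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class NullFlagDemo {

  // Builds the same kind of "null-<name>=<true|false>" report the
  // exception classes above assemble by hand, one flag per attribute.
  static String describeNulls(Map<String, Object> attributes) {
    StringBuilder sb = new StringBuilder("Invalid attributes:");
    for (Map.Entry<String, Object> e : attributes.entrySet()) {
      sb.append(" null-").append(e.getKey()).append("=")
        .append(e.getValue() == null).append(";");
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    Map<String, Object> attrs = new LinkedHashMap<>();
    attrs.put("requestToken", null);
    attrs.put("fromSURL", "srm://example.org/file");
    System.out.println(describeNulls(attrs));
    // -> Invalid attributes: null-requestToken=true; null-fromSURL=false;
  }
}
```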
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java
deleted file mode 100644
index 86d657c75..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TReturnStatus;
-
-/**
- * This class represents an exception thrown when the attributes supplied to
- * the constructor of CopyChunkData are invalid, that is if any of the
- * following is _null_: requestToken, fromSURL, toSURL, lifetime, fileStorageType,
- * spaceToken, overwriteOption, status.
- *
- * @author EGRID - ICTP Trieste
- * @date September, 2005
- * @version 2.0
- */
-public class InvalidCopyChunkDataAttributesException extends Exception {
-
- private static final long serialVersionUID = 6786154038995023512L;
-
- // booleans that indicate whether the corresponding variable is null
- private boolean nullRequestToken;
- private boolean nullFromSURL;
- private boolean nullToSURL;
- private boolean nullLifetime;
- private boolean nullFileStorageType;
- private boolean nullSpaceToken;
- private boolean nullOverwriteOption;
- private boolean nullStatus;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidCopyChunkDataAttributesException(TRequestToken requestToken,
- TSURL fromSURL, TSURL toSURL, TLifeTimeInSeconds lifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TOverwriteMode overwriteOption, TReturnStatus status) {
-
- nullRequestToken = requestToken == null;
- nullFromSURL = fromSURL == null;
- nullToSURL = toSURL == null;
- nullLifetime = lifetime == null;
- nullFileStorageType = fileStorageType == null;
- nullSpaceToken = spaceToken == null;
- nullOverwriteOption = overwriteOption == null;
- nullStatus = status == null;
- }
-
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append("Invalid CopyChunkData attributes: null-requestToken=");
- sb.append(nullRequestToken);
- sb.append("; null-fromSURL=");
- sb.append(nullFromSURL);
- sb.append("; null-toSURL=");
- sb.append(nullToSURL);
- sb.append("; null-lifetime=");
- sb.append(nullLifetime);
- sb.append("; null-filestorageType=");
- sb.append(nullFileStorageType);
- sb.append("; null-spaceToken=");
- sb.append(nullSpaceToken);
- sb.append("; null-overwriteOption=");
- sb.append(nullOverwriteOption);
- sb.append("; null-status=");
- sb.append(nullStatus);
- sb.append(".");
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java
deleted file mode 100644
index c31ed841a..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-
-/**
- * @author Michele Dibenedetto
- *
- */
-public class InvalidCopyDataAttributesException extends
- InvalidSurlRequestDataAttributesException {
-
- private static final long serialVersionUID = -1217486426437414490L;
- protected boolean nullDestinationSURL;
- protected boolean nullLifetime;
- protected boolean nullFileStorageType;
- protected boolean nullSpaceToken;
- protected boolean nullOverwriteOption;
-
- public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) {
-
- super(SURL, status);
- init(destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption);
- }
-
- public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, String message) {
-
- super(SURL, status, message);
- init(destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption);
- }
-
- public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, Throwable cause) {
-
- super(SURL, status, cause);
- init(destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption);
- }
-
- public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, String message, Throwable cause) {
-
- super(SURL, status, message, cause);
- init(destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption);
- }
-
- private void init(TSURL destinationSURL, TLifeTimeInSeconds lifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TOverwriteMode overwriteOption) {
-
- nullDestinationSURL = destinationSURL == null;
- nullLifetime = lifetime == null;
- nullFileStorageType = fileStorageType == null;
- nullSpaceToken = spaceToken == null;
- nullOverwriteOption = overwriteOption == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder.append("InvalidCopyDataAttributesException [nullDestinationSURL=");
- builder.append(nullDestinationSURL);
- builder.append(", nullLifetime=");
- builder.append(nullLifetime);
- builder.append(", nullFileStorageType=");
- builder.append(nullFileStorageType);
- builder.append(", nullSpaceToken=");
- builder.append(nullSpaceToken);
- builder.append(", nullOverwriteOption=");
- builder.append(nullOverwriteOption);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java
deleted file mode 100644
index 4259b4db2..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-
-/**
- * @author Michele Dibenedetto
- *
- */
-public class InvalidCopyPersistentChunkDataAttributesException extends
- InvalidCopyDataAttributesException {
-
- /**
- *
- */
- private static final long serialVersionUID = 1266996505954208061L;
- private boolean nullRequestToken;
-
- public InvalidCopyPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) {
-
- super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status);
- init(requestToken);
- }
-
- public InvalidCopyPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, String message) {
-
- super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status, message);
- init(requestToken);
- }
-
- public InvalidCopyPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, Throwable cause) {
-
- super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status, cause);
- init(requestToken);
- }
-
- public InvalidCopyPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, String message, Throwable cause) {
-
- super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status, message, cause);
- init(requestToken);
- }
-
- private void init(TRequestToken requestToken) {
-
- nullRequestToken = requestToken == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder
- .append("InvalidCopyPersistentChunkDataAttributesException [nullRequestToken=");
- builder.append(nullRequestToken);
- builder.append(", nullDestinationSURL=");
- builder.append(nullDestinationSURL);
- builder.append(", nullLifetime=");
- builder.append(nullLifetime);
- builder.append(", nullFileStorageType=");
- builder.append(nullFileStorageType);
- builder.append(", nullSpaceToken=");
- builder.append(nullSpaceToken);
- builder.append(", nullOverwriteOption=");
- builder.append(nullOverwriteOption);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java
deleted file mode 100644
index 65235db54..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TReturnStatus;
-
-/**
- * This class represents an exception thrown when the attributes supplied to the
- * constructor of ReducedCopyChunkData are invalid, that is if any of the
- * following is _null_: fromSURL, toSURL, status.
- *
- * @author Michele Dibenedetto
- */
-@SuppressWarnings("serial")
-public class InvalidReducedCopyChunkDataAttributesException extends Exception {
-
- // booleans that indicate whether the corresponding variable is null
- private boolean nullFromSURL;
- private boolean nullToSURL;
- private boolean nullStatus;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidReducedCopyChunkDataAttributesException(TSURL fromSURL,
- TSURL toSURL, TReturnStatus status) {
-
- nullFromSURL = fromSURL == null;
- nullToSURL = toSURL == null;
- nullStatus = status == null;
- }
-
- @Override
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append("Invalid CopyChunkData attributes: null-requestToken=");
- sb.append("; null-fromSURL=");
- sb.append(nullFromSURL);
- sb.append("; null-toSURL=");
- sb.append(nullToSURL);
- sb.append("; null-status=");
- sb.append(nullStatus);
- sb.append(".");
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java b/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java
deleted file mode 100644
index ddce2846c..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-/**
- * Class that represents an Exception thrown by the ReservedSpaceCatalog when it
- * is asked to retrieve info from the persistence but the raw data is invalid
- * and does not allow well-formed domain objects to be created.
- *
- * @author: EGRID ICTP
- * @version: 1.0
- * @date: June 2005
- */
-public class InvalidRetrievedDataException extends Exception {
-
- private static final long serialVersionUID = -3645913441787012438L;
-
- private String requestToken;
- private String requestType;
- private int totalFilesInThisRequest;
- private int numOfQueuedRequests;
- private int numOfProgressing;
- private int numFinished;
- private boolean isSuspended;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidRetrievedDataException(String requestToken, String requestType,
- int totalFilesInThisRequest, int numOfQueuedRequests,
- int numOfProgressingRequests, int numFinished, boolean isSuspended) {
-
- this.requestToken = requestToken;
- this.requestType = requestType;
- this.totalFilesInThisRequest = totalFilesInThisRequest;
- this.numOfQueuedRequests = numOfQueuedRequests;
- this.numOfProgressing = numOfProgressingRequests;
- this.numFinished = numFinished;
- this.isSuspended = isSuspended;
- }
-
- public String toString() {
-
- return "InvalidRetrievedDataException: token=" + requestToken + " type="
- + requestType + " total-files=" + totalFilesInThisRequest + " queued="
- + numOfQueuedRequests + " progressing=" + numOfProgressing + " finished="
- + numFinished + " isSusp=" + isSuspended;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/JiTData.java b/src/main/java/it/grid/storm/catalogs/JiTData.java
deleted file mode 100644
index 4ef357735..000000000
--- a/src/main/java/it/grid/storm/catalogs/JiTData.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-/**
- * Class that represents data associated to JiT entries. It contains a String
- * representing the file, an int representing the ACL, an int representing the
- * user UID, an int representing the user GID.
- *
- * @author EGRID - ICTP Trieste
- * @version 1.0
- * @date November 2006
- */
-public class JiTData {
-
- private String file = "";
- private int uid = -1;
- private int gid = -1;
- private int acl = -1;
-
- /**
- * Constructor requiring the complete name of the file as String, the acl as
- * int, the uid and primary gid of the LocalUser both as int.
- */
- public JiTData(String file, int acl, int uid, int gid) {
-
- this.file = file;
- this.acl = acl;
- this.uid = uid;
- this.gid = gid;
- }
-
- public String pfn() {
-
- return file;
- }
-
- public int acl() {
-
- return acl;
- }
-
- public int uid() {
-
- return uid;
- }
-
- public int gid() {
-
- return gid;
- }
-
- public String toString() {
-
- return "file=" + file + " acl=" + acl + " uid=" + uid + " gid=" + gid;
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java b/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java
deleted file mode 100644
index fa03e3c3c..000000000
--- a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestToken;
-
-/**
- * Class that represents an Exception thrown by the ReservedSpaceCatalog when it
- * finds more than one row of data for the specified request.
- *
- * @author: EGRID ICTP
- * @version: 1.0
- * @date: June 2005
- */
-public class MultipleDataEntriesException extends Exception {
-
- private static final long serialVersionUID = 427636739469695868L;
-
- private TRequestToken requestToken;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public MultipleDataEntriesException(TRequestToken requestToken) {
-
- this.requestToken = requestToken;
- }
-
- public String toString() {
-
- return "MultipleDataEntriesException: requestToken=" + requestToken;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java b/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java
deleted file mode 100644
index 548f0df9f..000000000
--- a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestToken;
-
-/**
- * Class that represents an Exception thrown by the ReservedSpaceCatalog when it
- * finds no data for the specified request.
- *
- * @author: EGRID ICTP
- * @version: 1.0
- * @date: June 2005
- */
-public class NoDataFoundException extends Exception {
-
- private static final long serialVersionUID = -718255813130266566L;
-
- private TRequestToken requestToken;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public NoDataFoundException(TRequestToken requestToken) {
-
- this.requestToken = requestToken;
- }
-
- public String toString() {
-
- return "NoDataFoundException: requestToken=" + requestToken;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java b/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java
deleted file mode 100644
index 45ba54d1e..000000000
--- a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.config.Configuration;
-
-/**
- * Package private auxiliary class used to convert between DB and StoRM object
- * model representation of TOverwriteMode.
- *
- * @author: EGRID ICTP
- * @version: 2.0
- * @date: June 2005
- */
-public class OverwriteModeConverter {
-
- private Map DBtoSTORM = new HashMap();
- private Map STORMtoDB = new HashMap();
-
- private static OverwriteModeConverter c = new OverwriteModeConverter();
-
- /**
- * Private constructor that fills in the conversion table; in particular, DB
- * uses String values to represent TOverwriteMode:
- *
- * N NEVER
- * A ALWAYS
- * D WHENFILESAREDIFFERENT
- */
- private OverwriteModeConverter() {
-
- DBtoSTORM.put("N", TOverwriteMode.NEVER);
- DBtoSTORM.put("A", TOverwriteMode.ALWAYS);
- DBtoSTORM.put("D", TOverwriteMode.WHENFILESAREDIFFERENT);
- Object aux;
- for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) {
- aux = i.next();
- STORMtoDB.put(DBtoSTORM.get(aux), aux);
- }
- }
-
- /**
- * Method that returns the only instance of OverwriteModeConverter.
- */
- public static OverwriteModeConverter getInstance() {
-
- return c;
- }
-
- /**
- * Method that returns the String used in the DB to represent the given
- * TOverwriteMode. The empty String "" is returned if no match is found.
- */
- public String toDB(TOverwriteMode om) {
-
- String aux = (String) STORMtoDB.get(om);
- if (aux == null)
- return "";
- return aux;
- }
-
- /**
- * Method that returns the TOverwriteMode used by StoRM to represent the
- * supplied String representation in the DB. A configured default TOverwriteMode
- * is returned in case no corresponding StoRM type is found.
- * TOverwriteMode.EMPTY is returned if there are configuration errors.
- */
- public TOverwriteMode toSTORM(String s) {
-
- TOverwriteMode aux = (TOverwriteMode) DBtoSTORM.get(s);
- if (aux == null)
- aux = (TOverwriteMode) DBtoSTORM.get(Configuration.getInstance()
- .getDefaultOverwriteMode());
- if (aux == null)
- return TOverwriteMode.EMPTY;
- else
- return aux;
- }
-
- public String toString() {
-
- return "OverWriteModeConverter.\nDBtoSTORM map:" + DBtoSTORM
- + "\nSTORMtoDB map:" + STORMtoDB;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java
deleted file mode 100644
index 8a111afda..000000000
--- a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.config.Configuration;
-
-/**
- * Class that handles the DB representation of a TLifeTimeInSeconds; in
- * particular it takes care of the protocol specification:
- *
- * 0/null/negative values are translated into default StoRM configurable
- * values. StoRM's Empty TLifeTimeInSeconds is translated as 0.
- *
- * @author EGRID ICTP
- * @version 1.0
- * @date March 2007
- */
-public class PinLifetimeConverter {
-
- private static PinLifetimeConverter stc = new PinLifetimeConverter(); // only
- // instance
-
- private PinLifetimeConverter() {
-
- }
-
- /**
- * Method that returns the only instance of PinLifetimeConverter
- */
- public static PinLifetimeConverter getInstance() {
-
- return stc;
- }
-
- /**
- * Method that translates the Empty TLifeTimeInSeconds into the empty
- * representation of DB which is 0. Any other value is left as is.
- */
- public int toDB(long l) {
-
- if (l == TLifeTimeInSeconds.makeEmpty().value())
- return 0;
- return new Long(l).intValue();
- }
-
- /**
- * Method that returns the long corresponding to the int value in the DB,
- * except if it is 0, NULL or negative; a configurable default value is
- * returned instead, corresponding to the getPinLifetimeDefault()
- * Configuration class method.
- */
- public long toStoRM(int s) {
-
- if (s == 0) {
- return Configuration.getInstance().getPinLifetimeDefault();
- } else if (s < 0) {
- // The default is used also as a Minimum
- return Configuration.getInstance().getPinLifetimeDefault();
- }
- return new Integer(s).longValue();
- }
-
- public long toStoRM(long s) {
-
- if (s == 0) {
- return Configuration.getInstance().getPinLifetimeDefault();
- } else if (s < 0) {
- // The default is used also as a Minimum
- return Configuration.getInstance().getPinLifetimeDefault();
- }
- return s;
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java
index 31723b38c..8f25ccd8a 100644
--- a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java
+++ b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java
@@ -4,18 +4,33 @@
*/
package it.grid.storm.catalogs;
-import it.grid.storm.common.types.SizeUnit;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import it.grid.storm.common.types.TURLPrefix;
import it.grid.storm.common.types.TimeUnit;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
import it.grid.storm.griduser.AbstractGridUser;
import it.grid.storm.griduser.GridUserInterface;
import it.grid.storm.griduser.GridUserManager;
+import it.grid.storm.persistence.converter.PinLifetimeConverter;
+import it.grid.storm.persistence.converter.StatusCodeConverter;
+import it.grid.storm.persistence.converter.TURLConverter;
+import it.grid.storm.persistence.converter.TransferProtocolListConverter;
+import it.grid.storm.persistence.dao.PtGChunkDAO;
+import it.grid.storm.persistence.exceptions.InvalidReducedPtGChunkDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.impl.mysql.PtGChunkDAOMySql;
+import it.grid.storm.persistence.model.PtGChunkDataTO;
+import it.grid.storm.persistence.model.PtGPersistentChunkData;
+import it.grid.storm.persistence.model.ReducedPtGChunkData;
+import it.grid.storm.persistence.model.ReducedPtGChunkDataTO;
import it.grid.storm.srm.types.InvalidTDirOptionAttributesException;
-import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException;
import it.grid.storm.srm.types.InvalidTSURLAttributesException;
import it.grid.storm.srm.types.InvalidTSizeAttributesException;
-import it.grid.storm.srm.types.InvalidTTURLAttributesException;
import it.grid.storm.srm.types.TDirOption;
import it.grid.storm.srm.types.TLifeTimeInSeconds;
import it.grid.storm.srm.types.TRequestToken;
@@ -25,820 +40,340 @@
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.srm.types.TTURL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class that represents StoRM's PtGChunkCatalog: it collects PtGChunkData and
- * provides methods for looking up a PtGChunkData based on TRequestToken, as
- * well as for adding a new entry and removing an existing one.
- *
- * @author EGRID - ICTP Trieste
- * @date April 26th, 2005
- * @version 4.0
- */
-@SuppressWarnings("unused")
public class PtGChunkCatalog {
- private static final Logger log = LoggerFactory
- .getLogger(PtGChunkCatalog.class);
-
- /* Only instance of PtGChunkCatalog present in StoRM! */
- private static final PtGChunkCatalog cat = new PtGChunkCatalog();
- private final PtGChunkDAO dao = PtGChunkDAO.getInstance();
-
- /*
- * Timer object in charge of transiting expired requests from SRM_FILE_PINNED
- * to SRM_RELEASED!
- */
- private final Timer transiter = new Timer();
- /* Delay time before starting cleaning thread! */
- private final long delay = Configuration.getInstance()
- .getTransitInitialDelay() * 1000;
- /* Period of execution of cleaning! */
- private final long period = Configuration.getInstance()
- .getTransitTimeInterval() * 1000;
-
- /**
- * Private constructor that starts the internal timer needed to periodically
- * check and transit requests whose pinLifetime has expired and are in
- * SRM_FILE_PINNED, to SRM_RELEASED.
- */
- private PtGChunkCatalog() {
-
- TimerTask transitTask = new TimerTask() {
-
- @Override
- public void run() {
-
- transitExpiredSRM_FILE_PINNED();
- }
- };
- transiter.scheduleAtFixedRate(transitTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of PtGChunkCatalog available.
- */
- public static PtGChunkCatalog getInstance() {
-
- return cat;
- }
-
- /**
- * Method used to update into Persistence a retrieved PtGChunkData. In case
- * any error occurs, the operation does not proceed but no Exception is
- * thrown. Error messages get logged.
- *
- * Only fileSize, StatusCode, errString and transferURL are updated. Likewise
- * for the request pinLifetime.
- */
- synchronized public void update(PtGPersistentChunkData chunkData) {
-
- PtGChunkDataTO to = new PtGChunkDataTO();
- /* Primary key needed by DAO Object */
- to.setPrimaryKey(chunkData.getPrimaryKey());
- to.setFileSize(chunkData.getFileSize().value());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setTurl(TURLConverter.getInstance().toDB(
- chunkData.getTransferURL().toString()));
- to.setLifeTime(PinLifetimeConverter.getInstance().toDB(
- chunkData.getPinLifeTime().value()));
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
- to.setClientDN(chunkData.getUser().getDn());
- if (chunkData.getUser() instanceof AbstractGridUser) {
- if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
- to.setVomsAttributes(((AbstractGridUser) chunkData.getUser())
- .getFQANsAsString());
- }
-
- }
- dao.update(to);
- }
-
- /**
- * Refresh method. THIS IS A WORK IN PROGRESS!!!! This method has to synch
- * the ChunkData information with the database status, intended as the status
- * code and the TURL.
- *
- * @param inputChunk the PtGPersistentChunkData to refresh
- * @return the refreshed PtGPersistentChunkData
- */
- synchronized public PtGPersistentChunkData refreshStatus(
- PtGPersistentChunkData inputChunk) {
-
- PtGChunkDataTO chunkDataTO = dao.refresh(inputChunk.getPrimaryKey());
-
- log.debug("PtG CHUNK CATALOG: retrieved data " + chunkDataTO);
- if (chunkDataTO == null) {
- log.warn("PtG CHUNK CATALOG! Empty TO found in persistence for specified "
- + "request: {}", inputChunk.getPrimaryKey());
- return inputChunk;
- }
-
- /*
- * In this first version the only field updated is the Status. Once
- * updated, the new status is rewritten into the input ChunkData
- */
-
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status());
- if (code != TStatusCode.EMPTY) {
- status = new TReturnStatus(code, chunkDataTO.errString());
- }
- inputChunk.setStatus(status);
- TTURL turl = null;
- try {
- turl = TTURL.makeFromString(chunkDataTO.turl());
- } catch (InvalidTTURLAttributesException e) {
- log.info("PtGChunkCatalog (FALSE-ERROR-in-abort-refresh-status?):"
- + " built a TURL with protocol NULL (retrieved from the DB..)");
- }
- inputChunk.setTransferURL(turl);
- return inputChunk;
- }
-
- /**
- * Method that returns a Collection of PtGChunkData Objects matching the
- * supplied TRequestToken.
- *
- * If any of the data associated to the TRequestToken is not well formed and
- * so does not allow a PtGChunkData Object to be created, then that part of
- * the request is dropped and gets logged, and the processing continues with
- * the next part. All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks to process then an empty Collection is returned, and
- * a message gets logged.
- */
- synchronized public Collection<PtGPersistentChunkData> lookup(TRequestToken rt) {
-
- Collection<PtGChunkDataTO> chunkTOs = dao.find(rt);
- log.debug("PtG CHUNK CATALOG: retrieved data " + chunkTOs);
- ArrayList<PtGPersistentChunkData> list = new ArrayList<PtGPersistentChunkData>();
- if (chunkTOs.isEmpty()) {
- log.warn("PtG CHUNK CATALOG! No chunks found in persistence for "
- + "specified request: {}", rt);
- return list;
- }
- PtGPersistentChunkData chunk;
- for (PtGChunkDataTO chunkTO : chunkTOs) {
- chunk = makeOne(chunkTO, rt);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(this.completeTO(chunkTO, chunk));
- } catch (InvalidReducedPtGChunkDataAttributesException e) {
- log.warn("PtG CHUNK CATALOG! unable to add missing informations on DB "
- + "to the request: {}", e.getMessage());
- }
- }
- log.debug("PtG CHUNK CATALOG: returning " + list);
- return list;
- }
-
- /**
- * Generates a PtGChunkData from the received PtGChunkDataTO
- *
- * @param chunkDataTO
- * @param rt
- * @return
- */
- private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO,
- TRequestToken rt) {
-
- StringBuilder errorSb = new StringBuilder();
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (chunkDataTO.normalizedStFN() != null) {
- fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN());
- }
- if (chunkDataTO.surlUniqueID() != null) {
- fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue());
- }
- // lifeTime
- TLifeTimeInSeconds lifeTime = null;
- try {
- long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(
- chunkDataTO.lifeTime());
- // Check for max value allowed
- long max = Configuration.getInstance().getPinLifetimeMaximum();
- if (pinLifeTime > max) {
- log.warn("PinLifeTime is greater than the max value allowed."
- + " Drop the value to the max = {} seconds", max);
- pinLifeTime = max;
- }
- lifeTime = TLifeTimeInSeconds.make((pinLifeTime), TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // dirOption
- TDirOption dirOption = null;
- try {
- dirOption = new TDirOption(chunkDataTO.dirOption(),
- chunkDataTO.allLevelRecursive(), chunkDataTO.numLevel());
- } catch (InvalidTDirOptionAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // transferProtocols
- TURLPrefix transferProtocols = TransferProtocolListConverter
- .toSTORM(chunkDataTO.protocolList());
- if (transferProtocols.size() == 0) {
- errorSb.append("\nEmpty list of TransferProtocols or could "
- + "not translate TransferProtocols!");
- /* fail construction of PtGChunkData! */
- transferProtocols = null;
- }
- // fileSize
- TSizeInBytes fileSize = null;
- try {
- fileSize = TSizeInBytes.make(chunkDataTO.fileSize(), SizeUnit.BYTES);
- } catch (InvalidTSizeAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- chunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + chunkDataTO.status());
- } else {
- status = new TReturnStatus(code, chunkDataTO.errString());
- }
- GridUserInterface gridUser = null;
- try {
- if (chunkDataTO.vomsAttributes() != null
- && !chunkDataTO.vomsAttributes().trim().equals("")) {
- gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(),
- chunkDataTO.vomsAttributesArray());
- } else {
- gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN());
- }
-
- } catch (IllegalArgumentException e) {
- log.error("Unexpected error on voms grid user creation."
- + " IllegalArgumentException: {}", e.getMessage(), e);
- }
- // transferURL
- /*
- * whatever is read is just meaningless because PtG will fill it in!!! So
- * create an Empty TTURL by default! Vital to avoid problems with unknown
- * DPM NULL/EMPTY logic policy!
- */
- TTURL transferURL = TTURL.makeEmpty();
- // make PtGChunkData
- PtGPersistentChunkData aux = null;
- try {
- aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime,
- dirOption, transferProtocols, fileSize, status, transferURL);
- aux.setPrimaryKey(chunkDataTO.primaryKey());
- } catch (InvalidSurlRequestDataAttributesException e) {
- dao.signalMalformedPtGChunk(chunkDataTO);
- log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from "
- + "persistence. Dropping chunk from request {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- *
- * Adds to the received PtGChunkDataTO the normalized StFN and the SURL unique
- * ID taken from the PtGChunkData
- *
- * @param chunkTO
- * @param chunk
- */
- private void completeTO(ReducedPtGChunkDataTO chunkTO,
- final ReducedPtGChunkData chunk) {
-
- chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN());
- chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId()));
- }
-
- /**
- *
- * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and
- * completes it with the normalized StFN and the SURL unique ID taken from the
- * PtGChunkData
- *
- * @param chunkTO
- * @param chunk
- * @return
- * @throws InvalidReducedPtGChunkDataAttributesException
- */
- private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO,
- final PtGPersistentChunkData chunk)
- throws InvalidReducedPtGChunkDataAttributesException {
-
- ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO);
- this.completeTO(reducedChunkTO, this.reduce(chunk));
- return reducedChunkTO;
- }
-
- /**
- * Creates a ReducedPtGChunkData from the data contained in the received
- * PtGChunkData
- *
- * @param chunk
- * @return
- * @throws InvalidReducedPtGChunkDataAttributesException
- */
- private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk)
- throws InvalidReducedPtGChunkDataAttributesException {
-
- ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(),
- chunk.getStatus());
- reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
- return reducedChunk;
- }
-
- /**
- * Creates a ReducedPtGChunkDataTO from the data contained in the received
- * PtGChunkDataTO
- *
- * @param chunkTO
- * @return
- */
- private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) {
-
- ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO();
- reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
- reducedChunkTO.setFromSURL(chunkTO.fromSURL());
- reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
- reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID());
- reducedChunkTO.setStatus(chunkTO.status());
- reducedChunkTO.setErrString(chunkTO.errString());
- return reducedChunkTO;
- }
-
- /**
- * Checks if the received PtGChunkDataTO contains the fields not set by the
- * front end but required
- *
- * @param chunkTO
- * @return
- */
- private boolean isComplete(PtGChunkDataTO chunkTO) {
-
- return (chunkTO.normalizedStFN() != null)
- && (chunkTO.surlUniqueID() != null);
- }
-
- /**
- * Checks if the received ReducedPtGChunkDataTO contains the fields not set by
- * the front end but required
- *
- * @param reducedChunkTO
- * @return
- */
- private boolean isComplete(ReducedPtGChunkDataTO reducedChunkTO) {
-
- return (reducedChunkTO.normalizedStFN() != null)
- && (reducedChunkTO.surlUniqueID() != null);
- }
-
- /**
- * Method that returns a Collection of ReducedPtGChunkData Objects associated
- * to the supplied TRequestToken.
- *
- * If any of the data retrieved for a given chunk is not well formed and so
- * does not allow a ReducedPtGChunkData Object to be created, then that chunk
- * is dropped and gets logged, while processing continues with the next one.
- * All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks associated to the given TRequestToken, then an empty
- * Collection is returned and a message gets logged.
- */
- synchronized public Collection<ReducedPtGChunkData> lookupReducedPtGChunkData(
- TRequestToken rt) {
-
- Collection<ReducedPtGChunkDataTO> reducedChunkDataTOs = dao.findReduced(rt
- .getValue());
- log.debug("PtG CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs);
- ArrayList<ReducedPtGChunkData> list = new ArrayList<ReducedPtGChunkData>();
- if (reducedChunkDataTOs.isEmpty()) {
- log.debug("PtG CHUNK CATALOG! No chunks found in persistence for {}", rt);
- } else {
- ReducedPtGChunkData reducedChunkData = null;
- for (ReducedPtGChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!this.isComplete(reducedChunkDataTO)) {
- this.completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("PtG CHUNK CATALOG: returning {}", list);
- }
- return list;
- }
-
- public Collection<ReducedPtGChunkData> lookupReducedPtGChunkData(
- TRequestToken requestToken, Collection<TSURL> surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection<ReducedPtGChunkDataTO> chunkDataTOCollection = dao.findReduced(
- requestToken, surlsUniqueIDs, surlsArray);
- log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildReducedChunkDataList(chunkDataTOCollection);
- }
-
- public Collection<PtGPersistentChunkData> lookupPtGChunkData(TSURL surl,
- GridUserInterface user) {
-
- return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl }), user);
- }
-
- public Collection<PtGPersistentChunkData> lookupPtGChunkData(TSURL surl) {
-
- return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl }));
- }
-
- public Collection<PtGPersistentChunkData> lookupPtGChunkData(
- List<TSURL> surls, GridUserInterface user) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection<PtGChunkDataTO> chunkDataTOCollection = dao.find(surlsUniqueIDs,
- surlsArray, user.getDn());
- log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildChunkDataList(chunkDataTOCollection);
- }
-
- public Collection<PtGPersistentChunkData> lookupPtGChunkData(List<TSURL> surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection<PtGChunkDataTO> chunkDataTOCollection = dao.find(surlsUniqueIDs,
- surlsArray);
- log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildChunkDataList(chunkDataTOCollection);
- }
-
- private Collection<PtGPersistentChunkData> buildChunkDataList(
- Collection<PtGChunkDataTO> chunkDataTOCollection) {
-
- ArrayList<PtGPersistentChunkData> list = new ArrayList<PtGPersistentChunkData>();
- PtGPersistentChunkData chunk;
- for (PtGChunkDataTO chunkTO : chunkDataTOCollection) {
- chunk = makeOne(chunkTO);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(this.completeTO(chunkTO, chunk));
- } catch (InvalidReducedPtGChunkDataAttributesException e) {
- log.warn("PtG CHUNK CATALOG! unable to add missing informations on "
- + "DB to the request: ", e.getMessage());
- }
- }
- return list;
- }
-
- private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkTO) {
-
- try {
- return makeOne(chunkTO,
- new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp()));
- } catch (InvalidTRequestTokenAttributesException e) {
- throw new IllegalStateException(
- "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: "
- + e);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedPtGChunkData Objects matching
- * the supplied GridUser and Collection of TSURLs. If any of the data
- * retrieved for a given chunk is not well formed and so does not allow a
- * ReducedPtGChunkData Object to be created, then that chunk is dropped and
- * gets logged, while processing continues with the next one. All valid chunks
- * get returned: the others get dropped. If there are no chunks associated to
- * the given GridUser and Collection of TSURLs, then an empty Collection is
- * returned and a message gets logged.
- */
- synchronized public Collection<ReducedPtGChunkData> lookupReducedPtGChunkData(
- GridUserInterface gu, Collection<TSURL> tsurlCollection) {
-
- int[] surlsUniqueIDs = new int[tsurlCollection.size()];
- String[] surls = new String[tsurlCollection.size()];
- int index = 0;
- for (TSURL tsurl : tsurlCollection) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- Collection<ReducedPtGChunkDataTO> chunkDataTOCollection = dao.findReduced(
- gu.getDn(), surlsUniqueIDs, surls);
- log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildReducedChunkDataList(chunkDataTOCollection);
- }
-
- private Collection<ReducedPtGChunkData> buildReducedChunkDataList(
- Collection<ReducedPtGChunkDataTO> chunkDataTOCollection) {
-
- ArrayList<ReducedPtGChunkData> list = new ArrayList<ReducedPtGChunkData>();
- ReducedPtGChunkData reducedChunkData;
- for (ReducedPtGChunkDataTO reducedChunkDataTO : chunkDataTOCollection) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!isComplete(reducedChunkDataTO)) {
- completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("PtG CHUNK CATALOG: returning {}",list);
- return list;
- }
-
- /**
- *
- *
- * @param reducedChunkDataTO
- * @return
- */
- private ReducedPtGChunkData makeOneReduced(
- ReducedPtGChunkDataTO reducedChunkDataTO) {
-
- StringBuilder errorSb = new StringBuilder();
- // fromSURL
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (reducedChunkDataTO.normalizedStFN() != null) {
- fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN());
- }
- if (reducedChunkDataTO.surlUniqueID() != null) {
- fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue());
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- reducedChunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + reducedChunkDataTO.status());
- } else {
- status = new TReturnStatus(code, reducedChunkDataTO.errString());
- }
- // make ReducedPtGChunkData
- ReducedPtGChunkData aux = null;
- try {
- aux = new ReducedPtGChunkData(fromSURL, status);
- aux.setPrimaryKey(reducedChunkDataTO.primaryKey());
- } catch (InvalidReducedPtGChunkDataAttributesException e) {
- log.warn("PtG CHUNK CATALOG! Retrieved malformed Reduced PtG chunk "
- + "data from persistence: dropping reduced chunk...");
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- * Method used to add into Persistence a new entry. The supplied PtGChunkData
- * gets the primary key changed to the value assigned in Persistence.
- *
- * This method is intended to be used by a recursive PtG request: the parent
- * request supplies a directory which must be expanded, so all new children
- * requests resulting from the files in the directory are added into
- * persistence.
- *
- * So this method does _not_ add a new SRM prepare_to_get request into the DB!
- *
- * The only children data written into the DB are: sourceSURL, TDirOption,
- * statusCode and explanation.
- *
- * In case of any error the operation does not proceed, but no Exception is
- * thrown! Proper messages get logged by the underlying DAO.
- */
- synchronized public void addChild(PtGPersistentChunkData chunkData) {
-
- PtGChunkDataTO to = new PtGChunkDataTO();
- /* needed for now to find ID of request! Must be changed soon! */
- to.setRequestToken(chunkData.getRequestToken().toString());
- to.setFromSURL(chunkData.getSURL().toString());
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
-
- to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
- to.setDirOption(chunkData.getDirOption().isDirectory());
- to.setNumLevel(chunkData.getDirOption().getNumLevel());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setClientDN(chunkData.getUser().getDn());
- if (chunkData.getUser() instanceof AbstractGridUser) {
- if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
- to.setVomsAttributes(((AbstractGridUser) chunkData.getUser())
- .getFQANsAsString());
- }
-
- }
- /* add the entry and update the Primary Key field! */
- dao.addChild(to);
- /* set the assigned PrimaryKey! */
- chunkData.setPrimaryKey(to.primaryKey());
- }
-
- /**
- * Method used to add into Persistence a new entry. The supplied PtGChunkData
- * gets the primary key changed to the value assigned in the Persistence. The
- * method requires the GridUser to whom associate the added request.
- *
- * This method is intended to be used by an srmCopy request in push mode which
- * implies a local srmPtG. The only fields from PtGChunkData that are
- * considered are: the requestToken, the sourceSURL, the pinLifetime, the
- * dirOption, the protocolList, the status and error string.
- *
- * So this method _adds_ a new SRM prepare_to_get request into the DB!
- *
- * In case of any error the operation does not proceed, but no Exception is
- * thrown! The underlying DAO logs proper error messages.
- */
- synchronized public void add(PtGPersistentChunkData chunkData,
- GridUserInterface gu) {
-
- PtGChunkDataTO to = new PtGChunkDataTO();
- to.setRequestToken(chunkData.getRequestToken().toString());
- to.setFromSURL(chunkData.getSURL().toString());
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
-
- to.setLifeTime(new Long(chunkData.getPinLifeTime().value()).intValue());
- to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
- to.setDirOption(chunkData.getDirOption().isDirectory());
- to.setNumLevel(chunkData.getDirOption().getNumLevel());
- to.setProtocolList(TransferProtocolListConverter.toDB(chunkData
- .getTransferProtocols()));
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
-
- to.setClientDN(chunkData.getUser().getDn());
- if (chunkData.getUser() instanceof AbstractGridUser) {
- if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
- to.setVomsAttributes(((AbstractGridUser) chunkData.getUser())
- .getFQANsAsString());
- }
-
- }
-
- dao.addNew(to, gu.getDn()); // add the entry and update the Primary Key
- // field!
- chunkData.setPrimaryKey(to.primaryKey()); // set the assigned PrimaryKey!
- }
-
- /**
- * Method used to establish if in Persistence there is a PtGChunkData working
- * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case
- * true is returned. In case none are found or there is any problem, false is
- * returned. This method is intended to be used by srmMv.
- */
- synchronized public boolean isSRM_FILE_PINNED(TSURL surl) {
-
- return (dao.numberInSRM_FILE_PINNED(surl.uniqueId()) > 0);
-
- }
-
- /**
- * Method used to transit the specified Collection of ReducedPtGChunkData from
- * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not
- * transited. In case of any error nothing is done, but proper error messages
- * get logged by the underlaying DAO.
- */
- synchronized public void transitSRM_FILE_PINNEDtoSRM_RELEASED(
- Collection<ReducedPtGChunkData> chunks, TRequestToken token) {
-
- if (chunks == null || chunks.isEmpty()) {
- return;
- }
- long[] primaryKeys = new long[chunks.size()];
- int index = 0;
- for (ReducedPtGChunkData chunkData : chunks) {
- if (chunkData != null) {
- primaryKeys[index] = chunkData.primaryKey();
- index++;
- }
-
- }
- dao.transitSRM_FILE_PINNEDtoSRM_RELEASED(primaryKeys, token);
- }
-
- /**
- * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of
- * all PtG Requests whose pinLifetime has expired and the state still has not
- * been changed (a user forgot to run srmReleaseFiles)!
- */
- synchronized public void transitExpiredSRM_FILE_PINNED() {
-
- List<TSURL> expiredSurls = dao.transitExpiredSRM_FILE_PINNED();
- }
-
- public void updateStatus(TRequestToken requestToken, TSURL surl,
- TStatusCode statusCode, String explanation) {
-
- dao.updateStatus(requestToken, new int[] { surl.uniqueId() },
- new String[] { surl.rawSurl() }, statusCode, explanation);
- }
-
- public void updateFromPreviousStatus(TSURL surl,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- dao.updateStatusOnMatchingStatus(new int[] { surl.uniqueId() },
- new String[] { surl.rawSurl() }, expectedStatusCode, newStatusCode,
- explanation);
-
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode,
- newStatusCode, explanation);
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- List<TSURL> surlList, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode) {
-
- int[] surlsUniqueIDs = new int[surlList.size()];
- String[] surls = new String[surlList.size()];
- int index = 0;
- for (TSURL tsurl : surlList) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode);
- }
+ private static final Logger log = LoggerFactory.getLogger(PtGChunkCatalog.class);
+
+ private static PtGChunkCatalog instance;
+
+ public static synchronized PtGChunkCatalog getInstance() {
+ if (instance == null) {
+ instance = new PtGChunkCatalog();
+ }
+ return instance;
+ }
+
+ private final PtGChunkDAO dao;
+
+ /**
+ * Private constructor that binds the catalog to its MySQL DAO implementation.
+ */
+ private PtGChunkCatalog() {
+
+ dao = PtGChunkDAOMySql.getInstance();
+ }
+
+ /**
+ * Method used to update into Persistence a retrieved PtGChunkData. In case any error occurs, the
+ * operation does not proceed but no Exception is thrown. Error messages get logged.
+ *
+ * Only fileSize, statusCode, errString and transferURL are updated, together with the request
+ * pinLifetime, the normalized StFN and the SURL unique ID.
+ */
+ synchronized public void update(PtGPersistentChunkData chunkData) {
+
+ PtGChunkDataTO to = new PtGChunkDataTO();
+ /* Primary key needed by DAO Object */
+ to.setPrimaryKey(chunkData.getPrimaryKey());
+ to.setFileSize(chunkData.getFileSize().value());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode()));
+ to.setErrString(chunkData.getStatus().getExplanation());
+ to.setTurl(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString()));
+ to.setLifeTime(PinLifetimeConverter.getInstance().toDB(chunkData.getPinLifeTime().value()));
+ to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
+ to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId()));
+ to.setClientDN(chunkData.getUser().getDn());
+ if (chunkData.getUser() instanceof AbstractGridUser) {
+ if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
+ to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString());
+ }
+
+ }
+ dao.update(to);
+ }
+
+ /**
+ * Method that returns a Collection of PtGChunkData Objects matching the supplied TRequestToken.
+ *
+ * If any of the data associated to the TRequestToken is not well formed and so does not allow a
+ * PtGChunkData Object to be created, then that part of the request is dropped and gets logged,
+ * and the processing continues with the next part. All valid chunks get returned: the others get
+ * dropped.
+ *
+ * If there are no chunks to process then an empty Collection is returned, and a message gets
+ * logged.
+ */
+ synchronized public Collection<PtGPersistentChunkData> lookup(TRequestToken rt) {
+
+ Collection<PtGChunkDataTO> chunkTOs = dao.find(rt);
+ log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkTOs);
+ ArrayList<PtGPersistentChunkData> list = new ArrayList<>();
+ if (chunkTOs.isEmpty()) {
+ log.warn("PtG CHUNK CATALOG! No chunks found in persistence for " + "specified request: {}",
+ rt);
+ return list;
+ }
+ PtGPersistentChunkData chunk;
+ for (PtGChunkDataTO chunkTO : chunkTOs) {
+ chunk = makeOne(chunkTO, rt);
+ if (chunk == null) {
+ continue;
+ }
+ list.add(chunk);
+ if (isComplete(chunkTO)) {
+ continue;
+ }
+ try {
+ dao.updateIncomplete(this.completeTO(chunkTO, chunk));
+ } catch (InvalidReducedPtGChunkDataAttributesException e) {
+ log.warn(
+ "PtG CHUNK CATALOG! unable to add missing informations on DB " + "to the request: {}",
+ e.getMessage());
+ }
+ }
+ log.debug("PtG CHUNK CATALOG: returning " + list);
+ return list;
+ }
+
+ /**
+ * Generates a PtGPersistentChunkData from the received PtGChunkDataTO.
+ *
+ * @param chunkDataTO the transfer object retrieved from persistence
+ * @param rt the request token the chunk belongs to
+ * @return the domain object, or null if the retrieved data is malformed
+ */
+ private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO, TRequestToken rt) {
+
+ StringBuilder errorSb = new StringBuilder();
+ TSURL fromSURL = null;
+ try {
+ fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL());
+ } catch (InvalidTSURLAttributesException e) {
+ errorSb.append(e);
+ }
+ if (chunkDataTO.normalizedStFN() != null) {
+ fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN());
+ }
+ if (chunkDataTO.surlUniqueID() != null) {
+ fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue());
+ }
+ // lifeTime
+ TLifeTimeInSeconds lifeTime = null;
+ try {
+ long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(chunkDataTO.lifeTime());
+ // Check for max value allowed
+ long max = StormConfiguration.getInstance().getPinLifetimeMaximum();
+ if (pinLifeTime > max) {
+ log.warn("PinLifeTime is greater than the max value allowed."
+ + " Drop the value to the max = {} seconds", max);
+ pinLifeTime = max;
+ }
+ lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS);
+ } catch (IllegalArgumentException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // dirOption
+ TDirOption dirOption = null;
+ try {
+ dirOption = new TDirOption(chunkDataTO.dirOption(), chunkDataTO.allLevelRecursive(),
+ chunkDataTO.numLevel());
+ } catch (InvalidTDirOptionAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // transferProtocols
+ TURLPrefix transferProtocols =
+ TransferProtocolListConverter.toSTORM(chunkDataTO.protocolList());
+ if (transferProtocols.size() == 0) {
+ errorSb.append("\nEmpty list of TransferProtocols or could not translate TransferProtocols!");
+ /* fail construction of PtGChunkData! */
+ transferProtocols = null;
+ }
+ // fileSize
+ TSizeInBytes fileSize = null;
+ try {
+ fileSize = TSizeInBytes.make(chunkDataTO.fileSize());
+ } catch (InvalidTSizeAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // status
+ TReturnStatus status = null;
+ TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status());
+ if (code == TStatusCode.EMPTY) {
+ errorSb.append("\nRetrieved StatusCode was not recognised: " + chunkDataTO.status());
+ } else {
+ status = new TReturnStatus(code, chunkDataTO.errString());
+ }
+ GridUserInterface gridUser = null;
+ try {
+ if (chunkDataTO.vomsAttributes() != null && !chunkDataTO.vomsAttributes().trim().equals("")) {
+ gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(),
+ chunkDataTO.vomsAttributesArray());
+ } else {
+ gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN());
+ }
+
+ } catch (IllegalArgumentException e) {
+ log.error("Unexpected error on voms grid user creation." + " IllegalArgumentException: {}",
+ e.getMessage(), e);
+ }
+ // transferURL
+ /*
+ * whatever is read is just meaningless because PtG will fill it in!!! So create an Empty TTURL
+ * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy!
+ */
+ TTURL transferURL = TTURL.makeEmpty();
+ // make PtGChunkData
+ PtGPersistentChunkData aux = null;
+ try {
+ aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime, dirOption,
+ transferProtocols, fileSize, status, transferURL);
+ aux.setPrimaryKey(chunkDataTO.primaryKey());
+ } catch (InvalidSurlRequestDataAttributesException e) {
+ dao.fail(chunkDataTO);
+ log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from "
+ + "persistence. Dropping chunk from request {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ }
+ // end...
+ return aux;
+ }
+
+ /**
+ * Adds to the received ReducedPtGChunkDataTO the normalized StFN and the SURL unique ID taken
+ * from the ReducedPtGChunkData.
+ *
+ * @param chunkTO the transfer object to complete
+ * @param chunk the domain object the missing fields are taken from
+ */
+ private void completeTO(ReducedPtGChunkDataTO chunkTO, final ReducedPtGChunkData chunk) {
+
+ chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN());
+ chunkTO.setSurlUniqueID(Integer.valueOf(chunk.fromSURL().uniqueId()));
+ }
+
+ /**
+ * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and completes it with the
+ * normalized StFN and the SURL unique ID taken from the PtGPersistentChunkData.
+ *
+ * @param chunkTO the transfer object to reduce and complete
+ * @param chunk the domain object the missing fields are taken from
+ * @return the completed reduced transfer object
+ * @throws InvalidReducedPtGChunkDataAttributesException if the reduced domain object cannot be built
+ */
+ private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO,
+ final PtGPersistentChunkData chunk) throws InvalidReducedPtGChunkDataAttributesException {
+
+ ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO);
+ this.completeTO(reducedChunkTO, this.reduce(chunk));
+ return reducedChunkTO;
+ }
+
+ /**
+ * Creates a ReducedPtGChunkData from the data contained in the received PtGPersistentChunkData.
+ *
+ * @param chunk the domain object to reduce
+ * @return the reduced domain object
+ * @throws InvalidReducedPtGChunkDataAttributesException if the SURL or status are invalid
+ */
+ private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk)
+ throws InvalidReducedPtGChunkDataAttributesException {
+
+ ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(), chunk.getStatus());
+ reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
+ return reducedChunk;
+ }
+
+ /**
+ * Creates a ReducedPtGChunkDataTO from the data contained in the received PtGChunkDataTO.
+ *
+ * @param chunkTO the transfer object to reduce
+ * @return the reduced transfer object
+ */
+ private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) {
+
+ ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO();
+ reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
+ reducedChunkTO.setFromSURL(chunkTO.fromSURL());
+ reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
+ reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID());
+ reducedChunkTO.setStatus(chunkTO.status());
+ reducedChunkTO.setErrString(chunkTO.errString());
+ return reducedChunkTO;
+ }
+
+ /**
+ * Checks whether the received PtGChunkDataTO contains the fields that the frontend does not set
+ * but the backend requires, i.e. the normalized StFN and the SURL unique ID.
+ *
+ * @param chunkTO the transfer object to check
+ * @return true if both fields are set
+ */
+ private boolean isComplete(PtGChunkDataTO chunkTO) {
+
+ return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null);
+ }
+
+ /**
+ * Method used to add into Persistence a new entry. The supplied PtGChunkData gets the primary key
+ * changed to the value assigned in Persistence.
+ *
+ * This method is intended to be used by a recursive PtG request: the parent request supplies a
+ * directory which must be expanded, so all new children requests resulting from the files in the
+ * directory are added into persistence.
+ *
+ * So this method does _not_ add a new SRM prepare_to_get request into the DB!
+ *
+ * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and
+ * explanation.
+ *
+ * In case of any error the operation does not proceed, but no Exception is thrown! Proper
+ * messages get logged by the underlying DAO.
+ */
+ synchronized public void addChild(PtGPersistentChunkData chunkData) {
+
+ PtGChunkDataTO to = new PtGChunkDataTO();
+ /* needed for now to find ID of request! Must be changed soon! */
+ to.setRequestToken(chunkData.getRequestToken().toString());
+ to.setFromSURL(chunkData.getSURL().toString());
+ to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
+ to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId()));
+
+ to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
+ to.setDirOption(chunkData.getDirOption().isDirectory());
+ to.setNumLevel(chunkData.getDirOption().getNumLevel());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode()));
+ to.setErrString(chunkData.getStatus().getExplanation());
+ to.setClientDN(chunkData.getUser().getDn());
+ if (chunkData.getUser() instanceof AbstractGridUser) {
+ if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
+ to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString());
+ }
+
+ }
+ /* add the entry and update the Primary Key field! */
+ dao.addChild(to);
+ /* set the assigned PrimaryKey! */
+ chunkData.setPrimaryKey(to.primaryKey());
+ }
+
+ public void updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode,
+ String explanation) {
+
+ dao.updateStatus(requestToken, new int[] {surl.uniqueId()}, new String[] {surl.rawSurl()},
+ statusCode, explanation);
+ }
+
+ public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode,
+ TStatusCode newStatusCode, String explanation) {
+
+ dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation);
+ }
}
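To make the new catalog's intended call sequence concrete, here is a minimal, hypothetical usage sketch. Only getInstance and updateFromPreviousStatus come from the class above; the releaseAll helper and its explanation string are illustrative assumptions, not part of this patch.

import it.grid.storm.srm.types.TRequestToken;
import it.grid.storm.srm.types.TStatusCode;

// Hypothetical caller-side sketch, not part of this patch.
public class PtGCatalogUsageSketch {

  // Moves every chunk of the given request from SRM_FILE_PINNED to
  // SRM_RELEASED, the transition srmReleaseFiles ultimately needs.
  public static void releaseAll(TRequestToken token) {
    PtGChunkCatalog.getInstance().updateFromPreviousStatus(token,
        TStatusCode.SRM_FILE_PINNED, TStatusCode.SRM_RELEASED,
        "Released by client");
  }
}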
diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java
deleted file mode 100644
index 78a837bfa..000000000
--- a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java
+++ /dev/null
@@ -1,1765 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings;
-
-import it.grid.storm.config.Configuration;
-import it.grid.storm.ea.StormEA;
-import it.grid.storm.namespace.NamespaceDirector;
-import it.grid.storm.namespace.NamespaceException;
-import it.grid.storm.namespace.StoRI;
-import it.grid.storm.namespace.naming.SURL;
-import it.grid.storm.srm.types.InvalidTSURLAttributesException;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TRequestType;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TStatusCode;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.sql.Timestamp;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Timer;
-import java.util.TimerTask;
-
-/**
- * DAO class for PtGChunkCatalog. This DAO is specifically designed to connect
- * to a MySQL DB. The raw data found in the DB tables is pre-treated in order
- * to turn it into the Object Model of StoRM. See method comments for further
- * info.
- *
- * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the
- * object model.
- *
- * @author EGRID ICTP
- * @version 3.0
- * @date June 2005
- */
-public class PtGChunkDAO {
-
- private static final Logger log = LoggerFactory.getLogger(PtGChunkDAO.class);
-
- /** String with the name of the class for the DB driver */
- private final String driver = Configuration.getInstance().getDBDriver();
- /** String referring to the URL of the DB */
- private final String url = Configuration.getInstance().getStormDbURL();
- /** String with the password for the DB */
- private final String password = Configuration.getInstance().getDBPassword();
- /** String with the user name for the DB */
- private final String name = Configuration.getInstance().getDBUserName();
-
- /** Connection to DB - WARNING!!! It is kept open all the time! */
- private Connection con = null;
- /** boolean that tells whether reconnection is needed because of MySQL bug! */
- private boolean reconnect = false;
-
- /** Singleton instance */
- private final static PtGChunkDAO dao = new PtGChunkDAO();
-
- /** timer thread that will run a task to alert when reconnecting is necessary! */
- private Timer clock = null;
- /**
- * timer task that will update the boolean signaling that a reconnection is
- * needed!
- */
- private TimerTask clockTask = null;
- /** milliseconds that must pass before reconnecting to DB */
- private final long period = Configuration.getInstance()
- .getDBReconnectPeriod() * 1000;
- /** initial delay in milliseconds before starting timer */
- private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000;
-
- private PtGChunkDAO() {
-
- setUpConnection();
-
- clock = new Timer();
- clockTask = new TimerTask() {
-
- @Override
- public void run() {
-
- reconnect = true;
- }
- }; // clock task
- clock.scheduleAtFixedRate(clockTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of the PtGChunkDAO.
- */
- public static PtGChunkDAO getInstance() {
-
- return dao;
- }
-
- /**
- * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets
- * its primaryKey changed to the one assigned by the DB.
- *
- * The supplied PtGChunkData is used to fill in only the DB table where file
- * specific info gets recorded: it does _not_ add a new request! So if
- * spurious data is supplied, it will just stay there because of a lack of a
- * parent request!
- */
- public synchronized void addChild(PtGChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: addChild - unable to get a valid connection!");
- return;
- }
- String str = null;
- PreparedStatement id = null; // statement to find out the ID associated to
- // the request token
- ResultSet rsid = null; // result set containing the ID of the request.
- try {
-
- // WARNING!!!! We are forced to run a query to get the ID of the request,
- // which should NOT be so, because the corresponding request object should
- // have been changed with the extra field! However, it is not possible at
- // the moment to perform such a change because of a strict deadline, and
- // the change could wreak havoc on the code. So we are forced to make this
- // query!!!
-
- // begin transaction
- con.setAutoCommit(false);
- printWarnings(con.getWarnings());
-
- // find ID of request corresponding to given RequestToken
- str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?";
-
- id = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- id.setString(1, to.requestToken());
- printWarnings(id.getWarnings());
-
- log.debug("PTG CHUNK DAO: addChild; {}", id.toString());
- rsid = id.executeQuery();
- printWarnings(id.getWarnings());
-
- /* ID of request in request_process! */
- int request_id = extractID(rsid);
- int id_s = fillPtGTables(to, request_id);
-
- /* end transaction! */
- con.commit();
- printWarnings(con.getWarnings());
- con.setAutoCommit(true);
- printWarnings(con.getWarnings());
-
- // update primary key reading the generated key
- to.setPrimaryKey(id_s);
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: unable to complete addChild! "
- + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e);
- rollback(con);
- } catch (Exception e) {
- log.error("PTG CHUNK DAO: unable to complete addChild! "
- + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e);
- rollback(con);
- } finally {
- close(rsid);
- close(id);
- }
- }
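addChild above delegates to extractID to pull the numeric ID out of a ResultSet; that helper's body lies outside this excerpt, so the following is only a sketch of the generated-keys idiom it presumably wraps (the name firstGeneratedKey is hypothetical):

// Sketch of the generated-keys idiom used throughout this DAO; the real
// extractID helper is defined further down in the file and may differ.
static int firstGeneratedKey(PreparedStatement stmt) throws SQLException {
  ResultSet keys = stmt.getGeneratedKeys();
  try {
    if (!keys.next()) {
      throw new SQLException("No generated key returned!");
    }
    return keys.getInt(1);
  } finally {
    keys.close();
  }
}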
-
- /**
- * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets
- * its primaryKey changed to the one assigned by the DB. The client_dn must
- * also be supplied as a String.
- *
- * The supplied PtGChunkData is used to fill in all the DB tables where file
- * specific info gets recorded: it _adds_ a new request!
- */
- public synchronized void addNew(PtGChunkDataTO to, String client_dn) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: addNew - unable to get a valid connection!");
- return;
- }
- String str = null;
- /* Result set containing the ID of the inserted new request */
- ResultSet rs_new = null;
- /* Insert new request into process_request */
- PreparedStatement addNew = null;
- /* Insert protocols for request. */
- PreparedStatement addProtocols = null;
- try {
- // begin transaction
- con.setAutoCommit(false);
- printWarnings(con.getWarnings());
-
- // add to request_queue...
- str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp) VALUES (?,?,?,?,?,?,?,?)";
- addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- printWarnings(con.getWarnings());
- /* Request type set to prepare to get! */
- addNew.setString(1,
- RequestTypeConverter.getInstance().toDB(TRequestType.PREPARE_TO_GET));
- printWarnings(addNew.getWarnings());
-
- addNew.setString(2, client_dn);
- printWarnings(addNew.getWarnings());
-
- addNew.setInt(3, to.lifeTime());
- printWarnings(addNew.getWarnings());
-
- addNew.setInt(
- 4,
- StatusCodeConverter.getInstance().toDB(
- TStatusCode.SRM_REQUEST_INPROGRESS));
- printWarnings(addNew.getWarnings());
-
- addNew.setString(5, "New PtG Request resulting from srmCopy invocation.");
- printWarnings(addNew.getWarnings());
-
- addNew.setString(6, to.requestToken());
- printWarnings(addNew.getWarnings());
-
- addNew.setInt(7, 1); // number of requested files set to 1!
- printWarnings(addNew.getWarnings());
-
- addNew.setTimestamp(8, new Timestamp(new Date().getTime()));
- printWarnings(addNew.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addNew.toString());
- addNew.execute();
- printWarnings(addNew.getWarnings());
-
- rs_new = addNew.getGeneratedKeys();
- int id_new = extractID(rs_new);
-
- // add protocols...
- str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)";
- addProtocols = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- for (Iterator<String> i = to.protocolList().iterator(); i.hasNext();) {
- addProtocols.setInt(1, id_new);
- printWarnings(addProtocols.getWarnings());
-
- addProtocols.setString(2, i.next());
- printWarnings(addProtocols.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addProtocols.toString());
- addProtocols.execute();
- printWarnings(addProtocols.getWarnings());
- }
-
- // addChild...
- int id_s = fillPtGTables(to, id_new);
-
- // end transaction!
- con.commit();
- printWarnings(con.getWarnings());
- con.setAutoCommit(true);
- printWarnings(con.getWarnings());
-
- // update primary key reading the generated key
- to.setPrimaryKey(id_s);
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: Rolling back! Unable to complete addNew! "
- + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e);
- rollback(con);
- } catch (Exception e) {
- log.error("PTG CHUNK DAO: unable to complete addNew! "
- + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e);
- rollback(con);
- } finally {
- close(rs_new);
- close(addNew);
- close(addProtocols);
- }
- }
-
- /**
- * To be used inside a transaction.
- *
- * @param to the chunk transfer object to be written
- * @param requestQueueID the ID of the parent request in the request_queue table
- * @return the ID generated for the new request_Get row
- * @throws SQLException if any of the inserts fails
- * @throws Exception if the generated IDs cannot be extracted
- */
- private synchronized int fillPtGTables(PtGChunkDataTO to, int requestQueueID)
- throws SQLException, Exception {
-
- String str = null;
- /* Result set containing the ID of the inserted request_DirOption */
- ResultSet rs_do = null;
- /* Result set containing the ID of the inserted request_Get */
- ResultSet rs_g = null;
- /* Result set containing the ID of the inserted status_Get */
- ResultSet rs_s = null;
- /* insert TDirOption for request */
- PreparedStatement addDirOption = null;
- /* insert request_Get for request */
- PreparedStatement addGet = null;
- PreparedStatement addChild = null;
-
- try {
- // first fill in TDirOption
- str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)";
- addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- printWarnings(con.getWarnings());
- addDirOption.setBoolean(1, to.dirOption());
- printWarnings(addDirOption.getWarnings());
-
- addDirOption.setBoolean(2, to.allLevelRecursive());
- printWarnings(addDirOption.getWarnings());
-
- addDirOption.setInt(3, to.numLevel());
- printWarnings(addDirOption.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addDirOption.toString());
- addDirOption.execute();
- printWarnings(addDirOption.getWarnings());
-
- rs_do = addDirOption.getGeneratedKeys();
- int id_do = extractID(rs_do);
-
- // second fill in request_Get... sourceSURL and TDirOption!
- str = "INSERT INTO request_Get (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)";
- addGet = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- printWarnings(con.getWarnings());
- addGet.setInt(1, id_do);
- printWarnings(addGet.getWarnings());
-
- addGet.setInt(2, requestQueueID);
- printWarnings(addGet.getWarnings());
-
- addGet.setString(3, to.fromSURL());
- printWarnings(addGet.getWarnings());
-
- addGet.setString(4, to.normalizedStFN());
- printWarnings(addGet.getWarnings());
-
- addGet.setInt(5, to.surlUniqueID());
- printWarnings(addGet.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addGet.toString());
- addGet.execute();
- printWarnings(addGet.getWarnings());
-
- rs_g = addGet.getGeneratedKeys();
- int id_g = extractID(rs_g);
-
- // third fill in status_Get...
- str = "INSERT INTO status_Get (request_GetID,statusCode,explanation) VALUES (?,?,?)";
- addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- printWarnings(con.getWarnings());
- addChild.setInt(1, id_g);
- printWarnings(addChild.getWarnings());
-
- addChild.setInt(2, to.status());
- printWarnings(addChild.getWarnings());
-
- addChild.setString(3, to.errString());
- printWarnings(addChild.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addChild.toString());
- addChild.execute();
- printWarnings(addChild.getWarnings());
-
- return id_g;
- } finally {
- close(rs_do);
- close(rs_g);
- close(rs_s);
- close(addDirOption);
- close(addGet);
- close(addChild);
- }
- }
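fillPtGTables is always called between setAutoCommit(false) and commit(), as addChild and addNew show above. That discipline reduces to the standard plain-JDBC pattern sketched here (the runAtomically helper is hypothetical; con and rollback are the DAO's own field and helper):

// Minimal sketch of the begin/commit/rollback discipline applied around
// fillPtGTables; illustrative, not code from this patch.
private void runAtomically(Runnable inserts) throws SQLException {
  con.setAutoCommit(false); // begin transaction
  try {
    inserts.run();          // the INSERTs that must succeed or fail together
    con.commit();           // publish all changes atomically
    con.setAutoCommit(true);
  } catch (SQLException e) {
    rollback(con);          // discard partial work, as addChild and addNew do
    throw e;
  }
}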
-
- /**
- * Method used to save the changes made to a retrieved PtGChunkDataTO, back
- * into the MySQL DB.
- *
- * Only the fileSize, transferURL, statusCode and explanation, of status_Get
- * table are written to the DB. Likewise for the request pinLifetime.
- *
- * In case of any error, an error message gets logged but no exception is
- * thrown.
- */
- public synchronized void update(PtGChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: update - unable to get a valid connection!");
- return;
- }
- PreparedStatement updateFileReq = null;
- try {
- // ready updateFileReq...
- updateFileReq = con
- .prepareStatement("UPDATE request_queue rq JOIN (status_Get sg, request_Get rg) ON (rq.ID=rg.request_queueID AND sg.request_GetID=rg.ID) "
- + "SET sg.fileSize=?, sg.transferURL=?, sg.statusCode=?, sg.explanation=?, rq.pinLifetime=?, rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? "
- + "WHERE rg.ID=?");
- printWarnings(con.getWarnings());
-
- updateFileReq.setLong(1, to.fileSize());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(2, to.turl());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(3, to.status());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(4, to.errString());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(5, to.lifeTime());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(6, to.normalizedStFN());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(7, to.surlUniqueID());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setLong(8, to.primaryKey());
- printWarnings(updateFileReq.getWarnings());
- // execute update
- log.trace("PTG CHUNK DAO: update method; {}", updateFileReq.toString());
- updateFileReq.executeUpdate();
- printWarnings(updateFileReq.getWarnings());
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO: Unable to complete update! {}",
- e.getMessage(), e);
- } finally {
- close(updateFileReq);
- }
- }
-
- /**
- * Updates the request_Get represented by the received ReducedPtGChunkDataTO
- * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID
- *
- * @param chunkTO
- */
- public synchronized void updateIncomplete(ReducedPtGChunkDataTO chunkTO) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: updateIncomplete - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_Get rg SET rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? "
- + "WHERE rg.ID=?";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- stmt.setString(1, chunkTO.normalizedStFN());
- printWarnings(stmt.getWarnings());
-
- stmt.setInt(2, chunkTO.surlUniqueID());
- printWarnings(stmt.getWarnings());
-
- stmt.setLong(3, chunkTO.primaryKey());
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - update incomplete: {}", stmt.toString());
- stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO: Unable to complete update incomplete! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * TODO WARNING! THIS IS A WORK IN PROGRESS!!!
- *
- * Method used to refresh the PtGChunkDataTO information from the MySQL DB.
- *
- * In this first version, only the statusCode and the TURL are reloaded from
- * the DB. TODO The next version must contain all the information related to
- * the Chunk!
- *
- * In case of any error, an error message gets logged but no exception is
- * thrown.
- */
-
- public synchronized PtGChunkDataTO refresh(long primary_key) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: refresh - unable to get a valid connection!");
- return null;
- }
- String queryString = null;
- PreparedStatement find = null;
- ResultSet rs = null;
-
- try {
- // get chunks of the request
- queryString = "SELECT sg.statusCode, sg.transferURL "
- + "FROM status_Get sg " + "WHERE sg.request_GetID=?";
- find = con.prepareStatement(queryString);
- printWarnings(con.getWarnings());
- find.setLong(1, primary_key);
- printWarnings(find.getWarnings());
- log.trace("PTG CHUNK DAO: refresh status method; {}", find.toString());
-
- rs = find.executeQuery();
-
- printWarnings(find.getWarnings());
- PtGChunkDataTO chunkDataTO = null;
- // The result should be unique.
- while (rs.next()) {
- chunkDataTO = new PtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setTurl(rs.getString("sg.transferURL"));
- }
- return chunkDataTO;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* Return null TransferObject! */
- return null;
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that queries the MySQL DB to find all entries matching the supplied
- * TRequestToken. The Collection contains the corresponding PtGChunkDataTO
- * objects.
- *
- * An initial simple query establishes the list of protocols associated with
- * the request. A second complex query establishes all chunks associated with
- * the request, by properly joining request_queue, request_Get, status_Get and
- * request_DirOption. The considered fields are:
- *
- * (1) From status_Get: the ID field which becomes the TOs primary key, and
- * statusCode.
- *
- * (2) From request_Get: sourceSURL
- *
- * (3) From request_queue: pinLifetime
- *
- * (4) From request_DirOption: isSourceADirectory, allLevelRecursive,
- * numOfLevels
- *
- * In case of any error, a log gets written and an empty collection is
- * returned. No exception is thrown.
- *
- * NOTE! Chunks in SRM_ABORTED status are NOT returned!
- */
- public synchronized Collection<PtGChunkDataTO> find(TRequestToken requestToken) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: find - unable to get a valid connection!");
- return new ArrayList<PtGChunkDataTO>();
- }
- String strToken = requestToken.toString();
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- str = "SELECT tp.config_ProtocolsID "
- + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID "
- + "WHERE rq.r_token=?";
-
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- List<String> protocols = new ArrayList<String>();
- find.setString(1, strToken);
- printWarnings(find.getWarnings());
-
- log.trace("PTG CHUNK DAO: find method; {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
- while (rs.next()) {
- protocols.add(rs.getString("tp.config_ProtocolsID"));
- }
- close(rs);
- close(find);
-
- // get chunks of the request
- str = "SELECT sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, rq.client_dn, rq.proxy, rg.sourceSURL, "
- + "rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, d.isSourceADirectory, "
- + "d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID "
- + "WHERE rq.r_token=? AND sg.statusCode<>?";
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- ArrayList<PtGChunkDataTO> list = new ArrayList<PtGChunkDataTO>();
- find.setString(1, strToken);
- printWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED));
- printWarnings(find.getWarnings());
-
- log.trace("PTG CHUNK DAO: find method; " + find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- PtGChunkDataTO chunkDataTO;
- while (rs.next()) {
- chunkDataTO = new PtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setRequestToken(strToken);
- chunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setClientDN(rs.getString("rq.client_dn"));
-
- /**
-  * This code is only for 1.3.18. It is a workaround to get FQANs using
-  * the proxy field on request_queue. The FE uses the proxy field of
-  * request_queue to insert a single FQAN string containing all FQANs
-  * separated by the "#" char. The proxy is a BLOB, hence it has to be
-  * properly converted into a String.
-  */
- java.sql.Blob blob = rs.getBlob("rq.proxy");
- if (!rs.wasNull() && blob != null) {
- byte[] bdata = blob.getBytes(1, (int) blob.length());
- chunkDataTO.setVomsAttributes(new String(bdata));
- }
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory"));
- chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive"));
- chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels"));
- chunkDataTO.setProtocolList(protocols);
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: ", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedPtGChunkDataTO associated to the
- * given TRequestToken expressed as String.
- */
- public synchronized Collection<ReducedPtGChunkDataTO> findReduced(String reqtoken) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!");
- return new ArrayList<ReducedPtGChunkDataTO>();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- // get reduced chunks
- String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "WHERE rq.r_token=?";
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- ArrayList<ReducedPtGChunkDataTO> list = new ArrayList<ReducedPtGChunkDataTO>();
- find.setString(1, reqtoken);
- printWarnings(find.getWarnings());
-
- log.trace("PtG CHUNK DAO! findReduced with request token; {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- ReducedPtGChunkDataTO reducedChunkDataTO = null;
- while (rs.next()) {
- reducedChunkDataTO = new ReducedPtGChunkDataTO();
- reducedChunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- reducedChunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- reducedChunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
- reducedChunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- reducedChunkDataTO.setSurlUniqueID(uniqueID);
- }
-
- list.add(reducedChunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList<ReducedPtGChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- public synchronized Collection<ReducedPtGChunkDataTO> findReduced(
- TRequestToken requestToken, int[] surlsUniqueIDs, String[] surlsArray) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!");
- return new ArrayList<ReducedPtGChunkDataTO>();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
-
- try {
-
- String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "WHERE rq.r_token=? AND ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs)
- + " AND rg.sourceSURL IN "
- + makeSurlString(surlsArray) + " ) ";
-
- find = con.prepareStatement(str);
-
- printWarnings(con.getWarnings());
-
- ArrayList<ReducedPtGChunkDataTO> list = new ArrayList<ReducedPtGChunkDataTO>();
- find.setString(1, requestToken.getValue());
- printWarnings(find.getWarnings());
-
- log.trace("PtG CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- ReducedPtGChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedPtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList<ReducedPtGChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
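findReduced builds its IN clauses with makeSURLUniqueIDWhere and makeSurlString, whose bodies fall outside this excerpt. As a rough sketch of what such a helper does (hypothetical name and assumed behavior, not the real implementation):

// Builds a quoted SQL IN-list such as ('surl1','surl2') from an array;
// a hypothetical analogue of the makeSurlString helper used above.
static String quotedInClause(String[] values) {
  StringBuilder sb = new StringBuilder("(");
  for (int i = 0; i < values.length; i++) {
    if (i > 0) {
      sb.append(",");
    }
    sb.append("'").append(values[i]).append("'");
  }
  return sb.append(")").toString();
}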
-
- /**
- * Method that returns a Collection of ReducedPtGChunkDataTO associated to the
- * given griduser, and whose SURLs are contained in the supplied array of
- * Strings.
- */
- public synchronized Collection<ReducedPtGChunkDataTO> findReduced(
- String griduser, int[] surlUniqueIDs, String[] surls) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!");
- return new ArrayList<ReducedPtGChunkDataTO>();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- /*
-  * NOTE: we also search on the fromSURL because otherwise we would lose
-  * all request_Get rows whose uniqueID is not set, because they have not
-  * yet been used by anybody
-  */
- // get reduced chunks
- String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "WHERE rq.client_dn=? AND ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs)
- + " AND rg.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- ArrayList<ReducedPtGChunkDataTO> list = new ArrayList<ReducedPtGChunkDataTO>();
- find.setString(1, griduser);
- printWarnings(find.getWarnings());
-
- log.trace("PtG CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- ReducedPtGChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedPtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList<ReducedPtGChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method used in extraordinary situations to signal that data retrieved from
- * the DB was malformed and could not be translated into the StoRM object
- * model.
- *
- * This method attempts to change the status of the request to SRM_FAILURE and
- * record it in the DB.
- *
- * This operation could potentially fail because the source of the malformed
- * problems could be a problematic DB; indeed, initially only log messages
- * were recorded.
- *
- * Yet it soon became clear that the source of malformed data was the clients
- * and/or the FE recording info in the DB. In these circumstances the client
- * would see its request as being in the SRM_IN_PROGRESS state forever. Hence
- * the pressing need to inform it of the encountered problems.
- */
- public synchronized void signalMalformedPtGChunk(PtGChunkDataTO auxTO) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: signalMalformedPtGChunk - unable to get a valid connection!");
- return;
- }
- String signalSQL = "UPDATE status_Get SET statusCode="
- + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE)
- + ", explanation=? WHERE request_GetID=" + auxTO.primaryKey();
- PreparedStatement signal = null;
- try {
- signal = con.prepareStatement(signalSQL);
- printWarnings(con.getWarnings());
- /* Prepared statement spares DB-specific String notation! */
- signal.setString(1, "Request is malformed!");
- printWarnings(signal.getWarnings());
-
- log.trace("PTG CHUNK DAO: signalMalformed; {}", signal.toString());
- signal.executeUpdate();
- printWarnings(signal.getWarnings());
- } catch (SQLException e) {
- log.error("PtGChunkDAO! Unable to signal in DB that the request was "
- + "malformed! Request: {}; Exception: {}", auxTO.toString(), e.toString());
- } finally {
- close(signal);
- }
- }
-
- /**
- * Method that returns the number of Get requests on the given SURL, that are
- * in SRM_FILE_PINNED state.
- *
- * This method is intended to be used by PtGChunkCatalog in the
- * isSRM_FILE_PINNED method invocation.
- *
- * In case of any error, 0 is returned.
- */
- // request_Get table
- public synchronized int numberInSRM_FILE_PINNED(int surlUniqueID) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: numberInSRM_FILE_PINNED - unable to get a valid connection!");
- return 0;
- }
- String str = "SELECT COUNT(rg.ID) "
- + "FROM status_Get sg JOIN request_Get rg "
- + "ON (sg.request_GetID=rg.ID) "
- + "WHERE rg.sourceSURL_uniqueID=? AND sg.statusCode=?";
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- /* Prepared statement spares DB-specific String notation! */
- find.setInt(1, surlUniqueID);
- printWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
- printWarnings(find.getWarnings());
-
- log.trace("PtG CHUNK DAO - numberInSRM_FILE_PINNED method: {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- int numberFilePinned = 0;
- if (rs.next()) {
- numberFilePinned = rs.getInt(1);
- }
- return numberFilePinned;
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to determine numberInSRM_FILE_PINNED! "
- + "Returning 0! {}", e.getMessage(), e);
- return 0;
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that updates all expired requests in SRM_FILE_PINNED state to
- * SRM_RELEASED.
- *
- * This is needed when the client forgets to invoke srmReleaseFiles().
- *
- * @return the list of expired SURLs that are no longer pinned by any request
- */
- public synchronized List<TSURL> transitExpiredSRM_FILE_PINNED() {
-
- // maps each expired source SURL string to its SURL unique ID
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: transitExpiredSRM_FILE_PINNED - unable to get a valid connection!");
- return new ArrayList<TSURL>();
- }
- HashMap<String, Integer> expiredSurlMap = new HashMap<String, Integer>();
- String str = null;
- // Statement statement = null;
- PreparedStatement preparedStatement = null;
-
- /* Find all expired surls */
- try {
- // start transaction
- con.setAutoCommit(false);
-
- str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID "
- + "FROM request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "WHERE sg.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime ";
-
- preparedStatement = con.prepareStatement(str);
- preparedStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
-
- ResultSet res = preparedStatement.executeQuery();
- printWarnings(preparedStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rg.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID"));
- /* If the uniqueID is not set, compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("PtGChunkDAO! unable to build the TSURL from {}: "
- + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage(), e);
- }
- }
- expiredSurlMap.put(sourceSURL, uniqueID);
- }
-
- if (expiredSurlMap.isEmpty()) {
- commit(con);
- log
- .trace("PtGChunkDAO! No chunk of PtG request was transited from SRM_FILE_PINNED to SRM_RELEASED.");
- return new ArrayList<TSURL>();
- }
- } catch (SQLException e) {
- log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e);
- rollback(con);
- return new ArrayList<TSURL>();
- } finally {
- close(preparedStatement);
- }
-
- /* Update status of all expired surls to SRM_RELEASED */
-
- preparedStatement = null;
- try {
-
- str = "UPDATE "
- + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "SET sg.statusCode=? "
- + "WHERE sg.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime ";
-
- preparedStatement = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- preparedStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- printWarnings(preparedStatement.getWarnings());
-
- preparedStatement.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
- printWarnings(preparedStatement.getWarnings());
-
- log.trace("PtG CHUNK DAO - transitExpiredSRM_FILE_PINNED method: {}",
- preparedStatement.toString());
-
- int count = preparedStatement.executeUpdate();
- printWarnings(preparedStatement.getWarnings());
-
- if (count == 0) {
- log.trace("PtGChunkDAO! No chunk of PtG request was "
- + "transited from SRM_FILE_PINNED to SRM_RELEASED.");
- } else {
- log.info("PtGChunkDAO! {} chunks of PtG requests were transited from"
- + " SRM_FILE_PINNED to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("PtGChunkDAO! Unable to transit expired SRM_FILE_PINNED chunks "
- + "of PtG requests, to SRM_RELEASED! {}", e.getMessage(), e);
- rollback(con);
- return new ArrayList<TSURL>();
- } finally {
- close(preparedStatement);
- }
-
- /*
-  * In order to enhance performance, here we could check whether there is any
-  * file system with tape (T1D0, T1D1); if there is none, we can skip the
-  * following.
-  */
-
- /* Find all not expired surls from PtG and BoL */
-
- HashSet<Integer> pinnedSurlSet = new HashSet<Integer>();
- try {
-
- // SURLs pinned by PtGs
- str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM "
- + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "WHERE sg.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime ";
-
- preparedStatement = con.prepareStatement(str);
- preparedStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
-
- ResultSet res = preparedStatement.executeQuery();
- printWarnings(preparedStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rg.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID"));
- /* If the uniqueID is not set, compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("PtGChunkDAO! unable to build the TSURL from {}. "
- + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage());
- }
- }
- pinnedSurlSet.add(uniqueID);
- }
-
- close(preparedStatement);
-
- // SURLs pinned by BoLs
- str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM "
- + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "WHERE sb.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime ";
-
- preparedStatement = con.prepareStatement(str);
- preparedStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
-
- res = preparedStatement.executeQuery();
- printWarnings(preparedStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rb.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID"));
- /* If the uniqueID is not set, compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("PtGChunkDAO! unable to build the TSURL from {}. "
- + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage(), e);
- }
- }
- pinnedSurlSet.add(uniqueID);
- }
- commit(con);
- } catch (SQLException e) {
- log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e);
- rollback(con);
- } finally {
- close(preparedStatement);
- }
-
- ArrayList<TSURL> expiredSurlList = new ArrayList<TSURL>();
- /* Remove the Extended Attribute pinned if there is not a valid surl on it */
- TSURL surl;
- for (Entry<String, Integer> surlEntry : expiredSurlMap.entrySet()) {
- if (!pinnedSurlSet.contains(surlEntry.getValue())) {
- try {
- surl = TSURL.makeFromStringValidate(surlEntry.getKey());
- } catch (InvalidTSURLAttributesException e) {
- log.error("Invalid SURL, cannot release the pin "
- + "(Extended Attribute): {}", surlEntry.getKey());
- continue;
- }
- expiredSurlList.add(surl);
- StoRI stori;
- try {
- stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl);
- } catch (Throwable e) {
- log.error("Invalid SURL {} cannot release the pin. {}: {}",
- surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage(), e);
- continue;
- }
-
- if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) {
- StormEA.removePinned(stori.getAbsolutePath());
- }
- }
- }
- return expiredSurlList;
- }
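The tail of transitExpiredSRM_FILE_PINNED is a set difference: an expired SURL loses its extended-attribute pin only if no other PtG or BoL request still pins it. A compact sketch of that decision (hypothetical helper; java.util types as imported by this file):

// Keep only the expired SURLs whose unique ID is absent from the
// still-pinned set: those are the ones whose EA pin can be removed.
private static List<String> surlsToUnpin(HashMap<String, Integer> expiredSurlMap,
    HashSet<Integer> pinnedSurlSet) {
  List<String> toUnpin = new ArrayList<String>();
  for (Entry<String, Integer> entry : expiredSurlMap.entrySet()) {
    if (!pinnedSurlSet.contains(entry.getValue())) {
      toUnpin.add(entry.getKey());
    }
  }
  return toUnpin;
}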
-
- /**
- * Method that updates all chunks in SRM_FILE_PINNED state, into SRM_RELEASED.
- * An array of long representing the primary key of each chunk is required:
- * only those chunks get their status changed, provided their current status
- * is SRM_FILE_PINNED.
- *
- * This method is used during srmReleaseFiles.
- *
- * In case of any error nothing happens and no exception is thrown, but proper
- * messages get logged.
- */
- public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE status_Get sg SET sg.statusCode=? "
- + "WHERE sg.statusCode=? AND sg.request_GetID IN " + makeWhereString(ids);
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- stmt.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- printWarnings(stmt.getWarnings());
-
- stmt.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}",
- stmt.toString());
- int count = stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("PtG CHUNK DAO! No chunk of PtG request was "
- + "transited from SRM_FILE_PINNED to SRM_RELEASED.");
- } else {
- log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited "
- + "from SRM_FILE_PINNED to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to transit chunks"
- + " from SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
-  * Same as above, but restricted to the chunks belonging to the given request
-  * token; if the token is null, all supplied chunks are considered.
-  *
-  * @param ids the primary keys of the chunks to transit
-  * @param token the request token the chunks must belong to, possibly null
-  */
- public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids,
- TRequestToken token) {
-
- if (token == null) {
- transitSRM_FILE_PINNEDtoSRM_RELEASED(ids);
- return;
- }
-
- /*
-  * If a request token has been specified, only the related Get requests
-  * have to be released. This is done by adding the rq.r_token clause to
-  * the WHERE subquery.
-  */
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - "
- + "unable to get a valid connection!");
- return;
- }
-
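- // Note: the request token is concatenated directly into the SQL text, while
- // the two status codes are bound as parameters below.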
- String str = "UPDATE "
- + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "SET sg.statusCode=? " + "WHERE sg.statusCode=? AND rq.r_token='"
- + token.toString() + "' AND rg.ID IN " + makeWhereString(ids);
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- stmt.setInt(1,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- printWarnings(stmt.getWarnings());
-
- stmt.setInt(2,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt.toString());
- int count = stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("PtG CHUNK DAO! No chunk of PtG request was"
- + " transited from SRM_FILE_PINNED to SRM_RELEASED.");
- } else {
- log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited from "
- + "SRM_FILE_PINNED to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to transit chunks from "
- + "SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- public synchronized void updateStatus(TRequestToken requestToken,
- int[] surlUniqueIDs, String[] surls, TStatusCode statusCode,
- String explanation) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: updateStatus - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE "
- + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "SET sg.statusCode=? , sg.explanation=? " + "WHERE rq.r_token='"
- + requestToken.toString() + "' AND ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rg.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- stmt.setInt(1, StatusCodeConverter.getInstance().toDB(statusCode));
- printWarnings(stmt.getWarnings());
-
- stmt.setString(2, (explanation != null ? explanation : ""));
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - updateStatus: {}", stmt.toString());
- int count = stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("PtG CHUNK DAO! No chunk of PtG request was updated to {}.",
- statusCode);
- } else {
- log.info("PtG CHUNK DAO! {} chunks of PtG requests were updated to {}.",
- count, statusCode);
- }
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to updated to {}! {}", statusCode,
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- public synchronized void updateStatusOnMatchingStatus(int[] surlsUniqueIDs,
- String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surls == null || explanation == null
- || surlsUniqueIDs.length == 0 || surls.length == 0
- || surlsUniqueIDs.length != surls.length) {
-
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surls="
- + surls + " explanation=" + explanation);
- }
-
- doUpdateStatusOnMatchingStatus(null, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode, explanation, false, true, true);
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode, String explanation) {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || explanation == null) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken + " explanation="
- + explanation);
- }
- doUpdateStatusOnMatchingStatus(requestToken, null, null,
- expectedStatusCode, newStatusCode, explanation, true, false, true);
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode)
- throws IllegalArgumentException {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0
- || surls.length == 0 || surlsUniqueIDs.length != surls.length) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken
- + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls);
- }
- doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode, null, true, true, false);
- }
-
- public synchronized void doUpdateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation, boolean withRequestToken, boolean withSurls,
- boolean withExplanation) throws IllegalArgumentException {
-
- if ((withRequestToken && requestToken == null)
- || (withExplanation && explanation == null)
- || (withSurls && (surlUniqueIDs == null || surls == null))) {
-
- throw new IllegalArgumentException(
- "Unable to perform the doUpdateStatusOnMatchingStatus, "
- + "invalid arguments: withRequestToken=" + withRequestToken
- + " requestToken=" + requestToken + " withSurls=" + withSurls
- + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls
- + " withExplaination=" + withExplanation + " explanation="
- + explanation);
- }
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) "
- + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "SET sg.statusCode=? ";
- if (withExplanation) {
- str += " , " + buildExpainationSet(explanation);
- }
- str += " WHERE sg.statusCode=? ";
- if (withRequestToken) {
- str += " AND " + buildTokenWhereClause(requestToken);
- }
- if (withSurls) {
- str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls);
- }
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode));
- printWarnings(stmt.getWarnings());
-
- stmt
- .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode));
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString());
- int count = stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("PtG CHUNK DAO! No chunk of PtG request was updated "
- + "from {} to {}.", expectedStatusCode, newStatusCode);
- } else {
- log.debug("PtG CHUNK DAO! {} chunks of PtG requests were updated "
- + "from {} to {}.", count, expectedStatusCode, newStatusCode);
- }
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to updated from {} to {}! {}",
- expectedStatusCode, newStatusCode, e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * Auxiliary method used to close a ResultSet
- */
- private void close(ResultSet rset) {
-
- if (rset != null) {
- try {
- rset.close();
- } catch (Exception e) {
- log.error("PTG CHUNK DAO! Unable to close ResultSet! Error: {}",
- e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary method used to close a Statement
- */
- private void close(Statement stmt) {
-
- if (stmt != null) {
- try {
- stmt.close();
- } catch (Exception e) {
- log.error("PTG CHUNK DAO! Unable to close Statement {} - Error: {}",
- stmt.toString(), e.getMessage(), e);
- }
- }
- }
-
- private void commit(Connection con) {
-
- if (con != null) {
- try {
- con.commit();
- con.setAutoCommit(true);
- } catch (SQLException e) {
- log.error("PtG, SQL Exception: {}", e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary method used to roll back a failed transaction
- */
- private void rollback(Connection con) {
-
- if (con != null) {
- try {
- con.rollback();
- con.setAutoCommit(true);
- log.error("PTG CHUNK DAO: roll back successful!");
- } catch (SQLException e2) {
- log.error("PTG CHUNK DAO: roll back failed! {}", e2.getMessage(), e2);
- }
- }
- }
-
- /**
- * Private method that returns the generated ID: it throws an exception in
- * case of any problem!
- */
- private int extractID(ResultSet rs) throws Exception {
-
- if (rs == null) {
- throw new Exception("PTG CHUNK DAO! Null ResultSet!");
- }
- if (rs.next()) {
- return rs.getInt(1);
- } else {
- log.error("PTG CHUNK DAO! It was not possible to establish "
- + "the assigned autoincrement primary key!");
- throw new Exception("PTG CHUNK DAO! It was not possible to"
- + " establish the assigned autoincrement primary key!");
- }
- }
-
- /**
- * Method that returns a String containing all IDs.
- */
- private String makeWhereString(long[] rowids) {
-
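- // e.g. rowids {10, 11, 12} produce the string "(10,11,12)"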
- StringBuilder sb = new StringBuilder("(");
- int n = rowids.length;
- for (int i = 0; i < n; i++) {
- sb.append(rowids[i]);
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Method that returns a String containing all SURL unique IDs.
- */
- private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) {
-
- StringBuilder sb = new StringBuilder("(");
- for (int i = 0; i < surlUniqueIDs.length; i++) {
- if (i > 0) {
- sb.append(",");
- }
- sb.append(surlUniqueIDs[i]);
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Method that returns a String containing all SURLs.
- */
- private String makeSurlString(String[] surls) {
-
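- // Each valid SURL contributes both its normal form and its query form, so the
- // resulting IN list holds up to two quoted entries per SURL; invalid SURLs are skipped.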
- StringBuilder sb = new StringBuilder("(");
- int n = surls.length;
-
- for (int i = 0; i < n; i++) {
-
- SURL requestedSURL;
-
- try {
- requestedSURL = SURL.makeSURLfromString(surls[i]);
- } catch (NamespaceException e) {
- log.error(e.getMessage());
- log.debug("Skip '{}' during query creation", surls[i]);
- continue;
- }
-
- sb.append("'");
- sb.append(requestedSURL.getNormalFormAsString());
- sb.append("','");
- sb.append(requestedSURL.getQueryFormAsString());
- sb.append("'");
-
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
-
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Auxiliary method that sets up the connection to the DB.
- */
- private boolean setUpConnection() {
-
- boolean response = false;
- try {
- Class.forName(driver);
- con = DriverManager.getConnection(url, name, password);
- printWarnings(con.getWarnings());
- response = con.isValid(0);
- } catch (ClassNotFoundException | SQLException e) {
- log.error("PTG CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e);
- }
- return response;
- }
-
- /**
- * Auxiliary method that checks whether it is time to reset the connection
- * and, if so, takes it down and sets it up again.
- */
- private boolean checkConnection() {
-
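- // The reconnect flag is raised periodically by the DAO's reconnection timer task;
- // when it is set, the old connection is dropped and a new one is opened.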
- boolean response = true;
- if (reconnect) {
- log.debug("PTG CHUNK DAO! Reconnecting to DB! ");
- takeDownConnection();
- response = setUpConnection();
- if (response) {
- reconnect = false;
- }
- }
- return response;
- }
-
- /**
- * Auxiliary method that takes down a connection to the DB.
- */
- private void takeDownConnection() {
-
- if (con != null) {
- try {
- con.close();
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO! Exception in takeDownConnection method: {}",
- e.getMessage(), e);
- }
- }
- }
-
- public Collection<PtGChunkDataTO> find(int[] surlsUniqueIDs,
- String[] surlsArray, String dn) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0 || dn == null) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " dn=" + dn);
- }
- return find(surlsUniqueIDs, surlsArray, dn, true);
- }
-
- public Collection<PtGChunkDataTO> find(int[] surlsUniqueIDs,
- String[] surlsArray) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray);
- }
- return find(surlsUniqueIDs, surlsArray, null, false);
- }
-
- private synchronized Collection<PtGChunkDataTO> find(int[] surlsUniqueIDs,
- String[] surlsArray, String dn, boolean withDn)
- throws IllegalArgumentException {
-
- if ((withDn && dn == null) || surlsUniqueIDs == null
- || surlsUniqueIDs.length == 0 || surlsArray == null
- || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn);
- }
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: find - unable to get a valid connection!");
- return new ArrayList<PtGChunkDataTO>();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
-
- try {
-
- String str = "SELECT rq.ID, rq.r_token, sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, "
- + "rq.client_dn, rq.proxy, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, "
- + "d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID "
- + "WHERE ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs)
- + " AND rg.sourceSURL IN "
- + makeSurlString(surlsArray) + " )";
-
- if (withDn) {
-
- str += " AND rq.client_dn=\'" + dn + "\'";
- }
-
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- List<PtGChunkDataTO> list = new ArrayList<PtGChunkDataTO>();
-
- log.trace("PTG CHUNK DAO - find method: {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
- PtGChunkDataTO chunkDataTO = null;
- while (rs.next()) {
-
- chunkDataTO = new PtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setRequestToken(rs.getString("rq.r_token"));
- chunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
-
- chunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setClientDN(rs.getString("rq.client_dn"));
-
- /**
- * This code is only for version 1.3.18. It is a workaround to get FQANs
- * using the proxy field on request_queue. The FE uses the proxy field of
- * request_queue to insert a single FQAN string containing all FQANs
- * separated by the "#" char. The proxy is a BLOB, hence it has to be
- * properly converted into a string.
- */
- java.sql.Blob blob = rs.getBlob("rq.proxy");
- if (!rs.wasNull() && blob != null) {
- byte[] bdata = blob.getBytes(1, (int) blob.length());
- chunkDataTO.setVomsAttributes(new String(bdata));
- }
- chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory"));
- chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive"));
- chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels"));
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
- return new ArrayList<PtGChunkDataTO>();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- private String buildExpainationSet(String explanation) {
-
- return " sg.explanation='" + explanation + "' ";
- }
-
- private String buildTokenWhereClause(TRequestToken requestToken) {
-
- return " rq.r_token='" + requestToken.toString() + "' ";
- }
-
- private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) {
-
- return " ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rg.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java
index df12c1e1b..7b7e42ec7 100644
--- a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java
+++ b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java
@@ -4,13 +4,41 @@
*/
package it.grid.storm.catalogs;
-import it.grid.storm.common.types.SizeUnit;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
import it.grid.storm.common.types.TURLPrefix;
import it.grid.storm.common.types.TimeUnit;
-import it.grid.storm.config.Configuration;
+import it.grid.storm.config.StormConfiguration;
import it.grid.storm.griduser.AbstractGridUser;
import it.grid.storm.griduser.GridUserInterface;
import it.grid.storm.griduser.GridUserManager;
+import it.grid.storm.persistence.converter.FileLifetimeConverter;
+import it.grid.storm.persistence.converter.FileStorageTypeConverter;
+import it.grid.storm.persistence.converter.OverwriteModeConverter;
+import it.grid.storm.persistence.converter.PinLifetimeConverter;
+import it.grid.storm.persistence.converter.SizeInBytesIntConverter;
+import it.grid.storm.persistence.converter.SpaceTokenStringConverter;
+import it.grid.storm.persistence.converter.StatusCodeConverter;
+import it.grid.storm.persistence.converter.TURLConverter;
+import it.grid.storm.persistence.converter.TransferProtocolListConverter;
+import it.grid.storm.persistence.dao.PtPChunkDAO;
+import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidPtPPersistentChunkDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidReducedPtPChunkDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.impl.mysql.PtPChunkDAOMySql;
+import it.grid.storm.persistence.model.PtPChunkDataTO;
+import it.grid.storm.persistence.model.PtPPersistentChunkData;
+import it.grid.storm.persistence.model.ReducedPtPChunkData;
+import it.grid.storm.persistence.model.ReducedPtPChunkDataTO;
import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException;
import it.grid.storm.srm.types.InvalidTSURLAttributesException;
import it.grid.storm.srm.types.InvalidTSizeAttributesException;
@@ -26,575 +54,412 @@
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.srm.types.TTURL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class that represents StoRMs PtPChunkCatalog: it collects PtPChunkData and
- * provides methods for looking up a PtPChunkData based on TRequestToken, as
- * well as for updating data into persistence. Methods are also supplied to
- * evaluate if a SURL is in SRM_SPACE_AVAILABLE state, and to transit expired
- * SURLs in SRM_SPACE_AVAILABLE state to SRM_FILE_LIFETIME_EXPIRED.
- *
- * @author EGRID - ICTP Trieste
- * @date June, 2005
- * @version 3.0
- */
public class PtPChunkCatalog {
- private static final Logger log = LoggerFactory
- .getLogger(PtPChunkCatalog.class);
-
- /* only instance of PtPChunkCatalog present in StoRM! */
- private static final PtPChunkCatalog cat = new PtPChunkCatalog();
- private final PtPChunkDAO dao = PtPChunkDAO.getInstance();
-
- private PtPChunkCatalog() {}
-
- /**
- * Method that returns the only instance of PtPChunkCatalog available.
- */
- public static PtPChunkCatalog getInstance() {
-
- return cat;
- }
-
- /**
- * Method used to update into Persistence a retrieved PtPChunkData.
- */
- synchronized public void update(PtPPersistentChunkData chunkData) {
-
- PtPChunkDataTO to = new PtPChunkDataTO();
- /* Primary key needed by DAO Object */
- to.setPrimaryKey(chunkData.getPrimaryKey());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setTransferURL(TURLConverter.getInstance().toDB(
- chunkData.getTransferURL().toString()));
- to.setPinLifetime(PinLifetimeConverter.getInstance().toDB(
- chunkData.pinLifetime().value()));
- to.setFileLifetime(FileLifetimeConverter.getInstance().toDB(
- chunkData.fileLifetime().value()));
- to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB(
- chunkData.fileStorageType()));
- to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB(
- chunkData.overwriteOption()));
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
- to.setClientDN(chunkData.getUser().getDn());
- if (chunkData.getUser() instanceof AbstractGridUser) {
- if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
- to.setVomsAttributes(((AbstractGridUser) chunkData.getUser())
- .getFQANsAsString());
- }
-
- }
- dao.update(to);
- }
-
- /**
- * Method that returns a Collection of PtPChunkData Objects matching the
- * supplied TRequestToken. If any of the data associated to the TRequestToken
- * is not well formed and so does not allow a PtPChunkData Object to be
- * created, then that part of the request is dropped, gets logged and an
- * attempt is made to write in the DB that the chunk was malformed; the
- * processing continues with the next part. Only the valid chunks get
- * returned. If there are no chunks to process then an empty Collection is
- * returned, and a message gets logged. NOTE! Chunks in SRM_ABORTED status
- * are NOT returned! This is important because this method is intended to be
- * used by the Feeders to fetch all chunks in the request, and aborted chunks
- * should not be picked up for processing!
- */
- synchronized public Collection<PtPPersistentChunkData> lookup(
- final TRequestToken rt) {
-
- Collection<PtPChunkDataTO> chunkTOs = dao.find(rt);
- log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs);
- return buildChunkDataList(chunkTOs);
- }
-
- /**
- * Private method used to create a PtPChunkData object, from a PtPChunkDataTO
- * and TRequestToken. If a chunk cannot be created, an error message gets
- * logged and an attempt is made to signal in the DB that the chunk is
- * malformed.
- */
- private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) {
-
- StringBuilder errorSb = new StringBuilder();
- // toSURL
- TSURL toSURL = null;
- try {
- toSURL = TSURL.makeFromStringValidate(auxTO.toSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (auxTO.normalizedStFN() != null) {
- toSURL.setNormalizedStFN(auxTO.normalizedStFN());
- }
- if (auxTO.surlUniqueID() != null) {
- toSURL.setUniqueID(auxTO.surlUniqueID().intValue());
- }
- // pinLifetime
- TLifeTimeInSeconds pinLifetime = null;
- try {
- long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(
- auxTO.pinLifetime());
- // Check for max value allowed
- long max = Configuration.getInstance().getPinLifetimeMaximum();
- if (pinLifeTime > max) {
- log.warn("PinLifeTime is greater than the max value allowed. Drop the "
- + "value to the max = {} seconds", max);
- pinLifeTime = max;
- }
- pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // fileLifetime
- TLifeTimeInSeconds fileLifetime = null;
- try {
- fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter
- .getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // fileStorageType
- TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance()
- .toSTORM(auxTO.fileStorageType());
- if (fileStorageType == TFileStorageType.EMPTY) {
- errorSb.append("\nTFileStorageType could not be translated from "
- + "its String representation! String: " + auxTO.fileStorageType());
- // Use the default value defined in Configuration.
- fileStorageType = TFileStorageType.getTFileStorageType(Configuration
- .getInstance().getDefaultFileStorageType());
- errorSb.append("\nUsed the default TFileStorageType as defined "
- + "in StoRM config.: " + fileStorageType);
- }
- // expectedFileSize
- //
- // WARNING! A converter is used because the DB uses 0 for empty, whereas
- // StoRM object model does allow a 0 size! Since this is an optional
- // field
- // in the SRM specs, null must be converted explicitly to Empty
- // TSizeInBytes
- // because it is indeed well formed!
- TSizeInBytes expectedFileSize = null;
- TSizeInBytes emptySize = TSizeInBytes.makeEmpty();
- long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM(
- auxTO.expectedFileSize());
- if (emptySize.value() == sizeTranslation) {
- expectedFileSize = emptySize;
- } else {
- try {
- expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize(),
- SizeUnit.BYTES);
- } catch (InvalidTSizeAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- }
- // spaceToken!
- //
- // WARNING! A converter is still needed because of DB logic for missing
- // SpaceToken makes use of NULL, whereas StoRM object model does not
- // allow
- // for null! It makes use of a specific Empty type.
- //
- // Indeed, the SpaceToken field is optional, so a request with a null
- // value
- // for the SpaceToken field in the DB, _is_ well formed!
- TSpaceToken spaceToken = null;
- TSpaceToken emptyToken = TSpaceToken.makeEmpty();
- /**
- * convert empty string representation of DPM into StoRM representation;
- */
- String spaceTokenTranslation = SpaceTokenStringConverter.getInstance()
- .toStoRM(auxTO.spaceToken());
- if (emptyToken.toString().equals(spaceTokenTranslation)) {
- spaceToken = emptyToken;
- } else {
- try {
- spaceToken = TSpaceToken.make(spaceTokenTranslation);
- } catch (InvalidTSpaceTokenAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- }
- // overwriteOption!
- TOverwriteMode overwriteOption = OverwriteModeConverter.getInstance()
- .toSTORM(auxTO.overwriteOption());
- if (overwriteOption == TOverwriteMode.EMPTY) {
- errorSb.append("\nTOverwriteMode could not be translated "
- + "from its String representation! String: " + auxTO.overwriteOption());
- overwriteOption = null;
- }
- // transferProtocols
- TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO
- .protocolList());
- if (transferProtocols.size() == 0) {
- errorSb.append("\nEmpty list of TransferProtocols "
- + "or could not translate TransferProtocols!");
- transferProtocols = null; // fail construction of PtPChunkData!
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance()
- .toSTORM(auxTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + auxTO.status());
- } else {
- status = new TReturnStatus(code, auxTO.errString());
- }
- GridUserInterface gridUser = null;
- try {
- if (auxTO.vomsAttributes() != null
- && !auxTO.vomsAttributes().trim().equals("")) {
- gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(),
- auxTO.vomsAttributesArray());
- } else {
- gridUser = GridUserManager.makeGridUser(auxTO.clientDN());
- }
-
- } catch (IllegalArgumentException e) {
- log.error("Unexpected error on voms grid user creation. "
- + "IllegalArgumentException: {}", e.getMessage(), e);
- }
-
- // transferURL
- /**
- * whatever is read is just meaningless because PtP will fill it in!!! So
- * create an Empty TTURL by default! Vital to avoid problems with unknown
- * DPM NULL/EMPTY logic policy!
- */
- TTURL transferURL = TTURL.makeEmpty();
- // make PtPChunkData
- PtPPersistentChunkData aux = null;
- try {
- aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime,
- fileLifetime, fileStorageType, spaceToken, expectedFileSize,
- transferProtocols, overwriteOption, status, transferURL);
- aux.setPrimaryKey(auxTO.primaryKey());
- } catch (InvalidPtPPersistentChunkDataAttributesException e) {
- dao.signalMalformedPtPChunk(auxTO);
- log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
- + " from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- } catch (InvalidPtPDataAttributesException e) {
- dao.signalMalformedPtPChunk(auxTO);
- log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
- + " from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- } catch (InvalidFileTransferDataAttributesException e) {
- dao.signalMalformedPtPChunk(auxTO);
- log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
- + " from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- } catch (InvalidSurlRequestDataAttributesException e) {
- dao.signalMalformedPtPChunk(auxTO);
- log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
- + " from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- *
- * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique
- * ID taken from the PtPChunkData
- *
- * @param chunkTO
- * @param chunk
- */
- private void completeTO(ReducedPtPChunkDataTO chunkTO,
- final ReducedPtPChunkData chunk) {
-
- chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN());
- chunkTO.setSurlUniqueID(new Integer(chunk.toSURL().uniqueId()));
- }
-
- /**
- *
- * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and
- * completes it with the normalized StFN and the SURL unique ID taken from the
- * PtGChunkData
- *
- * @param chunkTO
- * @param chunk
- * @return
- * @throws InvalidReducedPtPChunkDataAttributesException
- */
- private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO,
- final PtPPersistentChunkData chunk)
- throws InvalidReducedPtPChunkDataAttributesException {
-
- ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO);
- this.completeTO(reducedChunkTO, this.reduce(chunk));
- return reducedChunkTO;
- }
-
- /**
- * Creates a ReducedPtPChunkData from the data contained in the received
- * PtPChunkData
- *
- * @param chunk
- * @return
- * @throws InvalidReducedPtPChunkDataAttributesException
- */
- private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk)
- throws InvalidReducedPtPChunkDataAttributesException {
-
- ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(),
- chunk.getStatus(), chunk.fileStorageType(), chunk.fileLifetime());
- reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
- return reducedChunk;
- }
-
- /**
- * Creates a ReducedPtPChunkDataTO from the data contained in the received
- * PtPChunkDataTO
- *
- * @param chunkTO
- * @return
- */
- private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) {
-
- ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO();
- reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
- reducedChunkTO.setToSURL(chunkTO.toSURL());
- reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
- reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID());
- reducedChunkTO.setStatus(chunkTO.status());
- reducedChunkTO.setErrString(chunkTO.errString());
- return reducedChunkTO;
- }
-
- /**
- * Checks if the received PtPChunkDataTO contains the fields not set by the
- * front end but required
- *
- * @param chunkTO
- * @return
- */
- private boolean isComplete(PtPChunkDataTO chunkTO) {
-
- return (chunkTO.normalizedStFN() != null)
- && (chunkTO.surlUniqueID() != null);
- }
-
- /**
- * Checks if the received ReducedPtGChunkDataTO contains the fields not set by
- * the front end but required
- *
- * @param reducedChunkTO
- * @return
- */
- private boolean isComplete(ReducedPtPChunkDataTO reducedChunkTO) {
-
- return (reducedChunkTO.normalizedStFN() != null)
- && (reducedChunkTO.surlUniqueID() != null);
- }
-
- public Collection<ReducedPtPChunkData> lookupReducedPtPChunkData(
- TRequestToken requestToken, Collection<TSURL> surls) {
-
- Collection<ReducedPtPChunkDataTO> reducedChunkDataTOs = dao.findReduced(
- requestToken.getValue(), surls);
- log.debug("PtP CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs);
- return buildReducedChunkDataList(reducedChunkDataTOs);
- }
-
- public Collection<PtPPersistentChunkData> lookupPtPChunkData(TSURL surl,
- GridUserInterface user) {
-
- return lookupPtPChunkData(
- (List<TSURL>) Arrays.asList(new TSURL[] { surl }), user);
- }
-
- private Collection<PtPPersistentChunkData> lookupPtPChunkData(
- List<TSURL> surls, GridUserInterface user) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection<PtPChunkDataTO> chunkDataTOs = dao.find(surlsUniqueIDs,
- surlsArray, user.getDn());
- log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs);
- return buildChunkDataList(chunkDataTOs);
- }
-
- private Collection<PtPPersistentChunkData> buildChunkDataList(
- Collection<PtPChunkDataTO> chunkDataTOs) {
-
- ArrayList<PtPPersistentChunkData> list = new ArrayList<PtPPersistentChunkData>();
- PtPPersistentChunkData chunk;
- for (PtPChunkDataTO chunkTO : chunkDataTOs) {
- chunk = makeOne(chunkTO);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(completeTO(chunkTO, chunk));
- } catch (InvalidReducedPtPChunkDataAttributesException e) {
- log.warn("PtG CHUNK CATALOG! unable to add missing informations on "
- + "DB to the request: {}", e.getMessage());
- }
- }
- log.debug("PtPChunkCatalog: returning {}\n\n", list);
- return list;
- }
-
- private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) {
-
- try {
- return makeOne(chunkTO,
- new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp()));
- } catch (InvalidTRequestTokenAttributesException e) {
- throw new IllegalStateException(
- "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: "
- + e);
- }
- }
-
- private Collection<ReducedPtPChunkData> buildReducedChunkDataList(
- Collection<ReducedPtPChunkDataTO> chunkDataTOCollection) {
-
- ArrayList<ReducedPtPChunkData> list = new ArrayList<ReducedPtPChunkData>();
- ReducedPtPChunkData reducedChunkData;
- for (ReducedPtPChunkDataTO reducedChunkDataTO : chunkDataTOCollection) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!this.isComplete(reducedChunkDataTO)) {
- this.completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("PtP CHUNK CATALOG: returning {}", list);
- return list;
- }
-
- private ReducedPtPChunkData makeOneReduced(
- ReducedPtPChunkDataTO reducedChunkDataTO) {
-
- StringBuilder errorSb = new StringBuilder();
- // fromSURL
- TSURL toSURL = null;
- try {
- toSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.toSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (reducedChunkDataTO.normalizedStFN() != null) {
- toSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN());
- }
- if (reducedChunkDataTO.surlUniqueID() != null) {
- toSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue());
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- reducedChunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + reducedChunkDataTO.status());
- } else {
- status = new TReturnStatus(code, reducedChunkDataTO.errString());
- }
- // fileStorageType
- TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance()
- .toSTORM(reducedChunkDataTO.fileStorageType());
- if (fileStorageType == TFileStorageType.EMPTY) {
- errorSb.append("\nTFileStorageType could not be "
- + "translated from its String representation! String: "
- + reducedChunkDataTO.fileStorageType());
- // Use the default value defined in Configuration.
- fileStorageType = TFileStorageType.getTFileStorageType(Configuration
- .getInstance().getDefaultFileStorageType());
- errorSb
- .append("\nUsed the default TFileStorageType as defined in StoRM config.: "
- + fileStorageType);
- }
- // fileLifetime
- TLifeTimeInSeconds fileLifetime = null;
- try {
- fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter
- .getInstance().toStoRM(reducedChunkDataTO.fileLifetime()),
- TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // make ReducedPtPChunkData
- ReducedPtPChunkData aux = null;
- try {
- aux = new ReducedPtPChunkData(toSURL, status, fileStorageType,
- fileLifetime);
- aux.setPrimaryKey(reducedChunkDataTO.primaryKey());
- } catch (InvalidReducedPtPChunkDataAttributesException e) {
- log.warn("PtP CHUNK CATALOG! Retrieved malformed Reduced PtP"
- + " chunk data from persistence: dropping reduced chunk...");
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- public int updateStatus(TRequestToken requestToken, TSURL surl,
- TStatusCode statusCode, String explanation) {
-
- return dao.updateStatus(requestToken, new int[] { surl.uniqueId() },
- new String[] { surl.rawSurl() }, statusCode, explanation);
- }
-
- public int updateFromPreviousStatus(TRequestToken requestToken,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode,
- newStatusCode, explanation);
- }
-
- public int updateFromPreviousStatus(TRequestToken requestToken,
- List<TSURL> surlList, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode) {
-
- int[] surlsUniqueIDs = new int[surlList.size()];
- String[] surls = new String[surlList.size()];
- int index = 0;
- for (TSURL tsurl : surlList) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode);
- }
+ private static final Logger log = LoggerFactory.getLogger(PtPChunkCatalog.class);
+
+ private static PtPChunkCatalog instance;
+
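+ /**
+ * Returns the one and only PtPChunkCatalog instance, created lazily; the
+ * method is synchronized so concurrent first calls cannot build two instances.
+ */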
+ public static synchronized PtPChunkCatalog getInstance() {
+ if (instance == null) {
+ instance = new PtPChunkCatalog();
+ }
+ return instance;
+ }
+
+ private final PtPChunkDAO dao;
+
+ private PtPChunkCatalog() {
+ dao = PtPChunkDAOMySql.getInstance();
+ }
+
+ /**
+ * Method used to update into Persistence a retrieved PtPChunkData.
+ */
+ public synchronized void update(PtPPersistentChunkData chunkData) {
+
+ PtPChunkDataTO to = new PtPChunkDataTO();
+ /* Primary key needed by DAO Object */
+ to.setPrimaryKey(chunkData.getPrimaryKey());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode()));
+ to.setErrString(chunkData.getStatus().getExplanation());
+ to.setTransferURL(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString()));
+ to.setPinLifetime(PinLifetimeConverter.getInstance().toDB(chunkData.pinLifetime().value()));
+ to.setFileLifetime(FileLifetimeConverter.getInstance().toDB(chunkData.fileLifetime().value()));
+ to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB(chunkData.fileStorageType()));
+ to.setOverwriteOption(OverwriteModeConverter.toDB(chunkData.overwriteOption()));
+ to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
+ to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId()));
+ to.setClientDN(chunkData.getUser().getDn());
+ if (chunkData.getUser() instanceof AbstractGridUser) {
+ if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
+ to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString());
+ }
+
+ }
+ dao.update(to);
+ }
+
+ /**
+ * Method that returns a Collection of PtPChunkData Objects matching the supplied TRequestToken.
+ * If any of the data associated to the TRequestToken is not well formed and so does not allow a
+ * PtPChunkData Object to be created, then that part of the request is dropped, gets logged and an
+ * attempt is made to write in the DB that the chunk was malformed; the processing continues with
+ * the next part. Only the valid chunks get returned. If there are no chunks to process then an
+ * empty Collection is returned, and a message gets logged. NOTE! Chunks in SRM_ABORTED status are
+ * NOT returned! This is important because this method is intended to be used by the Feeders to
+ * fetch all chunks in the request, and aborted chunks should not be picked up for processing!
+ */
+ public synchronized Collection<PtPPersistentChunkData> lookup(final TRequestToken rt) {
+
+ Collection<PtPChunkDataTO> chunkTOs = dao.find(rt);
+ log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs);
+ return buildChunkDataList(chunkTOs);
+ }
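+
+ // Usage sketch (hypothetical variable names): fetch all non-aborted chunks of a
+ // request, mutate them, then persist each change back through update():
+ //   Collection<PtPPersistentChunkData> chunks = PtPChunkCatalog.getInstance().lookup(token);
+ //   for (PtPPersistentChunkData chunk : chunks) { /* adjust status... */ PtPChunkCatalog.getInstance().update(chunk); }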
+
+ /**
+ * Private method used to create a PtPChunkData object, from a PtPChunkDataTO and TRequestToken.
+ * If a chunk cannot be created, an error message gets logged and an attempt is made to signal in
+ * the DB that the chunk is malformed.
+ */
+ private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) {
+
+ StringBuilder errorSb = new StringBuilder();
+ // toSURL
+ TSURL toSURL = null;
+ try {
+ toSURL = TSURL.makeFromStringValidate(auxTO.toSURL());
+ } catch (InvalidTSURLAttributesException e) {
+ errorSb.append(e);
+ }
+ if (auxTO.normalizedStFN() != null) {
+ toSURL.setNormalizedStFN(auxTO.normalizedStFN());
+ }
+ if (auxTO.surlUniqueID() != null) {
+ toSURL.setUniqueID(auxTO.surlUniqueID().intValue());
+ }
+ // pinLifetime
+ TLifeTimeInSeconds pinLifetime = null;
+ try {
+ long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.pinLifetime());
+ // Check for max value allowed
+ long max = StormConfiguration.getInstance().getPinLifetimeMaximum();
+ if (pinLifeTime > max) {
+ log.warn("PinLifeTime is greater than the max value allowed. Drop the "
+ + "value to the max = {} seconds", max);
+ pinLifeTime = max;
+ }
+ pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS);
+ } catch (IllegalArgumentException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // fileLifetime
+ TLifeTimeInSeconds fileLifetime = null;
+ try {
+ fileLifetime = TLifeTimeInSeconds
+ .make(FileLifetimeConverter.getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS);
+ } catch (IllegalArgumentException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // fileStorageType
+ TFileStorageType fileStorageType =
+ FileStorageTypeConverter.getInstance().toSTORM(auxTO.fileStorageType());
+ if (fileStorageType == TFileStorageType.EMPTY) {
+ errorSb.append("\nTFileStorageType could not be translated from "
+ + "its String representation! String: " + auxTO.fileStorageType());
+ // Use the default value defined in Configuration.
+ fileStorageType = TFileStorageType
+ .getTFileStorageType(StormConfiguration.getInstance().getDefaultFileStorageType());
+ errorSb.append("\nUsed the default TFileStorageType as defined " + "in StoRM config.: "
+ + fileStorageType);
+ }
+ // expectedFileSize
+ //
+ // WARNING! A converter is used because the DB uses 0 for empty, whereas
+ // StoRM object model does allow a 0 size! Since this is an optional
+ // field
+ // in the SRM specs, null must be converted explicitly to Empty
+ // TSizeInBytes
+ // because it is indeed well formed!
+ TSizeInBytes expectedFileSize = null;
+ TSizeInBytes emptySize = TSizeInBytes.makeEmpty();
+ long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM(auxTO.expectedFileSize());
+ if (emptySize.value() == sizeTranslation) {
+ expectedFileSize = emptySize;
+ } else {
+ try {
+ expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize());
+ } catch (InvalidTSizeAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ }
+ // spaceToken!
+ //
+ // WARNING! A converter is still needed because of DB logic for missing
+ // SpaceToken makes use of NULL, whereas StoRM object model does not
+ // allow
+ // for null! It makes use of a specific Empty type.
+ //
+ // Indeed, the SpaceToken field is optional, so a request with a null
+ // value
+ // for the SpaceToken field in the DB, _is_ well formed!
+ TSpaceToken spaceToken = null;
+ TSpaceToken emptyToken = TSpaceToken.makeEmpty();
+ /**
+ * convert empty string representation of DPM into StoRM representation;
+ */
+ String spaceTokenTranslation = SpaceTokenStringConverter.toStoRM(auxTO.spaceToken());
+ if (emptyToken.toString().equals(spaceTokenTranslation)) {
+ spaceToken = emptyToken;
+ } else {
+ try {
+ spaceToken = TSpaceToken.make(spaceTokenTranslation);
+ } catch (InvalidTSpaceTokenAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ }
+ // overwriteOption!
+ TOverwriteMode overwriteOption = OverwriteModeConverter.toSTORM(auxTO.overwriteOption());
+ if (overwriteOption == TOverwriteMode.EMPTY) {
+ errorSb.append("\nTOverwriteMode could not be translated "
+ + "from its String representation! String: " + auxTO.overwriteOption());
+ overwriteOption = null;
+ }
+ // transferProtocols
+ TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.protocolList());
+ if (transferProtocols.size() == 0) {
+ errorSb
+ .append("\nEmpty list of TransferProtocols " + "or could not translate TransferProtocols!");
+ transferProtocols = null; // fail construction of PtPChunkData!
+ }
+ // status
+ TReturnStatus status = null;
+ TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.status());
+ if (code == TStatusCode.EMPTY) {
+ errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.status());
+ } else {
+ status = new TReturnStatus(code, auxTO.errString());
+ }
+ GridUserInterface gridUser = null;
+ try {
+ if (auxTO.vomsAttributes() != null && !auxTO.vomsAttributes().trim().equals("")) {
+ gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(), auxTO.vomsAttributesArray());
+ } else {
+ gridUser = GridUserManager.makeGridUser(auxTO.clientDN());
+ }
+
+ } catch (IllegalArgumentException e) {
+ log.error("Unexpected error on voms grid user creation. " + "IllegalArgumentException: {}",
+ e.getMessage(), e);
+ }
+
+ // transferURL
+ /**
+ * whatever is read is just meaningless because PtP will fill it in!!! So create an Empty TTURL
+ * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy!
+ */
+ TTURL transferURL = TTURL.makeEmpty();
+ // make PtPChunkData
+ PtPPersistentChunkData aux = null;
+ try {
+ aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime, fileLifetime,
+ fileStorageType, spaceToken, expectedFileSize, transferProtocols, overwriteOption, status,
+ transferURL);
+ aux.setPrimaryKey(auxTO.primaryKey());
+ } catch (InvalidPtPPersistentChunkDataAttributesException e) {
+ dao.fail(auxTO);
+ log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
+ + " from persistence. Dropping chunk from request: {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ } catch (InvalidPtPDataAttributesException e) {
+ dao.fail(auxTO);
+ log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
+ + " from persistence. Dropping chunk from request: {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ } catch (InvalidFileTransferDataAttributesException e) {
+ dao.fail(auxTO);
+ log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
+ + " from persistence. Dropping chunk from request: {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ } catch (InvalidSurlRequestDataAttributesException e) {
+ dao.fail(auxTO);
+ log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
+ + " from persistence. Dropping chunk from request: {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ }
+ // end...
+ return aux;
+ }
+
+ /**
+ *
+ * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique ID taken from the
+ * PtPChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ */
+ private void completeTO(ReducedPtPChunkDataTO chunkTO, final ReducedPtPChunkData chunk) {
+
+ chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN());
+ chunkTO.setSurlUniqueID(Integer.valueOf(chunk.toSURL().uniqueId()));
+ }
+
+ /**
+ *
+ * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and completes it with the
+ * normalized StFN and the SURL unique ID taken from the PtGChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ * @return
+ * @throws InvalidReducedPtPChunkDataAttributesException
+ */
+ private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO,
+ final PtPPersistentChunkData chunk) throws InvalidReducedPtPChunkDataAttributesException {
+
+ ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO);
+ this.completeTO(reducedChunkTO, this.reduce(chunk));
+ return reducedChunkTO;
+ }
+
+ /**
+ * Creates a ReducedPtPChunkData from the data contained in the received PtPChunkData
+ *
+ * @param chunk
+ * @return
+ * @throws InvalidReducedPtPChunkDataAttributesException
+ */
+ private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk)
+ throws InvalidReducedPtPChunkDataAttributesException {
+
+ ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(), chunk.getStatus(),
+ chunk.fileStorageType(), chunk.fileLifetime());
+ reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
+ return reducedChunk;
+ }
+
+ /**
+ * Creates a ReducedPtPChunkDataTO from the data contained in the received PtPChunkDataTO
+ *
+ * @param chunkTO
+ * @return
+ */
+ private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) {
+
+ ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO();
+ reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
+ reducedChunkTO.setToSURL(chunkTO.toSURL());
+ reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
+ reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID());
+ reducedChunkTO.setStatus(chunkTO.status());
+ reducedChunkTO.setErrString(chunkTO.errString());
+ return reducedChunkTO;
+ }
+
+ /**
+ * Checks if the received PtPChunkDataTO contains the fields not set by the front end but required
+ *
+ * @param chunkTO
+ * @return
+ */
+ private boolean isComplete(PtPChunkDataTO chunkTO) {
+
+ return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null);
+ }
+
+ public Collection<PtPPersistentChunkData> lookupPtPChunkData(TSURL surl, GridUserInterface user) {
+
+ return lookupPtPChunkData((List<TSURL>) Arrays.asList(new TSURL[] {surl}), user);
+ }
+
+ private Collection<PtPPersistentChunkData> lookupPtPChunkData(List<TSURL> surls,
+ GridUserInterface user) {
+
+ int[] surlsUniqueIDs = new int[surls.size()];
+ String[] surlsArray = new String[surls.size()];
+ int index = 0;
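+ // Build parallel arrays of SURL unique IDs and raw SURL strings; the DAO
+ // query filters on both columns.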
+ for (TSURL tsurl : surls) {
+ surlsUniqueIDs[index] = tsurl.uniqueId();
+ surlsArray[index] = tsurl.rawSurl();
+ index++;
+ }
+ Collection<PtPChunkDataTO> chunkDataTOs = dao.find(surlsUniqueIDs, surlsArray, user.getDn());
+ log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs);
+ return buildChunkDataList(chunkDataTOs);
+ }
+
+ private Collection<PtPPersistentChunkData> buildChunkDataList(
+ Collection<PtPChunkDataTO> chunkDataTOs) {
+
+ Collection<PtPPersistentChunkData> list = Lists.newArrayList();
+ PtPPersistentChunkData chunk;
+ for (PtPChunkDataTO chunkTO : chunkDataTOs) {
+ chunk = makeOne(chunkTO);
+ if (chunk == null) {
+ continue;
+ }
+ list.add(chunk);
+ if (isComplete(chunkTO)) {
+ continue;
+ }
+ try {
+ dao.updateIncomplete(completeTO(chunkTO, chunk));
+ } catch (InvalidReducedPtPChunkDataAttributesException e) {
+ log.warn(
+ "PtG CHUNK CATALOG! unable to add missing informations on " + "DB to the request: {}",
+ e.getMessage());
+ }
+ }
+ log.debug("PtPChunkCatalog: returning {}\n\n", list);
+ return list;
+ }
+
+ private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) {
+
+ try {
+ return makeOne(chunkTO, new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp()));
+ } catch (InvalidTRequestTokenAttributesException e) {
+ throw new IllegalStateException(
+ "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " + e);
+ }
+ }
+
+ public int updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode,
+ String explanation) {
+
+ return dao.updateStatus(requestToken, new int[] {surl.uniqueId()},
+ new String[] {surl.rawSurl()}, statusCode, explanation);
+ }
+
+ public int updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode,
+ TStatusCode newStatusCode, String explanation) {
+
+ return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode,
+ explanation);
+ }
+
+ public int updateFromPreviousStatus(TRequestToken requestToken, List<TSURL> surlList,
+ TStatusCode expectedStatusCode, TStatusCode newStatusCode) {
+
+ int[] surlsUniqueIDs = new int[surlList.size()];
+ String[] surls = new String[surlList.size()];
+ int index = 0;
+ for (TSURL tsurl : surlList) {
+ surlsUniqueIDs[index] = tsurl.uniqueId();
+ surls[index] = tsurl.rawSurl();
+ index++;
+ }
+ return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, expectedStatusCode,
+ newStatusCode);
+ }
}
diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java
deleted file mode 100644
index 388c7853a..000000000
--- a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java
+++ /dev/null
@@ -1,1670 +0,0 @@
-/**
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
- * SPDX-License-Identifier: Apache-2.0
- */
-package it.grid.storm.catalogs;
-
-import static it.grid.storm.catalogs.ChunkDAOUtils.buildInClauseForArray;
-import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings;
-import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED;
-import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE;
-import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LIFETIME_EXPIRED;
-import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS;
-import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE;
-import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-import it.grid.storm.config.Configuration;
-import it.grid.storm.namespace.NamespaceException;
-import it.grid.storm.namespace.naming.SURL;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TStatusCode;
-
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Timer;
-import java.util.TimerTask;
-
-/**
- * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect
- * to a MySQL DB. The raw data found in those tables is pre-treated in order to
- * turn it into the Object Model of StoRM. See Method comments for further info.
- * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the
- * object model.
- *
- * @author EGRID ICTP
- * @version 2.0
- * @date June 2005
- */
-public class PtPChunkDAO {
-
- private static final Logger log = LoggerFactory.getLogger(PtPChunkDAO.class);
-
- /* String with the name of the class for the DB driver */
- private final String driver = Configuration.getInstance().getDBDriver();
- /* String referring to the URL of the DB */
- private final String url = Configuration.getInstance().getStormDbURL();
- /* String with the password for the DB */
- private final String password = Configuration.getInstance().getDBPassword();
- /* String with the name for the DB */
- private final String name = Configuration.getInstance().getDBUserName();
- /* Connection to DB - WARNING!!! It is kept open all the time! */
- private Connection con = null;
-
- private static final PtPChunkDAO dao = new PtPChunkDAO();
-
- /* timer thread that will run a task to alert when reconnecting is necessary! */
- private Timer clock = null;
- /*
- * timer task that will update the boolean signaling that a reconnection is
- * needed
- */
- private TimerTask clockTask = null;
- /* milliseconds that must pass before reconnecting to DB */
- private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000;
- /* initial delay in milliseconds before starting timer */
- private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000;
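- /* (both intervals come from configuration in seconds and are converted to milliseconds here) */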
-
- /* boolean that tells whether reconnection is needed because of a MySQL bug! */
- private boolean reconnect = false;
-
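- /* converter mapping TStatusCode values to/from their integer representation in the DB */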
- private StatusCodeConverter statusCodeConverter = StatusCodeConverter.getInstance();
-
- private PtPChunkDAO() {
-
- setUpConnection();
- clock = new Timer();
- clockTask = new TimerTask() {
-
- @Override
- public void run() {
-
- reconnect = true;
- }
- }; // clock task
- clock.scheduleAtFixedRate(clockTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of the PtPChunkDAO.
- */
- public static PtPChunkDAO getInstance() {
-
- return dao;
- }
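-
- /*
-  * Illustrative caller-side sketch (hypothetical variable names, not part of
-  * this class's contract):
-  *   PtPChunkDAO dao = PtPChunkDAO.getInstance();
-  *   Collection chunks = dao.find(requestToken); // all non-aborted chunks of a request
-  */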
-
- /**
- * Method used to save the changes made to a retrieved PtPChunkDataTO back
- * into the MySQL DB. Only the transferURL, statusCode and explanation of the
- * status_Put table get written to the DB, and likewise the pinLifetime and
- * fileLifetime of request_queue. In case of any error, an error message gets
- * logged but no exception is thrown.
- */
- public synchronized void update(PtPChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("PtP CHUNK DAO: update - unable to get a valid connection!");
- return;
- }
- PreparedStatement updatePut = null;
- try {
- // prepare statement...
- updatePut = con
- .prepareStatement("UPDATE "
- + "request_queue rq JOIN (status_Put sp, request_Put rp) ON "
- + "(rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) "
- + "SET sp.transferURL=?, sp.statusCode=?, sp.explanation=?, rq.pinLifetime=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, "
- + "rp.normalized_targetSURL_StFN=?, rp.targetSURL_uniqueID=? "
- + "WHERE rp.ID=?");
- printWarnings(con.getWarnings());
-
- updatePut.setString(1, to.transferURL());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setInt(2, to.status());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setString(3, to.errString());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setInt(4, to.pinLifetime());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setInt(5, to.fileLifetime());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setString(6, to.fileStorageType());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setString(7, to.overwriteOption());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setString(8, to.normalizedStFN());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setInt(9, to.surlUniqueID());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setLong(10, to.primaryKey());
- printWarnings(updatePut.getWarnings());
- // run updateStatusPut...
- log.trace("PtP CHUNK DAO - update method: {}", updatePut);
- updatePut.executeUpdate();
- printWarnings(updatePut.getWarnings());
- } catch (SQLException e) {
- log.error("PtP CHUNK DAO: Unable to complete update! {}", e.getMessage(), e);
- } finally {
- close(updatePut);
- }
- }
-
- /**
- * Updates the request_Put represented by the received ReducedPtPChunkDataTO
- * by setting its normalized_targetSURL_StFN and targetSURL_uniqueID
- *
- * @param chunkTO
- */
- public synchronized void updateIncomplete(ReducedPtPChunkDataTO chunkTO) {
-
- if (!checkConnection()) {
- log.error("PtP CHUNK DAO: updateIncomplete - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_Put SET normalized_targetSURL_StFN=?, targetSURL_uniqueID=? "
- + "WHERE ID=?";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- stmt.setString(1, chunkTO.normalizedStFN());
- printWarnings(stmt.getWarnings());
-
- stmt.setInt(2, chunkTO.surlUniqueID());
- printWarnings(stmt.getWarnings());
-
- stmt.setLong(3, chunkTO.primaryKey());
- printWarnings(stmt.getWarnings());
-
- log.trace("PtP CHUNK DAO - update incomplete: {}", stmt);
- stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- } catch (SQLException e) {
- log.error("PtP CHUNK DAO: Unable to complete update incomplete! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * Method used to refresh the PtPChunkDataTO information from the MySQL DB.
- * This method is intended to be used during the srmAbortRequest/File
- * operation. In case of any error, an error message gets logged but no
- * exception is thrown; a null PtPChunkDataTO is returned.
- */
- public synchronized PtPChunkDataTO refresh(long id) {
-
- if (!checkConnection()) {
- log.error("PtP CHUNK DAO: refresh - unable to get a valid connection!");
- return null;
- }
- String prot = "SELECT tp.config_ProtocolsID FROM request_TransferProtocols tp "
- + "WHERE tp.request_queueID IN "
- + "(SELECT rp.request_queueID FROM request_Put rp WHERE rp.ID=?)";
-
- String refresh = "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.r_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode, sp.transferURL "
- + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) "
- + "ON (rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) "
- + "WHERE rp.ID=?";
-
- PreparedStatement stmt = null;
- ResultSet rs = null;
- PtPChunkDataTO chunkDataTO = null;
-
- try {
- // get protocols for the request
- stmt = con.prepareStatement(prot);
- printWarnings(con.getWarnings());
-
- List<String> protocols = Lists.newArrayList();
- stmt.setLong(1, id);
- printWarnings(stmt.getWarnings());
-
- log.trace("PtP CHUNK DAO - refresh method: {}", stmt);
- rs = stmt.executeQuery();
- printWarnings(stmt.getWarnings());
- while (rs.next()) {
- protocols.add(rs.getString("tp.config_ProtocolsID"));
- }
- close(rs);
- close(stmt);
-
- // get chunk of the request
- stmt = con.prepareStatement(refresh);
- printWarnings(con.getWarnings());
-
- stmt.setLong(1, id);
- printWarnings(stmt.getWarnings());
-
- log.trace("PtP CHUNK DAO - refresh method: {}", stmt);
- rs = stmt.executeQuery();
- printWarnings(stmt.getWarnings());
-
- if (rs.next()) {
- chunkDataTO = new PtPChunkDataTO();
- chunkDataTO.setFileStorageType(rs
- .getString("rq.config_FileStorageTypeID"));
- chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime"));
- chunkDataTO.setSpaceToken(rs.getString("rq.s_token"));
- chunkDataTO.setRequestToken(rs.getString("rq.r_token"));
- chunkDataTO.setPrimaryKey(rs.getLong("rp.ID"));
- chunkDataTO.setToSURL(rs.getString("rp.targetSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rp.normalized_targetSURL_StFN"));
- int uniqueID = rs.getInt("rp.targetSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID));
- }
-
- chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize"));
- chunkDataTO.setProtocolList(protocols);
- chunkDataTO.setStatus(rs.getInt("sp.statusCode"));
- chunkDataTO.setTransferURL(rs.getString("sp.transferURL"));
- chunkDataTO.setClientDN(rs.getString("rq.client_dn"));
-
- /**
- * This code is only for release 1.3.18. It is a workaround to get FQANs
- * using the proxy field on request_queue. The FE uses the proxy field of
- * request_queue to insert a single FQAN string containing all FQANs
- * separated by the "#" char. The proxy is a BLOB, hence it has to be
- * properly converted into a string.
- */
- java.sql.Blob blob = rs.getBlob("rq.proxy");
- if (!rs.wasNull() && blob != null) {
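- // JDBC Blob offsets are 1-based, hence getBytes(1, ...); note that the
- // String constructor below uses the platform default charset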
- byte[] bdata = blob.getBytes(1, (int) blob.length());
- chunkDataTO.setVomsAttributes(new String(bdata));
- }
- if (rs.next()) {
- log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! "
- + "refresh method invoked for specific chunk with id {}, but found "
- + "more than one such chunks!", id);
- }
- } else {
- log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! "
- + "refresh method invoked for specific chunk with id {}, but chunk "
- + "NOT found in persistence!", id);
- }
- } catch (SQLException e) {
- log.error("PtP CHUNK DAO! Unable to refresh chunk! {}", e.getMessage(), e);
- chunkDataTO = null;
- } finally {
- close(rs);
- close(stmt);
- }
- return chunkDataTO;
- }
-
- /**
- * Method that queries the MySQL DB to find all entries matching the supplied
- * TRequestToken. The Collection contains the corresponding PtPChunkDataTO
- * objects. An initial simple query establishes the list of protocols
- * associated with the request. A second, more complex query establishes all
- * chunks associated with the request, by properly joining request_queue,
- * request_Put and status_Put. The considered fields are: (1) from status_Put:
- * the ID field, which becomes the TO's primary key, and statusCode; (2) from
- * request_Put: targetSURL and expectedFileSize; (3) from request_queue:
- * pinLifetime, fileLifetime, config_FileStorageTypeID, s_token and
- * config_OverwriteID. In case of any error, a log gets written and an empty
- * collection is returned; no exception is thrown. NOTE! Chunks in
- * SRM_ABORTED status are NOT returned! This is important because this method
- * is intended to be used by the Feeders to fetch all chunks in the request,
- * and aborted chunks should not be picked up for processing!
- */
- public synchronized Collection find(TRequestToken requestToken) {
-
- if (!checkConnection()) {
- log.error("PtP CHUNK DAO: find - unable to get a valid connection!");
- // honor the documented contract: return an empty collection rather than null
- return Lists.newArrayList();
- }
- String strToken = requestToken.toString();
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
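- // first, the simple query: transfer protocols associated with the request token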
- str = "SELECT tp.config_ProtocolsID "
- + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID "
- + "WHERE rq.r_token=?";
-
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- List<String> protocols = Lists.newArrayList();
- find.setString(1, strToken);
- printWarnings(find.getWarnings());
-
- log.trace("PtP CHUNK DAO - find method: {}", find);
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- while (rs.next()) {
- protocols.add(rs.getString("tp.config_ProtocolsID"));
- }
- close(rs);
- close(find);
-
- // get chunks of the request
- str = "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode "
- + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) "
- + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) "
- + "WHERE rq.r_token=? AND sp.statusCode<>?";
-
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- List